max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---|
Unsupervised/pix2pixHD/extract_frames.py | Kebniss/AutoDetect | 1 | 1600 | <filename>Unsupervised/pix2pixHD/extract_frames.py
import os
import cv2
import argparse
from utils import *
from tqdm import tqdm
from glob import glob
from pathlib import Path
def _extract_frames(video_path, dest_folder, start=0, sampling_f=1):
vidcap = cv2.VideoCapture(video_path)
success, image = vidcap.read()
count = -1
saved = 0
print(f'Processing: {video_path}')
while success:
count += 1
if count % 300 == 0:
print('Processing frame: ', count)
if count % sampling_f == 0:
# sampling
cv2.imwrite(''.join([dest_folder, f"/{count + start}.jpg"]), image)
saved += 1
success, image = vidcap.read() # read next
print(f'Successfully saved {saved} frames to {dest_folder}')
return count + start + 1
parser = argparse.ArgumentParser(
description='build a "frame dataset" from a given video')
parser.add_argument('-input', dest="input", required=True,
help='''Path to a single video or a folder. If path to folder the algorithm
will extract frames from all files with extension defined in
--extension and save them under separate folders under dest_folder.
The frames from each video will be saved under a folder with its name.
''')
parser.add_argument('--dest-folder', dest="dest_folder", default='./dataset/',
help='''Path where to store frames. NB all files in this folder will be
removed before adding the new frames''')
parser.add_argument('--same-folder', dest="same_folder", action='store_true',
help='''Pass this flag to save the frames of all videos to the
same folder in ascending order, going from the first frame of the first video
to the last frame of the last video. If set, frames will be saved in
dest_folder/frames.''')
parser.add_argument('--sampling', help='keep one frame every N frames (sampling factor)', default='3')
parser.add_argument('--run-type', help='train or test', default='train')
parser.add_argument('--extension', help='avi, mp4, mov...', default='mp4')
parser.add_argument('-width', help='output width', default=640, type=int)
parser.add_argument('-height', help='output height', default=480, type=int)
args = parser.parse_args()
mkdir(args.dest_folder)
if (args.width % 32 != 0) or (args.height % 32 != 0):
raise Exception("Please use width and height that are divisible by 32")
if os.path.isdir(args.input):
inp = str(Path(args.input) / f'*.{args.extension}')
videos = [v for v in glob(inp)]
if not videos:
raise Exception(f'No {args.extension} files in input directory {args.input}')
elif os.path.isfile(args.input):
_, ext = get_filename_extension(args.input)
if ext != args.extension:
raise ValueError(f'Correct inputs: folder or path to {args.extension} file only')
videos = [args.input]
else:
raise ValueError(f'Correct inputs: folder or path to {args.extension} file only')
if args.same_folder:
start = 0
dest_folder = str(Path(args.dest_folder) / f'{args.run_type}_frames')
mkdir(dest_folder)
for v in tqdm(videos):
if not args.same_folder:
start = 0
name, _ = get_filename_extension(v)
dest_folder = str(Path(args.dest_folder) / name)
mkdir(dest_folder)
start = _extract_frames(v, dest_folder, start, sampling_f=int(args.sampling))
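# Example invocation (hypothetical paths; flags as defined above):
#   python extract_frames.py -input ./videos --dest-folder ./dataset/ --sampling 3 --extension mp4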
| 3.171875 | 3 |
burp-filter-options/filter-options.py | parsiya/Parsia-Code | 21 | 1601 | <reponame>parsiya/Parsia-Code
# modified "example traffic redirector"
# https://raw.githubusercontent.com/PortSwigger/example-traffic-redirector/master/python/TrafficRedirector.py
# Idea: https://github.com/pajswigger/filter-options/blob/master/src/filter-options.kt
# Usage: Put both files in a directory and add filter-options.py to Burp. Needs Jython.
# Blog post: https://parsiya.net/blog/2019-04-06-hiding-options-an-adventure-in-dealing-with-burp-proxy-in-an-extension/
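# Effect (sketch of the idea, not part of the original comments): responses to OPTIONS
# requests get their Content-Type overwritten to "text/css; charset=UTF-8", so Burp's
# proxy-history MIME-type filter can treat them as CSS and hide them.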
# support for burp-exceptions - see https://github.com/securityMB/burp-exceptions
try:
from exceptions_fix import FixBurpExceptions
import sys
except ImportError:
pass
# support for burputils - https://github.com/parsiya/burputils
try:
from burputils import BurpUtils
except ImportError:
pass
from burp import IBurpExtender
from burp import IHttpListener
class BurpExtender(IBurpExtender, IHttpListener):
# implement IBurpExtender
# set everything up
def registerExtenderCallbacks(self, callbacks):
# obtain an extension helpers object
self.utils = BurpUtils(callbacks.getHelpers())
# support for burp-exceptions
try:
sys.stdout = callbacks.getStdout()
except:
pass
# set our extension name
callbacks.setExtensionName("Filter OPTIONS")
# register an HTTP listener
callbacks.registerHttpListener(self)
#
# implement IHttpListener
#
def processHttpMessage(self, toolFlag, messageIsRequest, messageInfo):
# only process responses
if messageIsRequest:
return
# now we only have responses
# get the request associated with the response
requestInfo = self.utils.getInfo(True, messageInfo)
# return if the request method was not OPTIONS
if requestInfo.getMethod() != "OPTIONS":
return
# get response info
responseInfo = self.utils.getInfo(False, messageInfo)
# get headers using utils
headers = self.utils.getHeaders(responseInfo)
# overwrite the Content-Type header. Overwrite adds the header if it
# does not exist.
headers.overwrite("Content-Type", "text/css; charset=UTF-8")
# put everything back together
bodyBytes = self.utils.getBody(messageIsRequest, messageInfo)
# Debug
# rawHeaders = headers.exportRaw()
# build message
modifiedmsg = self.utils.burpHelper.buildHttpMessage(headers.exportRaw(), bodyBytes)
# set modified message response
self.utils.setRequestResponse(messageIsRequest, modifiedmsg, messageInfo)
# this should be reflected in response tab
# done
print "--------"
return
# support for burp-exceptions
try:
FixBurpExceptions()
except:
pass
| 2.109375 | 2 |
AppServer/google/appengine/tools/devappserver2/login.py | loftwah/appscale | 790 | 1602 | <reponame>loftwah/appscale
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Handles login/logout pages and dealing with user cookies.
Includes a WSGI application that serves the login page and handles login and
logout HTTP requests. It accepts these GET query parameters:
continue: URL to redirect to after a login or logout has completed.
email: Email address to set for the client.
admin: If 'True', the client should be logged in as an admin.
action: What action to take ('Login' or 'Logout').
To view the current user information and a form for logging in and out,
supply no parameters.
"""
import cgi
import Cookie
import hashlib
import logging
import os
import sha
import sys
import urllib
import uuid
import webapp2
app_dashboard_lib = '/../../../../../AppDashboard/lib'
sys.path.append(os.path.dirname(__file__) + app_dashboard_lib)
from app_dashboard_helper import AppDashboardHelper
# URL of the login page within the dev appserver.
LOGIN_URL_RELATIVE = '_ah/login'
# CGI parameter constants.
CONTINUE_PARAM = 'continue'
_EMAIL_PARAM = 'email'
_ADMIN_PARAM = 'admin'
ACTION_PARAM = 'action'
# Values for the action parameter.
LOGOUT_ACTION = 'logout'
LOGIN_ACTION = 'login'
# Name of the cookie that stores the user info.
_COOKIE_NAME = 'dev_appserver_login'
# Indicates that the user has admin access to all applications.
CLOUD_ADMIN_MARKER = 'CLOUD_ADMIN'
# The port that the AppDashboard serves HTTPS traffic on.
DASHBOARD_HTTPS_PORT = "1443"
def get_user_info(http_cookie, cookie_name=_COOKIE_NAME):
"""Gets the requestor's user info from an HTTP Cookie header.
Args:
http_cookie: The value of the 'Cookie' HTTP request header.
cookie_name: The name of the cookie that stores the user info.
Returns:
A tuple (email, admin, user_id) where:
email: The user's email address, if any.
admin: True if the user is an admin; False otherwise.
user_id: The user ID, if any.
"""
try:
cookie = Cookie.SimpleCookie(http_cookie)
except Cookie.CookieError:
return '', False, ''
cookie_dict = dict((k, v.value) for k, v in cookie.iteritems())
return _get_user_info_from_dict(cookie_dict, cookie_name)
def _get_user_info_from_dict(cookie_dict, cookie_name=_COOKIE_NAME):
"""Gets the requestor's user info from a cookie dictionary.
Args:
cookie_dict: A dictionary mapping cookie names onto values.
cookie_name: The name of the cookie that stores the user info.
Returns:
A tuple (email, admin, user_id) where:
email: The user's email address, if any.
admin: True if the user is an admin; False otherwise.
user_id: The user ID, if any.
"""
cookie_secret = os.environ['COOKIE_SECRET']
cookie_value = cookie_dict.get(cookie_name, '')
cookie_value = cookie_value.replace("%3A",":")
cookie_value = cookie_value.replace("%40",'@')
cookie_value = cookie_value.replace("%2C",",")
email, nickname, admin, hsh = (cookie_value.split(':') + ['', '', '', ''])[:4]
if email == '':
nickname = ''
admin = ''
return '', False, ''
else:
vhsh = sha.new(email+nickname+admin+cookie_secret).hexdigest()
if hsh != vhsh:
logging.info("{0} has an invalid cookie, so ignoring it.".format(email))
return '', False, ''
admin_apps = admin.split(',')
current_app = os.environ['APPLICATION_ID']
is_admin = current_app in admin_apps or CLOUD_ADMIN_MARKER in admin_apps
return email, is_admin, nickname
def _create_cookie_data(email, admin):
"""Creates cookie payload data.
Args:
email: The user's email address.
admin: True if the user is an admin; False otherwise.
Returns:
A string containing the cookie payload.
"""
if email:
user_id_digest = hashlib.md5(email.lower()).digest()
user_id = '1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20]
else:
user_id = ''
return '%s:%s:%s' % (email, admin, user_id)
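# Example (hypothetical values): for email='user@example.com' and admin=True the payload
# is 'user@example.com:True:1' followed by up to 20 digits derived from the md5 of the email.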
def _set_user_info_cookie(email, admin, cookie_name=_COOKIE_NAME):
"""Creates a cookie to set the user information for the requestor.
Args:
email: The email to set for the user.
admin: True if the user should be admin; False otherwise.
cookie_name: The name of the cookie that stores the user info.
Returns:
Set-Cookie value for setting the user info of the requestor.
"""
cookie_value = _create_cookie_data(email, admin)
cookie = Cookie.SimpleCookie()
cookie[cookie_name] = cookie_value
cookie[cookie_name]['path'] = '/'
return cookie[cookie_name].OutputString()
def _clear_user_info_cookie(cookie_name=_COOKIE_NAME):
"""Clears the user info cookie from the requestor, logging them out.
Args:
cookie_name: The name of the cookie that stores the user info.
Returns:
A Set-Cookie value for clearing the user info of the requestor.
"""
cookie = Cookie.SimpleCookie()
cookie[cookie_name] = ''
cookie[cookie_name]['path'] = '/'
cookie[cookie_name]['max-age'] = '0'
if AppDashboardHelper.USE_SHIBBOLETH:
cookie[cookie_name]['domain'] = AppDashboardHelper.\
SHIBBOLETH_COOKIE_DOMAIN
return cookie[cookie_name].OutputString()
_LOGIN_TEMPLATE = """<html>
<head>
<title>Login</title>
</head>
<body>
<form method="get" action="%(login_url)s"
style="text-align:center; font: 13px sans-serif">
<div style="width: 20em; margin: 1em auto;
text-align:left;
padding: 0 2em 1.25em 2em;
background-color: #d6e9f8;
border: 2px solid #67a7e3">
<h3>%(login_message)s</h3>
<p style="padding: 0; margin: 0">
<label for="email" style="width: 3em">Email:</label>
<input name="email" type="email" value="%(email)s" id="email"/>
</p>
<p style="margin: .5em 0 0 3em; font-size:12px">
<input name="admin" type="checkbox" value="True"
%(admin_checked)s id="admin"/>
<label for="admin">Sign in as Administrator</label>
</p>
<p style="margin-left: 3em">
<input name="action" value="Login" type="submit"
id="submit-login" />
<input name="action" value="Logout" type="submit"
id="submit-logout" />
</p>
</div>
<input name="continue" type="hidden" value="%(continue_url)s"/>
</form>
</body>
</html>
"""
def _render_login_template(login_url, continue_url, email, admin):
"""Renders the login page.
Args:
login_url: The parameter to _login_response.
continue_url: The parameter to _login_response.
email: The email address of the current user, if any.
admin: True if the user is currently an admin; False otherwise.
Returns:
A string containing the contents of the login page.
"""
if email:
login_message = 'Logged in'
else:
login_message = 'Not logged in'
email = 'test\x40example.com'
admin_checked = 'checked' if admin else ''
template_dict = {
'email': cgi.escape(email, quote=True),
'admin_checked': admin_checked,
'login_message': login_message,
'login_url': cgi.escape(login_url, quote=True),
'continue_url': cgi.escape(continue_url, quote=True),
}
return _LOGIN_TEMPLATE % template_dict
def login_redirect(application_url, continue_url, start_response):
"""Writes a login redirection URL to a user.
This redirects to login_url with a continue parameter to return to
continue_url. The login_url should be on the canonical front-end server,
regardless of the host:port the user connected to.
Args:
application_url: The URL of the dev appserver domain
(e.g., 'http://localhost:8080').
continue_url: The URL to continue to after the user logs in.
start_response: A WSGI start_response function.
Returns:
An (empty) iterable over strings containing the body of the HTTP response.
"""
if AppDashboardHelper.USE_SHIBBOLETH:
redirect_url = '{0}:{1}/login?{2}={3}'.format(
AppDashboardHelper.SHIBBOLETH_CONNECTOR,
AppDashboardHelper.SHIBBOLETH_CONNECTOR_PORT,
CONTINUE_PARAM,
urllib.quote(continue_url)
)
else:
hostname = os.environ['NGINX_HOST']
redirect_url = 'https://{0}:{1}/login?{2}={3}'.format(
hostname,
DASHBOARD_HTTPS_PORT,
CONTINUE_PARAM,
urllib.quote(continue_url))
start_response('302 Requires login',
[('Location', redirect_url)])
return []
def fake_admin():
""" Generate the fake admin login secret
Returns:
A string containing the fake login secret
"""
return hashlib.sha1('{}/{}'.format(
os.environ.get('APPNAME', str(uuid.uuid4())),
os.environ.get('COOKIE_SECRET', str(uuid.uuid4())))).hexdigest()
class Handler(webapp2.RequestHandler):
"""The request handler for the login and logout pages."""
def get(self):
action = self.request.get(ACTION_PARAM)
set_email = self.request.get(_EMAIL_PARAM)
set_admin = self.request.get(_ADMIN_PARAM).lower() == 'true'
continue_url = self.request.get(CONTINUE_PARAM)
login_url = self.request.path_url
if action:
redirect_url = continue_url or login_url
# Perform the action.
if action.lower() == LOGOUT_ACTION.lower():
self.response.headers['Set-Cookie'] = _clear_user_info_cookie()
if AppDashboardHelper.USE_SHIBBOLETH:
redirect_url = AppDashboardHelper.SHIBBOLETH_LOGOUT_URL
elif action.lower() == LOGIN_ACTION.lower() and set_email:
self.response.headers['Set-Cookie'] = _set_user_info_cookie(set_email,
set_admin)
# URLs should be ASCII-only byte strings.
if isinstance(redirect_url, unicode):
redirect_url = redirect_url.encode('ascii')
# Redirect the user after performing the action.
self.response.status = 302
self.response.status_message = 'Redirecting to continue URL'
self.response.headers['Location'] = redirect_url
else:
# Send the user to the AppDashboard to log in before letting them view the
# specified URL.
if AppDashboardHelper.USE_SHIBBOLETH:
appscale_login_url = "{0}:{1}/login".format(
AppDashboardHelper.SHIBBOLETH_CONNECTOR, DASHBOARD_HTTPS_PORT)
else:
appscale_login_url = "https://{0}:{1}/login".format(
os.environ['NGINX_HOST'], DASHBOARD_HTTPS_PORT)
redirect_url = '{0}?{1}={2}'.format(appscale_login_url, CONTINUE_PARAM,
continue_url)
self.response.status = 302
self.response.status_message = 'Redirecting to login service URL'
self.response.headers['Location'] = redirect_url
application = webapp2.WSGIApplication([('/.*', Handler)], debug=True)
| 2.0625 | 2 |
sdks/python/apache_beam/runners/portability/job_server.py | noah-goodrich/beam | 1 | 1603 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import atexit
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import grpc
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.runners.portability import local_job_service
from apache_beam.utils import subprocess_server
from apache_beam.version import __version__ as beam_version
class JobServer(object):
def start(self):
"""Starts this JobServer, returning a grpc service to which to submit jobs.
"""
raise NotImplementedError(type(self))
def stop(self):
"""Stops this job server."""
raise NotImplementedError(type(self))
class ExternalJobServer(JobServer):
def __init__(self, endpoint, timeout=None):
self._endpoint = endpoint
self._timeout = timeout
def start(self):
channel = grpc.insecure_channel(self._endpoint)
grpc.channel_ready_future(channel).result(timeout=self._timeout)
return beam_job_api_pb2_grpc.JobServiceStub(channel)
def stop(self):
pass
class EmbeddedJobServer(JobServer):
def start(self):
return local_job_service.LocalJobServicer()
def stop(self):
pass
class StopOnExitJobServer(JobServer):
"""Wraps a JobServer such that its stop will automatically be called on exit.
"""
def __init__(self, job_server):
self._lock = threading.Lock()
self._job_server = job_server
self._started = False
def start(self):
with self._lock:
if not self._started:
self._endpoint = self._job_server.start()
self._started = True
atexit.register(self.stop)
signal.signal(signal.SIGINT, self.stop)
return self._endpoint
def stop(self):
with self._lock:
if self._started:
self._job_server.stop()
self._started = False
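# Minimal usage sketch (assumption: an in-process job service is acceptable):
#   server = StopOnExitJobServer(EmbeddedJobServer())
#   job_service = server.start()   # a local_job_service.LocalJobServicer instance
#   ... submit jobs against job_service ...
#   server.stop()                  # also registered via atexit and SIGINT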
class SubprocessJobServer(JobServer):
"""An abstract base class for JobServers run as an external process."""
def __init__(self):
self._local_temp_root = None
self._server = None
def subprocess_cmd_and_endpoint(self):
raise NotImplementedError(type(self))
def start(self):
if self._server is None:
self._local_temp_root = tempfile.mkdtemp(prefix='beam-temp')
cmd, endpoint = self.subprocess_cmd_and_endpoint()
port = int(endpoint.split(':')[-1])
self._server = subprocess_server.SubprocessServer(
beam_job_api_pb2_grpc.JobServiceStub, cmd, port=port)
return self._server.start()
def stop(self):
if self._local_temp_root:
shutil.rmtree(self._local_temp_root)
self._local_temp_root = None
return self._server.stop()
def local_temp_dir(self, **kwargs):
return tempfile.mkdtemp(dir=self._local_temp_root, **kwargs)
class JavaJarJobServer(SubprocessJobServer):
MAVEN_REPOSITORY = 'https://repo.maven.apache.org/maven2/org/apache/beam'
JAR_CACHE = os.path.expanduser("~/.apache_beam/cache")
def java_arguments(self, job_port, artifacts_dir):
raise NotImplementedError(type(self))
def path_to_jar(self):
raise NotImplementedError(type(self))
@staticmethod
def path_to_beam_jar(gradle_target):
return subprocess_server.JavaJarServer.path_to_beam_jar(gradle_target)
@staticmethod
def local_jar(url):
return subprocess_server.JavaJarServer.local_jar(url)
def subprocess_cmd_and_endpoint(self):
jar_path = self.local_jar(self.path_to_jar())
artifacts_dir = self.local_temp_dir(prefix='artifacts')
job_port, = subprocess_server.pick_port(None)
return (
['java', '-jar', jar_path] + list(
self.java_arguments(job_port, artifacts_dir)),
'localhost:%s' % job_port)
class DockerizedJobServer(SubprocessJobServer):
"""
Spins up the JobServer in a docker container for local execution.
"""
def __init__(self, job_host="localhost",
job_port=None,
artifact_port=None,
expansion_port=None,
harness_port_range=(8100, 8200),
max_connection_retries=5):
super(DockerizedJobServer, self).__init__()
self.job_host = job_host
self.job_port = job_port
self.expansion_port = expansion_port
self.artifact_port = artifact_port
self.harness_port_range = harness_port_range
self.max_connection_retries = max_connection_retries
def subprocess_cmd_and_endpoint(self):
# TODO This is hardcoded to Flink at the moment but should be changed
job_server_image_name = os.environ['USER'] + \
"-docker-apache.bintray.io/beam/flink-job-server:latest"
docker_path = subprocess.check_output(
['which', 'docker']).strip().decode('utf-8')
cmd = ["docker", "run",
# We mount the docker binary and socket to be able to spin up
# "sibling" containers for the SDK harness.
"-v", ':'.join([docker_path, "/bin/docker"]),
"-v", "/var/run/docker.sock:/var/run/docker.sock"]
self.job_port, self.artifact_port, self.expansion_port = (
subprocess_server.pick_port(
self.job_port, self.artifact_port, self.expansion_port))
args = ['--job-host', self.job_host,
'--job-port', str(self.job_port),
'--artifact-port', str(self.artifact_port),
'--expansion-port', str(self.expansion_port)]
if sys.platform == "darwin":
# Docker-for-Mac doesn't support host networking, so we need to explicitly
# publish ports from the Docker container to be able to connect to it.
# Also, all other containers need to be aware that they run Docker-on-Mac
# to connect against the internal Docker-for-Mac address.
cmd += ["-e", "DOCKER_MAC_CONTAINER=1"]
cmd += ["-p", "{}:{}".format(self.job_port, self.job_port)]
cmd += ["-p", "{}:{}".format(self.artifact_port, self.artifact_port)]
cmd += ["-p", "{}:{}".format(self.expansion_port, self.expansion_port)]
cmd += ["-p", "{0}-{1}:{0}-{1}".format(
self.harness_port_range[0], self.harness_port_range[1])]
else:
# This shouldn't be set for MacOS because it destroys port forwardings,
# even though host networking is not supported on MacOS.
cmd.append("--network=host")
cmd.append(job_server_image_name)
return cmd + args, '%s:%s' % (self.job_host, self.job_port)
| 1.6875 | 2 |
sympy/printing/pycode.py | tachycline/sympy | 0 | 1604 | from collections import defaultdict
from functools import wraps
from itertools import chain
from sympy.core import sympify
from .precedence import precedence
from .codeprinter import CodePrinter
_kw_py2and3 = {
'and', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif',
'else', 'except', 'finally', 'for', 'from', 'global', 'if', 'import', 'in',
'is', 'lambda', 'not', 'or', 'pass', 'raise', 'return', 'try', 'while',
'with', 'yield', 'None' # 'None' is actually not in Python 2's keyword.kwlist
}
_kw_only_py2 = {'exec', 'print'}
_kw_only_py3 = {'False', 'nonlocal', 'True'}
_known_functions = {
'Abs': 'abs',
}
_known_functions_math = {
'acos': 'acos',
'acosh': 'acosh',
'asin': 'asin',
'asinh': 'asinh',
'atan': 'atan',
'atan2': 'atan2',
'atanh': 'atanh',
'ceiling': 'ceil',
'cos': 'cos',
'cosh': 'cosh',
'erf': 'erf',
'erfc': 'erfc',
'exp': 'exp',
'expm1': 'expm1',
'factorial': 'factorial',
'floor': 'floor',
'gamma': 'gamma',
'hypot': 'hypot',
'loggamma': 'lgamma',
'log': 'log',
'log10': 'log10',
'log1p': 'log1p',
'log2': 'log2',
'sin': 'sin',
'sinh': 'sinh',
'Sqrt': 'sqrt',
'tan': 'tan',
'tanh': 'tanh'
} # Not used from ``math``: [copysign isclose isfinite isinf isnan ldexp frexp pow modf
# radians trunc fmod fsum gcd degrees fabs]
_known_constants_math = {
'Exp1': 'e',
'Pi': 'pi',
# Only in python >= 3.5:
# 'Infinity': 'inf',
# 'NaN': 'nan'
}
def _print_known_func(self, expr):
known = self.known_functions[expr.__class__.__name__]
return '{name}({args})'.format(name=self._module_format(known),
args=', '.join(map(self._print, expr.args)))
def _print_known_const(self, expr):
known = self.known_constants[expr.__class__.__name__]
return self._module_format(known)
class PythonCodePrinter(CodePrinter):
printmethod = "_pythoncode"
language = "Python"
standard = "python3"
reserved_words = _kw_py2and3.union(_kw_only_py3)
modules = None # initialized to a set in __init__
tab = ' '
_kf = dict(chain(
_known_functions.items(),
[(k, 'math.' + v) for k, v in _known_functions_math.items()]
))
_kc = {k: 'math.'+v for k, v in _known_constants_math.items()}
_operators = {'and': 'and', 'or': 'or', 'not': 'not'}
_default_settings = dict(
CodePrinter._default_settings,
user_functions={},
precision=17,
inline=True,
fully_qualified_modules=True
)
def __init__(self, settings=None):
super(PythonCodePrinter, self).__init__(settings)
self.module_imports = defaultdict(set)
self.known_functions = dict(self._kf, **(settings or {}).get(
'user_functions', {}))
self.known_constants = dict(self._kc, **(settings or {}).get(
'user_constants', {}))
def _declare_number_const(self, name, value):
return "%s = %s" % (name, value)
def _module_format(self, fqn, register=True):
parts = fqn.split('.')
if register and len(parts) > 1:
self.module_imports['.'.join(parts[:-1])].add(parts[-1])
if self._settings['fully_qualified_modules']:
return fqn
else:
return fqn.split('(')[0].split('[')[0].split('.')[-1]
def _format_code(self, lines):
return lines
def _get_comment(self, text):
return " # {0}".format(text)
def _print_NaN(self, expr):
return "float('nan')"
def _print_Infinity(self, expr):
return "float('inf')"
def _print_Mod(self, expr):
PREC = precedence(expr)
return ('{0} % {1}'.format(*map(lambda x: self.parenthesize(x, PREC), expr.args)))
def _print_Piecewise(self, expr):
result = []
i = 0
for arg in expr.args:
e = arg.expr
c = arg.cond
result.append('((')
result.append(self._print(e))
result.append(') if (')
result.append(self._print(c))
result.append(') else (')
i += 1
result = result[:-1]
result.append(') else None)')
result.append(')'*(2*i - 2))
return ''.join(result)
def _print_ITE(self, expr):
from sympy.functions.elementary.piecewise import Piecewise
return self._print(expr.rewrite(Piecewise))
def _print_Sum(self, expr):
loops = (
'for {i} in range({a}, {b}+1)'.format(
i=self._print(i),
a=self._print(a),
b=self._print(b))
for i, a, b in expr.limits)
return '(builtins.sum({function} {loops}))'.format(
function=self._print(expr.function),
loops=' '.join(loops))
def _print_ImaginaryUnit(self, expr):
return '1j'
def _print_MatrixBase(self, expr):
name = expr.__class__.__name__
func = self.known_functions.get(name, name)
return "%s(%s)" % (func, self._print(expr.tolist()))
_print_SparseMatrix = \
_print_MutableSparseMatrix = \
_print_ImmutableSparseMatrix = \
_print_Matrix = \
_print_DenseMatrix = \
_print_MutableDenseMatrix = \
_print_ImmutableMatrix = \
_print_ImmutableDenseMatrix = \
lambda self, expr: self._print_MatrixBase(expr)
for k in PythonCodePrinter._kf:
setattr(PythonCodePrinter, '_print_%s' % k, _print_known_func)
for k in _known_constants_math:
setattr(PythonCodePrinter, '_print_%s' % k, _print_known_const)
def pycode(expr, **settings):
return PythonCodePrinter(settings).doprint(expr)
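# Example usage (a small sketch; the exact output string can differ between SymPy versions):
#   from sympy import symbols, sin
#   x = symbols('x')
#   pycode(sin(x)**2)   # -> 'math.sin(x)**2'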
_not_in_mpmath = 'log1p log2'.split()
_in_mpmath = [(k, v) for k, v in _known_functions_math.items() if k not in _not_in_mpmath]
_known_functions_mpmath = dict(_in_mpmath)
_known_constants_mpmath = {
'Pi': 'pi'
}
class MpmathPrinter(PythonCodePrinter):
"""
Lambda printer for mpmath which maintains precision for floats
"""
printmethod = "_mpmathcode"
_kf = dict(chain(
_known_functions.items(),
[(k, 'mpmath.' + v) for k, v in _known_functions_mpmath.items()]
))
def _print_Integer(self, e):
return '%s(%d)' % (self._module_format('mpmath.mpf'), e)
def _print_Float(self, e):
# XXX: This does not handle setting mpmath.mp.dps. It is assumed that
# the caller of the lambdified function will have set it to sufficient
# precision to match the Floats in the expression.
# Remove 'mpz' if gmpy is installed.
args = str(tuple(map(int, e._mpf_)))
return '{func}({args})'.format(func=self._module_format('mpmath.mpf'), args=args)
def _print_uppergamma(self,e): #printer for the uppergamma function
return "{0}({1}, {2}, {3})".format(
self._module_format('mpmath.gammainc'), self._print(e.args[0]), self._print(e.args[1]),
self._module_format('mpmath.inf'))
def _print_lowergamma(self,e): #printer for the lowergamma function
return "{0}({1}, 0, {2})".format(
self._module_format('mpmath.gammainc'), self._print(e.args[0]), self._print(e.args[1]))
def _print_log2(self, e):
return '{0}({1})/{0}(2)'.format(
self._module_format('mpmath.log'), self._print(e.args[0]))
def _print_log1p(self, e):
return '{0}({1}+1)'.format(
self._module_format('mpmath.log'), self._print(e.args[0]))
for k in MpmathPrinter._kf:
setattr(MpmathPrinter, '_print_%s' % k, _print_known_func)
for k in _known_constants_mpmath:
setattr(MpmathPrinter, '_print_%s' % k, _print_known_const)
_not_in_numpy = 'erf erfc factorial gamma lgamma'.split()
_in_numpy = [(k, v) for k, v in _known_functions_math.items() if k not in _not_in_numpy]
_known_functions_numpy = dict(_in_numpy, **{
'acos': 'arccos',
'acosh': 'arccosh',
'asin': 'arcsin',
'asinh': 'arcsinh',
'atan': 'arctan',
'atan2': 'arctan2',
'atanh': 'arctanh',
'exp2': 'exp2',
})
class NumPyPrinter(PythonCodePrinter):
"""
Numpy printer which handles vectorized piecewise functions,
logical operators, etc.
"""
printmethod = "_numpycode"
_kf = dict(chain(
PythonCodePrinter._kf.items(),
[(k, 'numpy.' + v) for k, v in _known_functions_numpy.items()]
))
_kc = {k: 'numpy.'+v for k, v in _known_constants_math.items()}
def _print_seq(self, seq, delimiter=', '):
"General sequence printer: converts to tuple"
# Print tuples here instead of lists because numba supports
# tuples in nopython mode.
return '({},)'.format(delimiter.join(self._print(item) for item in seq))
def _print_MatMul(self, expr):
"Matrix multiplication printer"
return '({0})'.format(').dot('.join(self._print(i) for i in expr.args))
def _print_DotProduct(self, expr):
# DotProduct allows any shape order, but numpy.dot does matrix
# multiplication, so we have to make sure it gets 1 x n by n x 1.
arg1, arg2 = expr.args
if arg1.shape[0] != 1:
arg1 = arg1.T
if arg2.shape[1] != 1:
arg2 = arg2.T
return "%s(%s, %s)" % (self._module_format('numpy.dot'), self._print(arg1), self._print(arg2))
def _print_Piecewise(self, expr):
"Piecewise function printer"
exprs = '[{0}]'.format(','.join(self._print(arg.expr) for arg in expr.args))
conds = '[{0}]'.format(','.join(self._print(arg.cond) for arg in expr.args))
# If [default_value, True] is a (expr, cond) sequence in a Piecewise object
# it will behave the same as passing the 'default' kwarg to select()
# *as long as* it is the last element in expr.args.
# If this is not the case, it may be triggered prematurely.
return '{0}({1}, {2}, default=numpy.nan)'.format(self._module_format('numpy.select'), conds, exprs)
def _print_Relational(self, expr):
"Relational printer for Equality and Unequality"
op = {
'==' :'equal',
'!=' :'not_equal',
'<' :'less',
'<=' :'less_equal',
'>' :'greater',
'>=' :'greater_equal',
}
if expr.rel_op in op:
lhs = self._print(expr.lhs)
rhs = self._print(expr.rhs)
return '{op}({lhs}, {rhs})'.format(op=self._module_format('numpy.'+op[expr.rel_op]),
lhs=lhs, rhs=rhs)
return super(NumPyPrinter, self)._print_Relational(expr)
def _print_And(self, expr):
"Logical And printer"
# We have to override LambdaPrinter because it uses Python 'and' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_and' to NUMPY_TRANSLATIONS.
return '{0}({1})'.format(self._module_format('numpy.logical_and'), ','.join(self._print(i) for i in expr.args))
def _print_Or(self, expr):
"Logical Or printer"
# We have to override LambdaPrinter because it uses Python 'or' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_or' to NUMPY_TRANSLATIONS.
return '{0}({1})'.format(self._module_format('numpy.logical_or'), ','.join(self._print(i) for i in expr.args))
def _print_Not(self, expr):
"Logical Not printer"
# We have to override LambdaPrinter because it uses Python 'not' keyword.
# If LambdaPrinter didn't define it, we would still have to define our
# own because StrPrinter doesn't define it.
return '{0}({1})'.format(self._module_format('numpy.logical_not'), ','.join(self._print(i) for i in expr.args))
def _print_Min(self, expr):
return '{0}(({1}))'.format(self._module_format('numpy.amin'), ','.join(self._print(i) for i in expr.args))
def _print_Max(self, expr):
return '{0}(({1}))'.format(self._module_format('numpy.amax'), ','.join(self._print(i) for i in expr.args))
def _print_Pow(self, expr):
if expr.exp == 0.5:
return '{0}({1})'.format(self._module_format('numpy.sqrt'), self._print(expr.base))
else:
return super(NumPyPrinter, self)._print_Pow(expr)
def _print_arg(self, expr):
return "%s(%s)" % (self._module_format('numpy.angle'), self._print(expr.args[0]))
def _print_im(self, expr):
return "%s(%s)" % (self._module_format('numpy.imag', self._print(expr.args[0])))
def _print_Mod(self, expr):
return "%s(%s)" % (self._module_format('numpy.mod'), ', '.join(map(self._print, expr.args)))
def _print_re(self, expr):
return "%s(%s)" % (self._module_format('numpy.real'), self._print(expr.args[0]))
def _print_MatrixBase(self, expr):
func = self.known_functions.get(expr.__class__.__name__, None)
if func is None:
func = self._module_format('numpy.array')
return "%s(%s)" % (func, self._print(expr.tolist()))
for k in NumPyPrinter._kf:
setattr(NumPyPrinter, '_print_%s' % k, _print_known_func)
for k in NumPyPrinter._kc:
setattr(NumPyPrinter, '_print_%s' % k, _print_known_const)
_known_functions_scipy_special = {
'erf': 'erf',
'erfc': 'erfc',
'gamma': 'gamma',
'loggamma': 'gammaln'
}
_known_constants_scipy_constants = {
'GoldenRatio': 'golden_ratio'
}
class SciPyPrinter(NumPyPrinter):
_kf = dict(chain(
NumPyPrinter._kf.items(),
[(k, 'scipy.special.' + v) for k, v in _known_functions_scipy_special.items()]
))
_kc = {k: 'scipy.constants.' + v for k, v in _known_constants_scipy_constants.items()}
def _print_SparseMatrix(self, expr):
i, j, data = [], [], []
for (r, c), v in expr._smat.items():
i.append(r)
j.append(c)
data.append(v)
return "{name}({data}, ({i}, {j}), shape={shape})".format(
name=self._module_format('scipy.sparse.coo_matrix'),
data=data, i=i, j=j, shape=expr.shape
)
_print_ImmutableSparseMatrix = _print_SparseMatrix
for k in SciPyPrinter._kf:
setattr(SciPyPrinter, '_print_%s' % k, _print_known_func)
for k in SciPyPrinter._kc:
setattr(SciPyPrinter, '_print_%s' % k, _print_known_const)
class SymPyPrinter(PythonCodePrinter):
_kf = dict([(k, 'sympy.' + v) for k, v in chain(
_known_functions.items(),
_known_functions_math.items()
)])
def _print_Function(self, expr):
mod = expr.func.__module__ or ''
return '%s(%s)' % (self._module_format(mod + ('.' if mod else '') + expr.func.__name__),
', '.join(map(self._print, expr.args)))
| 2.28125 | 2 |
arguments_setting.py | Projectoy/ml_framework | 0 | 1605 | <gh_stars>0
import argparse, os
class ArgumentManager:
def __init__(self, model_list):
self.model_list = model_list
self.args = self.get_input_arguments()
self.validate_arguments()
def get_input_arguments(self):
parser = argparse.ArgumentParser(description='Parse command-line arguments for the ML framework.')
parser.add_argument("--configuration", "-c", required=True, help="the path of a configuration file(json type)")
parser.add_argument("--model", "-m", required=True, help="the model to process")
parser.add_argument("--task", "-t", required=True, help="training/testing")
return parser.parse_args()
def validate_arguments(self):
self.validate_configuration_path()
self.validate_model()
self.validate_task()
def validate_task(self):
task = self.args.task
assert task == "training" or task == "testing", "task should be training or testing"
def validate_model(self):
model = self.args.model
assert model in self.model_list, "model is not in the prepared model list"
def validate_configuration_path(self):
config_path = self.args.configuration
assert os.path.exists(config_path), "configuration path is invalid (file not found)"
def get_configuraiton_file_path(self):
return self.args.configuration
def get_model_type(self):
return self.args.model
def get_task_type(self):
return self.args.task
| 2.921875 | 3 |
fileHandler.py | Omer-Sella/ldpc | 0 | 1606 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 12:10:11 2019
@author: Omer
"""
## File handler
## This file was initially intended purely to generate the matrices for the near earth code found in: https://public.ccsds.org/Pubs/131x1o2e2s.pdf
## The values from the above pdf were copied manually to a txt file, and it is the purpose of this file to parse it.
## The emphasis here is on correctness, I currently do not see a reason to generalise this file, since matrices will be saved in either json or some matrix friendly format.
import numpy as np
from scipy.linalg import circulant
#import matplotlib.pyplot as plt
import scipy.io
import common
import hashlib
import os
projectDir = os.environ.get('LDPC')
if projectDir == None:
import pathlib
projectDir = pathlib.Path(__file__).parent.absolute()
## <NAME>: added on 01/12/2020, need to make sure this doesn't break anything.
import sys
sys.path.insert(1, projectDir)
FILE_HANDLER_INT_DATA_TYPE = np.int32
GENERAL_CODE_MATRIX_DATA_TYPE = np.int32
NIBBLE_CONVERTER = np.array([8, 4, 2, 1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
def nibbleToHex(inputArray):
n = NIBBLE_CONVERTER.dot(inputArray)
if n == 10:
h = 'A'
elif n== 11:
h = 'B'
elif n== 12:
h = 'C'
elif n== 13:
h = 'D'
elif n== 14:
h = 'E'
elif n== 15:
h = 'F'
else:
h = str(n)
return h
def binaryArraytoHex(inputArray):
d1 = len(inputArray)
assert (d1 % 4 == 0)
outputArray = np.zeros(d1//4, dtype = str)
outputString = ''
for j in range(d1//4):
nibble = inputArray[4 * j : 4 * j + 4]
h = nibbleToHex(nibble)
outputArray[j] = h
outputString = outputString + h
return outputArray, outputString
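# Quick sanity check (hypothetical input):
#   binaryArraytoHex(np.array([1,0,1,0, 1,1,1,1]))  # -> (array of nibble chars, 'AF')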
def hexStringToBinaryArray(hexString):
outputBinary = np.array([], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
for i in hexString:
if i == '0':
nibble = np.array([0,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '1':
nibble = np.array([0,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '2':
nibble = np.array([0,0,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '3':
nibble = np.array([0,0,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '4':
nibble = np.array([0,1,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '5':
nibble = np.array([0,1,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '6':
nibble = np.array([0,1,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '7':
nibble = np.array([0,1,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '8':
nibble = np.array([1,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '9':
nibble = np.array([1,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'A':
nibble = np.array([1,0,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'B':
nibble = np.array([1,0,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'C':
nibble = np.array([1,1,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'D':
nibble = np.array([1,1,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'E':
nibble = np.array([1,1,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'F':
nibble = np.array([1,1,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
else:
#print('Error, 0-9 or A-F')
pass
nibble = np.array([], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
outputBinary = np.hstack((outputBinary, nibble))
return outputBinary
def hexToCirculant(hexStr, circulantSize):
binaryArray = hexStringToBinaryArray(hexStr)
if len(binaryArray) < circulantSize:
binaryArray = np.hstack((np.zeros(circulantSize - len(binaryArray), dtype = GENERAL_CODE_MATRIX_DATA_TYPE), binaryArray))
else:
binaryArray = binaryArray[1:]
circulantMatrix = circulant(binaryArray)
circulantMatrix = circulantMatrix.T
return circulantMatrix
def hotLocationsToCirculant(locationList, circulantSize):
generatingVector = np.zeros(circulantSize, dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
generatingVector[locationList] = 1
newCirculant = circulant(generatingVector)
newCirculant = newCirculant.T
return newCirculant
def readMatrixFromFile(fileName, dim0, dim1, circulantSize, isRow = True, isHex = True, isGenerator = True ):
# This function assumes that each line in the file contains the non zero locations of the first row of a circulant.
# Each line in the file then defines a circulant, and the order in which they are defined is top to bottom left to right, i.e.:
# line 0 defines circulant 0,0
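# Hypothetical file excerpt for the non-generator case: each line lists the hot (non-zero)
# column indices of the first row of one circulant (all values in [0, circulantSize)), e.g.:
#   0,176,389
#   12,257,503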
with open(fileName) as fid:
lines = fid.readlines()
if isGenerator:
for i in range((dim0 // circulantSize) ):
bLeft = hexToCirculant(lines[2 * i], circulantSize)
bRight = hexToCirculant(lines[2 * i + 1], circulantSize)
newBlock = np.hstack((bLeft, bRight))
if i == 0:
accumulatedBlock = newBlock
else:
accumulatedBlock = np.vstack((accumulatedBlock, newBlock))
newMatrix = np.hstack((np.eye(dim0, dtype = GENERAL_CODE_MATRIX_DATA_TYPE), accumulatedBlock))
else:
for i in range((dim1 // circulantSize)):
locationList1 = list(lines[ i].rstrip('\n').split(','))
locationList1 = list(map(int, locationList1))
upBlock = hotLocationsToCirculant(locationList1, circulantSize)
if i == 0:
accumulatedUpBlock1 = upBlock
else:
accumulatedUpBlock1 = np.hstack((accumulatedUpBlock1, upBlock))
for i in range((dim1 // circulantSize)):
locationList = list(lines[(dim1 // circulantSize) + i].rstrip('\n').split(','))
locationList = list(map(int, locationList))
newBlock = hotLocationsToCirculant(locationList, circulantSize)
if i == 0:
accumulatedBlock2 = newBlock
else:
accumulatedBlock2 = np.hstack((accumulatedBlock2, newBlock))
newMatrix = np.vstack((accumulatedUpBlock1, accumulatedBlock2))
return newMatrix
def binaryMatrixToHexString(binaryMatrix, circulantSize):
leftPadding = np.zeros(4 - (circulantSize % 4), dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
m,n = binaryMatrix.shape
#print(m)
#print(n)
assert( m % circulantSize == 0)
assert (n % circulantSize == 0)
M = m // circulantSize
N = n // circulantSize
hexName = ''
for r in range(M):
for k in range(N):
nextLine = np.hstack((leftPadding, binaryMatrix[ r * circulantSize , k * circulantSize : (k + 1) * circulantSize]))
hexArray, hexString = binaryArraytoHex(nextLine)
hexName = hexName + hexString
return hexName
def saveCodeInstance(parityMatrix, circulantSize, codewordSize, evaluationData = None, path = None, evaluationTime = 0, numberOfNonZero = 0, fileName = None):
print("*** in saveCodeInstance ...")
m, n = parityMatrix.shape
M = m // circulantSize
N = n // circulantSize
if fileName == None:
fileName = binaryMatrixToHexString(parityMatrix, circulantSize)
fileNameSHA224 = str(circulantSize) + '_' + str(M) + '_' + str(N) + '_' + str(hashlib.sha224(str(fileName).encode('utf-8')).hexdigest())
fileNameWithPath = path + fileNameSHA224
else:
fileNameWithPath = path + fileName
print("*** " + fileName)
workspaceDict = {}
workspaceDict['parityMatrix'] = parityMatrix
workspaceDict['fileName'] = fileName
if evaluationData != None:
scatterSNR, scatterBER, scatterITR, snrAxis, averageSnrAxis, berData, averageNumberOfIterations = evaluationData.getStatsV2()
workspaceDict['snrData'] = scatterSNR
workspaceDict['berData'] = scatterBER
workspaceDict['itrData'] = scatterITR
workspaceDict['averageSnrAxis'] = averageSnrAxis
workspaceDict['averageNumberOfIterations'] = averageNumberOfIterations
workspaceDict['evaluationTime'] = evaluationTime
workspaceDict['nonZero'] = numberOfNonZero
scipy.io.savemat((fileNameWithPath + '.mat'), workspaceDict)
#evaluationData.plotStats(codewordSize, fileNameWithPath)
print("*** Finishing saveCodeInstance !")
return fileName
def testFileHandler():
nearEarthGenerator = readMatrixFromFile(projectDir + '/codeMatrices/nearEarthGenerator.txt', 7154, 8176, 511, True, True, True)
nearEarthParity = readMatrixFromFile(projectDir + '/codeMatrices/nearEarthParity.txt', 1022, 8176, 511, True, False, False)
return 'OK'
def plotResults(path, makeMat = False):
i = 10
evaluationFaildAt = np.zeros(4, dtype = FILE_HANDLER_INT_DATA_TYPE)
evalTimes = []
numberOfIterationsAtHigh = []
for root, dirs, files in os.walk(path):
for file in files:
if str(file).endswith('.mat'):
i = i + 1
mat = scipy.io.loadmat(str(os.path.join(root, file)))
snrAxis = mat['snrAxis']
snrActual = mat['averageSnrAxis']
if len(snrAxis) < 3:
evaluationFaildAt[len(snrAxis)] = evaluationFaildAt[len(snrAxis)] + 1
berAxis = mat['berData']
if ('evaluationTime' in mat.keys()):
evalTimes.append(mat['evaluationTime'])
averageNumberOfIterations = mat['averageNumberOfIterations']
numberOfIterationsAtHigh.append(averageNumberOfIterations[-1])
common.plotSNRvsBER(snrActual, berAxis, fileName = None, inputLabel = '', figureNumber = i, figureName = str(file))
else:
pass
return evalTimes, evaluationFaildAt, numberOfIterationsAtHigh
#plt.imshow(nearEarthParity)
#nearEarthParity = readMatrixFromFile('/home/oss22/swift/swift/codeMatrices/nearEarthParity.txt', 1022, 8176, 511, True, False, False)
#import networkx as nx
#from networkx.algorithms import bipartite
#B = nx.Graph()
#B.add_nodes_from(range(1022), bipartite=0)
#B.add_nodes_from(range(1022, 7156 + 1022), bipartite=1)
# Add edges only between nodes of opposite node sets
#for i in range(8176):
# for j in range(1022):
# if nearEarthParity[j,i] != 0:
# B.add_edges_from([(j, 7156 + i)])
#X, Y = bipartite.sets(B)
#pos = dict()
#pos.update( (n, (1, i)) for i, n in enumerate(X) )
#pos.update( (n, (2, i)) for i, n in enumerate(Y) )
#nx.draw(B, pos=pos)
#plt.show()
| 2.390625 | 2 |
stage/configuration/test_amazon_s3_origin.py | Sentienz/datacollector-tests | 0 | 1607 | <gh_stars>0
import logging
import pytest
from streamsets.testframework.markers import aws, sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
S3_SANDBOX_PREFIX = 'sandbox'
LOG_FIELD_MAPPING = [{'fieldPath': '/date', 'group': 1},
{'fieldPath': '/time', 'group': 2},
{'fieldPath': '/timehalf', 'group': 3},
{'fieldPath': '/info', 'group': 4},
{'fieldPath': '/file', 'group': 5},
{'fieldPath': '/message', 'group': 6}]
REGULAR_EXPRESSION = r'(\S+) (\S+) (\S+) (\S+) (\S+) (.*)'
# log content to be written to the file on S3
data_format_content = {
'COMMON_LOG_FORMAT': '127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] '
'"GET /apache.gif HTTP/1.0" 200 232',
'LOG4J': '200 [main] DEBUG org.StreamSets.Log4j unknown - This is sample log message',
'APACHE_ERROR_LOG_FORMAT': '[Wed Oct 11 14:32:52 2000] [error] [client 127.0.0.1] client '
'denied by server configuration:/export/home/live/ap/htdocs/test',
'COMBINED_LOG_FORMAT': '127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache.gif'
' HTTP/1.0" 200 2326 "http://www.example.com/strt.html" "Mozilla/4.08'
' [en] (Win98; I ;Nav)"',
'APACHE_CUSTOM_LOG_FORMAT': '10.185.248.71 - - [09/Jan/2015:9:12:06 +0000] "GET '
'/inventoryServic/inventory/purchaseItem?userId=20253471&itemId=23434300 '
'HTTP/1.1" 500 17 ',
'CEF': '10.217.31.247 CEF:0|Citrix|NetScaler|NS10.0|APPFW|APPFW_STARTURL|6|src=10.217.253.78 '
'spt=53743 method=GET request=http://vpx247.example.net/FFC/login.html msg=Disallow Illegal URL.',
'LEEF': 'LEEF: 2.0|Trend Micro|Deep Security Agent|<DSA version>|4000030|cat=Anti-Malware '
'name=HEU_AEGIS_CRYPT desc=HEU_AEGIS_CRYPT sev=6 cn1=241 msg=Realtime',
'REGEX': '2019-04-30 08:23:53 AM [INFO] [streamsets.sdk.sdc_api] Pipeline Filewriterpipeline53'}
# data to verify the output of amazon s3 origin.
get_data_to_verify_output = {
'LOG4J': {'severity': 'DEBUG', 'relativetime': '200', 'thread': 'main', 'category': 'org.StreamSets.Log4j',
'ndc': 'unknown', 'message': 'This is sample log message'},
'COMMON_LOG_FORMAT': {'request': '/apache.gif', 'auth': 'frank', 'ident': '-', 'response': '200', 'bytes':
'232', 'clientip': '127.0.0.1', 'verb': 'GET', 'httpversion': '1.0', 'rawrequest': None,
'timestamp': '10/Oct/2000:13:55:36 -0700'},
'APACHE_ERROR_LOG_FORMAT': {'message': 'client denied by server configuration:/export/home/live/ap/htdocs/'
'test', 'timestamp': 'Wed Oct 11 14:32:52 2000', 'loglevel': 'error',
'clientip': '127.0.0.1'},
'COMBINED_LOG_FORMAT': {'request': '/apache.gif', 'agent': '"Mozilla/4.08 [en] (Win98; I ;Nav)"', 'auth':
'frank', 'ident': '-', 'verb': 'GET', 'referrer': '"http://www.example.com/strt.'
'html"', 'response': '200', 'bytes': '2326', 'clientip': '127.0.0.1',
'httpversion': '1.0', 'rawrequest': None, 'timestamp': '10/Oct/2000:13:55:36 -0700'},
'APACHE_CUSTOM_LOG_FORMAT': {'remoteUser': '-', 'requestTime': '09/Jan/2015:9:12:06 +0000', 'request': 'GET '
'/inventoryServic/inventory/purchaseItem?userId=20253471&itemId=23434300 HTTP/1.1',
'logName': '-', 'remoteHost': '10.185.248.71', 'bytesSent': '17', 'status': '500'},
'CEF': {'severity': '6', 'product': 'NetScaler', 'extensions': {'msg': 'Disallow Illegal URL.', 'request':
'http://vpx247.example.net/FFC/login.html', 'method': 'GET', 'src': '10.217.253.78', 'spt': '53743'},
'signature': 'APPFW', 'vendor': 'Citrix', 'cefVersion': 0, 'name': 'APPFW_STARTURL',
'version': 'NS10.0'},
'GROK': {'request': '/inventoryServic/inventory/purchaseItem?userId=20253471&itemId=23434300', 'auth': '-',
'ident': '-', 'response': '500', 'bytes': '17', 'clientip': '10.185.248.71', 'verb': 'GET',
'httpversion': '1.1', 'rawrequest': None, 'timestamp': '09/Jan/2015:9:12:06 +0000'},
'LEEF': {'eventId': '4000030', 'product': 'Deep Security Agent', 'extensions': {'cat': 'Realtime'},
'leefVersion': 2.0, 'vendor': 'Trend Micro', 'version': '<DSA version>'},
'REGEX': {'/time': '08:23:53', '/date': '2019-04-30', '/timehalf': 'AM',
'/info': '[INFO]', '/message': 'Pipeline Filewriterpipeline53', '/file': '[streamsets.sdk.sdc_api]'}}
@pytest.mark.skip('Not yet implemented')
def test_configuration_access_key_id(sdc_builder, sdc_executor):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_bucket(sdc_builder, sdc_executor):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_connection_timeout(sdc_builder, sdc_executor):
pass
@pytest.mark.parametrize('task', ['CREATE_NEW_OBJECT'])
@pytest.mark.skip('Not yet implemented')
def test_configuration_content(sdc_builder, sdc_executor, task):
pass
@pytest.mark.parametrize('task', ['COPY_OBJECT'])
@pytest.mark.parametrize('delete_original_object', [False, True])
@pytest.mark.skip('Not yet implemented')
def test_configuration_delete_original_object(sdc_builder, sdc_executor, task, delete_original_object):
pass
@pytest.mark.parametrize('region', ['OTHER'])
@pytest.mark.skip('Not yet implemented')
def test_configuration_endpoint(sdc_builder, sdc_executor, region):
pass
@pytest.mark.parametrize('task', ['COPY_OBJECT'])
@pytest.mark.skip('Not yet implemented')
def test_configuration_new_object_path(sdc_builder, sdc_executor, task):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_object(sdc_builder, sdc_executor):
pass
@pytest.mark.parametrize('on_record_error', ['DISCARD', 'STOP_PIPELINE', 'TO_ERROR'])
@pytest.mark.skip('Not yet implemented')
def test_configuration_on_record_error(sdc_builder, sdc_executor, on_record_error):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_preconditions(sdc_builder, sdc_executor):
pass
@pytest.mark.parametrize('use_proxy', [True])
@pytest.mark.skip('Not yet implemented')
def test_configuration_proxy_host(sdc_builder, sdc_executor, use_proxy):
pass
@pytest.mark.parametrize('use_proxy', [True])
@pytest.mark.skip('Not yet implemented')
def test_configuration_proxy_password(sdc_builder, sdc_executor, use_proxy):
pass
@pytest.mark.parametrize('use_proxy', [True])
@pytest.mark.skip('Not yet implemented')
def test_configuration_proxy_port(sdc_builder, sdc_executor, use_proxy):
pass
@pytest.mark.parametrize('use_proxy', [True])
@pytest.mark.skip('Not yet implemented')
def test_configuration_proxy_user(sdc_builder, sdc_executor, use_proxy):
pass
@pytest.mark.parametrize('region', ['AP_NORTHEAST_1', 'AP_NORTHEAST_2', 'AP_NORTHEAST_3', 'AP_SOUTHEAST_1', 'AP_SOUTHEAST_2', 'AP_SOUTH_1', 'CA_CENTRAL_1', 'CN_NORTHWEST_1', 'CN_NORTH_1', 'EU_CENTRAL_1', 'EU_WEST_1', 'EU_WEST_2', 'EU_WEST_3', 'OTHER', 'SA_EAST_1', 'US_EAST_1', 'US_EAST_2', 'US_GOV_WEST_1', 'US_WEST_1', 'US_WEST_2'])
@pytest.mark.skip('Not yet implemented')
def test_configuration_region(sdc_builder, sdc_executor, region):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_required_fields(sdc_builder, sdc_executor):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_retry_count(sdc_builder, sdc_executor):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_secret_access_key(sdc_builder, sdc_executor):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_socket_timeout(sdc_builder, sdc_executor):
pass
@pytest.mark.parametrize('task', ['CHANGE_EXISTING_OBJECT'])
@pytest.mark.skip('Not yet implemented')
def test_configuration_tags(sdc_builder, sdc_executor, task):
pass
@pytest.mark.parametrize('task', ['CHANGE_EXISTING_OBJECT', 'COPY_OBJECT', 'CREATE_NEW_OBJECT'])
@pytest.mark.skip('Not yet implemented')
def test_configuration_task(sdc_builder, sdc_executor, task):
pass
@pytest.mark.parametrize('use_proxy', [False, True])
@pytest.mark.skip('Not yet implemented')
def test_configuration_use_proxy(sdc_builder, sdc_executor, use_proxy):
pass
@aws('s3')
@pytest.mark.parametrize('data_format', ['LOG'])
@pytest.mark.parametrize('log_format', ['COMMON_LOG_FORMAT', 'APACHE_ERROR_LOG_FORMAT', 'COMBINED_LOG_FORMAT',
'APACHE_CUSTOM_LOG_FORMAT', 'REGEX', 'GROK', 'LOG4J', 'CEF', 'LEEF'])
def test_configurations_data_format_log(sdc_executor, sdc_builder, aws, data_format, log_format):
"""Check whether S3 origin can parse different log format or not. A log file is being created in s3 bucket
mentioned below .S3 origin reads the log file and parse the same.
Pipeline for the same-
s3_origin >> trash
s3_origin >= pipeline_finisher_executor
"""
if log_format == 'GROK':
file_content = data_format_content['APACHE_CUSTOM_LOG_FORMAT']
else:
file_content = data_format_content[log_format]
client = aws.s3
s3_key = f'{S3_SANDBOX_PREFIX}/{get_random_string()}'
attributes = {'bucket': aws.s3_bucket_name,
'prefix_pattern': f'{s3_key}/*',
'number_of_threads': 1,
'read_order': 'LEXICOGRAPHICAL',
'data_format': data_format,
'log_format': log_format,
'custom_log_format': '%h %l %u [%t] "%r" %>s %b',
'regular_expression': REGULAR_EXPRESSION,
'field_path_to_regex_group_mapping': LOG_FIELD_MAPPING
}
pipeline = get_aws_origin_to_trash_pipeline(sdc_builder, attributes, aws)
s3_origin = pipeline.origin_stage
try:
client.put_object(Bucket=aws.s3_bucket_name, Key=f'{s3_key}/{get_random_string()}.log', Body=file_content)
output_records = execute_pipeline_and_get_output(sdc_executor, s3_origin, pipeline)
assert output_records[0].field == get_data_to_verify_output[log_format]
finally:
if sdc_executor.get_pipeline_status(pipeline).response.json().get('status') == 'RUNNING':
sdc_executor.stop_pipeline(pipeline)
# cleaning up s3 bucket
delete_aws_objects(client, aws, s3_key)
def get_aws_origin_to_trash_pipeline(sdc_builder, attributes, aws):
# Build pipeline.
builder = sdc_builder.get_pipeline_builder()
builder.add_error_stage('Discard')
s3_origin = builder.add_stage('Amazon S3', type='origin')
s3_origin.set_attributes(**attributes)
trash = builder.add_stage('Trash')
pipeline_finisher_executor = builder.add_stage('Pipeline Finisher Executor')
pipeline_finisher_executor.set_attributes(stage_record_preconditions=["${record:eventType() == 'no-more-data'}"])
s3_origin >> trash
s3_origin >= pipeline_finisher_executor
s3_origin_pipeline = builder.build().configure_for_environment(aws)
s3_origin_pipeline.configuration['shouldRetry'] = False
return s3_origin_pipeline
def delete_aws_objects(client, aws, s3_key):
# Clean up S3.
delete_keys = {'Objects': [{'Key': k['Key']}
for k in
client.list_objects_v2(Bucket=aws.s3_bucket_name, Prefix=s3_key)['Contents']]}
client.delete_objects(Bucket=aws.s3_bucket_name, Delete=delete_keys)
def execute_pipeline_and_get_output(sdc_executor, s3_origin, pipeline):
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
output_records = snapshot[s3_origin].output
return output_records
| 1.929688 | 2 |
model_building/svr_experiment_configuration.py | eubr-atmosphere/a-MLLibrary | 3 | 1608 | <gh_stars>1-10
"""
Copyright 2019 <NAME>
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sklearn.svm as svm
import model_building.experiment_configuration as ec
class SVRExperimentConfiguration(ec.ExperimentConfiguration):
"""
Class representing a single experiment configuration for linear regression
Attributes
----------
_linear_regression : LinearRegression
The actual scikt object which performs the linear regression
Methods
-------
_train()
Performs the actual building of the linear model
compute_estimations()
Compute the estimated values for a give set of data
"""
def __init__(self, campaign_configuration, hyperparameters, regression_inputs, prefix):
"""
campaign_configuration: dict of dict:
            The set of options specified by the user through command line and campaign configuration files
hyperparameters: dictionary
The set of hyperparameters of this experiment configuration
regression_inputs: RegressionInputs
The input of the regression problem to be solved
"""
super().__init__(campaign_configuration, hyperparameters, regression_inputs, prefix)
self.technique = ec.Technique.SVR
self._regressor = svm.SVR(C=self._hyperparameters['C'], epsilon=self._hyperparameters['epsilon'],
gamma=self._hyperparameters['gamma'], kernel=self._hyperparameters['kernel'],
degree=self._hyperparameters['degree'])
def _compute_signature(self, prefix):
"""
Compute the signature associated with this experiment configuration
"""
signature = prefix.copy()
signature.append("C_" + str(self._hyperparameters['C']))
signature.append("epsilon_" + str(self._hyperparameters['epsilon']))
signature.append("gamma_" + str(self._hyperparameters['gamma']))
signature.append("kernel_" + str(self._hyperparameters['kernel']))
signature.append("degree_" + str(self._hyperparameters['degree']))
return signature
def _train(self):
"""
Build the model with the experiment configuration represented by this object
"""
self._logger.debug("Building model for %s", self._signature)
assert self._regression_inputs
xdata, ydata = self._regression_inputs.get_xy_data(self._regression_inputs.inputs_split["training"])
self._regressor.fit(xdata, ydata)
self._logger.debug("Model built")
# for idx, col_name in enumerate(self._regression_inputs.x_columns):
# self._logger.debug("The coefficient for %s is %f", col_name, self._linear_regression.coef_[idx])
def compute_estimations(self, rows):
"""
        Compute the estimated values for the runs in rows
"""
xdata, _ = self._regression_inputs.get_xy_data(rows)
return self._regressor.predict(xdata)
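# Minimal usage sketch (illustrative only; it assumes campaign_configuration and
# regression_inputs objects built elsewhere by this library, and the hyperparameter
# values below are arbitrary, not defaults of this project):
#   hyperparameters = {'C': 1.0, 'epsilon': 0.1, 'gamma': 'scale', 'kernel': 'rbf', 'degree': 3}
#   conf = SVRExperimentConfiguration(campaign_configuration, hyperparameters, regression_inputs, prefix=[])
#   conf._train()
#   predictions = conf.compute_estimations(regression_inputs.inputs_split["training"])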
| 2.359375 | 2 |
src/scs_host/sys/host_gpi.py | south-coast-science/scs_host_rpi | 0 | 1609 | <reponame>south-coast-science/scs_host_rpi<filename>src/scs_host/sys/host_gpi.py<gh_stars>0
"""
Created on 12 May 2017
@author: <NAME> (<EMAIL>)
"""
from scs_host.sys.host_gpio import HostGPIO
# --------------------------------------------------------------------------------------------------------------------
# noinspection PyUnusedLocal,PyAbstractClass
class HostGPI(HostGPIO):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, pin):
raise NotImplementedError()
# ----------------------------------------------------------------------------------------------------------------
@property
def state(self):
raise NotImplementedError()
def wait(self, edge):
raise NotImplementedError()
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
raise NotImplementedError()
| 1.882813 | 2 |
30-39/35. final_class/final_class.py | dcragusa/PythonMorsels | 1 | 1610 | <filename>30-39/35. final_class/final_class.py
class Unsubclassable:
def __init_subclass__(cls, **kwargs):
raise TypeError('Unacceptable base type')
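# When assigned to ``__init_subclass__`` with ``setattr`` (as final_class and
# UnsubclassableType do below), a plain function is not implicitly wrapped in
# ``classmethod`` the way a body-defined ``__init_subclass__`` is, so at
# subclass-creation time it is called without the usual ``cls`` argument;
# hence the zero-argument signature.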
def prevent_subclassing():
raise TypeError('Unacceptable base type')
def final_class(cls):
setattr(cls, '__init_subclass__', prevent_subclassing)
return cls
class UnsubclassableType(type):
def __new__(cls, name, bases, dct):
c = super().__new__(cls, name, bases, dct)
setattr(c, '__init_subclass__', prevent_subclassing)
return c
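# Demonstration sketch (class names below are illustrative, not part of the exercise):
#   @final_class
#   class Base: ...
#   class Child(Base): ...                      # raises TypeError('Unacceptable base type')
#
#   class Sealed(metaclass=UnsubclassableType): ...
#   class Sub(Sealed): ...                      # raises TypeError as well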
| 2.8125 | 3 |
benchmark/AMS/HIGGSTES/TP.py | victor-estrade/SystGradDescent | 2 | 1611 | <gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
# Command line :
# python -m benchmark.AMS.HIGGSTES.TP
import os
import logging
from config import SEED
from config import _ERROR
from config import _TRUTH
import numpy as np
import pandas as pd
from visual.misc import set_plot_config
set_plot_config()
from utils.log import set_logger
from utils.log import flush
from utils.log import print_line
from utils.model import get_model
from utils.model import get_optimizer
from utils.model import train_or_load_neural_net
from utils.evaluation import evaluate_summary_computer
from utils.images import gather_images
from visual.misc import plot_params
from problem.higgs import HiggsConfigTesOnly as Config
from problem.higgs import get_generators_torch
from problem.higgs import GeneratorCPU
from problem.higgs import GeneratorTorch
from problem.higgs import HiggsNLL as NLLComputer
from model.tangent_prop import TangentPropClassifier
from archi.classic import L4 as ARCHI
from ...my_argparser import TP_parse_args
from collections import OrderedDict
from .common import measurement
DATA_NAME = 'HIGGSTES'
BENCHMARK_NAME = 'VAR-'+DATA_NAME
N_ITER = 30
class TrainGenerator:
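    """Wrap the torch Higgs data generator and expose the calibrated mu/tes/jes/les values as differentiable tensors."""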
def __init__(self, data_generator, cuda=False):
self.data_generator = data_generator
if cuda:
self.data_generator.cuda()
else:
self.data_generator.cpu()
self.mu = self.tensor(Config.CALIBRATED.mu, requires_grad=True)
self.tes = self.tensor(Config.CALIBRATED.tes, requires_grad=True)
self.jes = self.tensor(Config.CALIBRATED.jes, requires_grad=True)
self.les = self.tensor(Config.CALIBRATED.les, requires_grad=True)
        self.params = (self.tes, self.jes, self.les, self.mu)
self.nuisance_params = OrderedDict([
('tes', self.tes),
('jes', self.jes),
('les', self.les),
])
def generate(self, n_samples=None):
X, y, w = self.data_generator.diff_generate(*self.params, n_samples=n_samples)
return X, y, w
def reset(self):
self.data_generator.reset()
def tensor(self, data, requires_grad=False, dtype=None):
return self.data_generator.tensor(data, requires_grad=requires_grad, dtype=dtype)
def build_model(args, i_cv):
args.net = ARCHI(n_in=29, n_out=2, n_unit=args.n_unit)
args.optimizer = get_optimizer(args)
model = get_model(args, TangentPropClassifier)
model.set_info(DATA_NAME, BENCHMARK_NAME, i_cv)
return model
# =====================================================================
# MAIN
# =====================================================================
def main():
# BASIC SETUP
logger = set_logger()
    args = TP_parse_args(main_description="Training launcher for Tangent Propagation on HIGGSTES benchmark")
logger.info(args)
flush(logger)
# INFO
model = build_model(args, -1)
os.makedirs(model.results_directory, exist_ok=True)
# RUN
logger.info(f'Running runs [{args.start_cv},{args.end_cv}[')
results = [run(args, i_cv) for i_cv in range(args.start_cv, args.end_cv)]
results = pd.concat(results, ignore_index=True)
# EVALUATION
results.to_csv(os.path.join(model.results_directory, 'threshold.csv'))
print(results)
print("DONE !")
def run(args, i_cv):
logger = logging.getLogger()
print_line()
logger.info('Running iter n°{}'.format(i_cv))
print_line()
# LOAD/GENERATE DATA
logger.info('Set up data generator')
config = Config()
seed = SEED + i_cv * 5
train_generator, valid_generator, test_generator = get_generators_torch(seed, cuda=args.cuda)
train_generator = TrainGenerator(train_generator, cuda=args.cuda)
valid_generator = GeneratorCPU(valid_generator)
test_generator = GeneratorCPU(test_generator)
# SET MODEL
logger.info('Set up classifier')
model = build_model(args, i_cv)
os.makedirs(model.results_path, exist_ok=True)
flush(logger)
# TRAINING / LOADING
train_or_load_neural_net(model, train_generator, retrain=args.retrain)
# MEASUREMENT
results = measurement(model, i_cv, config, valid_generator, test_generator)
print(results)
return results
if __name__ == '__main__':
main()
| 1.84375 | 2 |
papermill/tests/test_adl.py | dmartinpro/papermill | 0 | 1612 | import unittest
from ..adl import ADL
import six
if six.PY3:
from unittest.mock import Mock, MagicMock
else:
from mock import Mock, MagicMock
class ADLTest(unittest.TestCase):
"""
Tests for `ADL`
"""
def setUp(self):
self.ls = Mock(return_value=["foo", "bar", "baz"])
self.fakeFile = MagicMock()
self.fakeFile.__iter__.return_value = [b"a", b"b", b"c"]
self.fakeFile.__enter__.return_value = self.fakeFile
self.open = Mock(return_value=self.fakeFile)
self.fakeAdapter = Mock(open=self.open, ls=self.ls)
self.adl = ADL()
self.adl._create_adapter = Mock(return_value=self.fakeAdapter)
def test_split_url_raises_exception_on_invalid_url(self):
with self.assertRaises(Exception) as context:
ADL._split_url("this_is_not_a_valid_url")
self.assertTrue("Invalid ADL url 'this_is_not_a_valid_url'" in str(context.exception))
def test_split_url_splits_valid_url(self):
(store_name, path) = ADL._split_url("adl://foo.azuredatalakestore.net/bar/baz")
self.assertEqual(store_name, "foo")
self.assertEqual(path, "bar/baz")
def test_listdir_calls_ls_on_adl_adapter(self):
self.assertEqual(
self.adl.listdir("adl://foo_store.azuredatalakestore.net/path/to/file"),
["foo", "bar", "baz"],
)
self.ls.assert_called_once_with("path/to/file")
def test_read_opens_and_reads_file(self):
        self.assertEqual(
self.adl.read("adl://foo_store.azuredatalakestore.net/path/to/file"), ["a", "b", "c"]
)
self.fakeFile.__iter__.assert_called_once_with()
def test_write_opens_file_and_writes_to_it(self):
self.adl.write("hello world", "adl://foo_store.azuredatalakestore.net/path/to/file")
self.fakeFile.write.assert_called_once_with(b"hello world")
| 2.9375 | 3 |
users/views.py | AnvarKhan/django-python | 1 | 1613 | <reponame>AnvarKhan/django-python
from django.views.generic import CreateView
from django.urls import reverse_lazy
from .forms import CustomUserCreationForm
class SignUpView(CreateView):
form_class = CustomUserCreationForm
success_url = reverse_lazy('login')
template_name = 'signup.html'
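# URL wiring sketch (illustrative; the route, name and urls module below are assumptions,
# not taken from this repo):
#   from django.urls import path
#   from .views import SignUpView
#   urlpatterns = [path('signup/', SignUpView.as_view(), name='signup')]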
| 2.15625 | 2 |
st3/package_util/compat/typing.py | Thom1729/package_util | 18 | 1614 | <filename>st3/package_util/compat/typing.py
try:
from typing import * # noqa: F401, F403
except ImportError:
from .typing_stubs import * # type: ignore # noqa: F401, F403
| 1.304688 | 1 |
stanza/models/common/dropout.py | rasimuvaikas/stanza | 3,633 | 1615 | <reponame>rasimuvaikas/stanza
import torch
import torch.nn as nn
class WordDropout(nn.Module):
""" A word dropout layer that's designed for embedded inputs (e.g., any inputs to an LSTM layer).
Given a batch of embedded inputs, this layer randomly set some of them to be a replacement state.
Note that this layer assumes the last dimension of the input to be the hidden dimension of a unit.
"""
def __init__(self, dropprob):
super().__init__()
self.dropprob = dropprob
def forward(self, x, replacement=None):
if not self.training or self.dropprob == 0:
return x
masksize = [y for y in x.size()]
masksize[-1] = 1
dropmask = torch.rand(*masksize, device=x.device) < self.dropprob
res = x.masked_fill(dropmask, 0)
if replacement is not None:
res = res + dropmask.float() * replacement
return res
def extra_repr(self):
return 'p={}'.format(self.dropprob)
class LockedDropout(nn.Module):
"""
A variant of dropout layer that consistently drops out the same parameters over time. Also known as the variational dropout.
This implementation was modified from the LockedDropout implementation in the flair library (https://github.com/zalandoresearch/flair).
"""
def __init__(self, dropprob, batch_first=True):
super().__init__()
self.dropprob = dropprob
self.batch_first = batch_first
def forward(self, x):
if not self.training or self.dropprob == 0:
return x
if not self.batch_first:
m = x.new_empty(1, x.size(1), x.size(2), requires_grad=False).bernoulli_(1 - self.dropprob)
else:
m = x.new_empty(x.size(0), 1, x.size(2), requires_grad=False).bernoulli_(1 - self.dropprob)
mask = m.div(1 - self.dropprob).expand_as(x)
return mask * x
def extra_repr(self):
return 'p={}'.format(self.dropprob)
class SequenceUnitDropout(nn.Module):
""" A unit dropout layer that's designed for input of sequence units (e.g., word sequence, char sequence, etc.).
Given a sequence of unit indices, this layer randomly set some of them to be a replacement id (usually set to be <UNK>).
"""
def __init__(self, dropprob, replacement_id):
super().__init__()
self.dropprob = dropprob
self.replacement_id = replacement_id
def forward(self, x):
""" :param: x must be a LongTensor of unit indices. """
if not self.training or self.dropprob == 0:
return x
masksize = [y for y in x.size()]
dropmask = torch.rand(*masksize, device=x.device) < self.dropprob
res = x.masked_fill(dropmask, self.replacement_id)
return res
def extra_repr(self):
return 'p={}, replacement_id={}'.format(self.dropprob, self.replacement_id)
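# Minimal usage sketch (shapes, probabilities and ids below are illustrative assumptions):
#   drop = WordDropout(0.33)
#   x = torch.randn(8, 20, 100)              # (batch, seq_len, hidden)
#   y = drop(x)                              # zeroes whole hidden vectors during training
#   unk_drop = SequenceUnitDropout(0.1, replacement_id=0)
#   ids = torch.randint(1, 1000, (8, 20))    # LongTensor of unit indices
#   ids = unk_drop(ids)                      # randomly replaces indices with id 0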
| 2.875 | 3 |
Day01-15/code/Day15/pdf2.py | bdfd/Python_Zero2Hero_DS | 3 | 1616 | <gh_stars>1-10
"""
Read a PDF file
Version: 0.1
Author: BDFD
Date: 2018-03-26
"""
from PyPDF2 import PdfFileReader
with open('./res/Python课程大纲.pdf', 'rb') as f:
reader = PdfFileReader(f, strict=False)
print(reader.numPages)
if reader.isEncrypted:
reader.decrypt('')
current_page = reader.getPage(5)
print(current_page)
print(current_page.extractText())
| 3 | 3 |
qt__pyqt__pyside__pyqode/qt__class_tree__parse_and_print__recursively__from__doc_qt_io/gui.py | DazEB2/SimplePyScripts | 0 | 1617 | <reponame>DazEB2/SimplePyScripts<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5 import QtWidgets as qtw
from PyQt5.QtTest import QTest
import time
import requests
from bs4 import BeautifulSoup
from console import get_inherited_children, ROOT_URL
class MainWindow(qtw.QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle('qt__class_tree__parse_and_print__recursively__from__doc_qt_io')
self.tree = qtw.QTreeWidget()
self.tree.setAlternatingRowColors(True)
self.tree.setHeaderLabel('NAME')
self.setCentralWidget(self.tree)
self.number_total_class = 0
def _fill_root(self, node: qtw.QTreeWidgetItem, url: str, global_number: int, indent_level=0):
if global_number > 0 and self.number_total_class >= global_number:
return
QTest.qWait(1000)
indent = ' ' * indent_level
rs = requests.get(url)
root = BeautifulSoup(rs.content, 'html.parser')
name_class = root.select_one('.context > .title').text.split()[0]
inherited_children = get_inherited_children(url, root)
number_inherited_children = len(inherited_children)
if number_inherited_children > 0:
name_class = '{} ({})'.format(name_class, number_inherited_children)
print(indent + name_class + ':')
else:
print(indent + name_class)
item = qtw.QTreeWidgetItem([name_class])
if not node:
self.tree.addTopLevelItem(item)
else:
node.addChild(item)
node.setExpanded(True)
self.number_total_class += 1
for name, url in inherited_children:
self._fill_root(item, url, global_number, indent_level + 1)
def fill_tree(self, global_number=-1):
self.number_total_class = 0
self.tree.clear()
        t = time.perf_counter()
self._fill_root(None, ROOT_URL, global_number)
qtw.QMessageBox.information(
self,
'Complete!',
            'Items: {}.\nElapsed: {:.3f} sec'.format(self.number_total_class, time.perf_counter() - t)
)
def closeEvent(self, e):
quit()
if __name__ == '__main__':
app = qtw.QApplication([])
w = MainWindow()
w.resize(500, 500)
w.show()
w.fill_tree()
app.exec()
| 2.4375 | 2 |
common/OpTestASM.py | kyle-ibm/op-test | 0 | 1618 | #!/usr/bin/env python3
# encoding=utf8
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: op-test-framework/common/OpTestASM.py $
#
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2017
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
'''
OpTestASM: Advanced System Management (FSP Web UI)
--------------------------------------------------
This class contains common functions which are useful for the
FSP ASM Web page. Some functionality is only accessible through
the FSP Web UI (such as progress codes), so we scrape it.
'''
import time
import subprocess
import os
import pexpect
import sys
import subprocess
from .OpTestConstants import OpTestConstants as BMC_CONST
from .OpTestError import OpTestError
import http.cookiejar
import urllib.request
import urllib.parse
import urllib.error
import re
import ssl
class OpTestASM:
def __init__(self, i_fspIP, i_fspUser, i_fspPasswd):
self.host_name = i_fspIP
self.user_name = i_fspUser
        self.password = i_fspPasswd
self.url = "https://%s/cgi-bin/cgi?" % self.host_name
self.cj = http.cookiejar.CookieJar()
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
opener = urllib.request.build_opener(urllib.request.HTTPSHandler(context=context))
opener.addheaders = [('User-agent', 'LTCTest')]
opener.add_handler(urllib.request.HTTPCookieProcessor(self.cj))
urllib.request.install_opener(opener)
self.setforms()
def setforms(self):
if "FW860" in self.ver():
self.hrdwr = 'p8'
self.frms = {'pwr': '59',
'dbg': '78',
'immpwroff': '32'}
else:
self.hrdwr = 'p7'
self.frms = {'pwr': '60',
'dbg': '79',
'immpwroff': '33'}
def getcsrf(self, form):
while True:
try:
myurl = urllib.request.urlopen(self.url+form, timeout=10)
except urllib.error.URLError:
time.sleep(2)
continue
break
out = myurl.read().decode("utf-8")
if 'CSRF_TOKEN' in out:
return re.findall('CSRF_TOKEN.*value=\'(.*)\'', out)[0]
else:
return '0'
def getpage(self, form):
myurl = urllib.request.urlopen(self.url+form, timeout=60)
return myurl.read().decode("utf-8")
def submit(self, form, param):
param['CSRF_TOKEN'] = self.getcsrf(form)
data = urllib.parse.urlencode(param).encode("utf-8")
req = urllib.request.Request(self.url+form, data)
return urllib.request.urlopen(req)
def login(self):
if not len(self.cj) == 0:
return True
param = {'user': self.user_name,
                 'password': self.password,
'login': 'Log in',
'lang': '0',
'CSRF_TOKEN': ''}
form = "form=2"
resp = self.submit(form, param)
count = 0
while count < 2:
if not len(self.cj) == 0:
break
# the login can quietly fail because the FSP has 'too many users' logged in,
# even though it actually doesn't. let's check to see if this is the case
# by trying a request.
if "Too many users" in self.getpage("form=2"):
raise OpTestError("FSP reports 'Too many users', FSP needs power cycle")
time.sleep(10)
self.submit(form, param)
msg = "Login failed with user:{0} and password:{1}".format(
self.user_name, self.password)
print(msg)
count += 1
if count == 2:
print(msg)
return False
return True
def logout(self):
param = {'submit': 'Log out',
'CSRF_TOKEN': ''}
form = "form=1"
self.submit(form, param)
def ver(self):
form = "form=1"
return self.getpage(form)
def execommand(self, cmd):
if not self.login():
raise OpTestError("Failed to login ASM page")
param = {'form': '16',
'exe': 'Execute',
'CSRF_TOKEN': '',
'cmd': cmd}
form = "form=16&frm=0"
self.submit(form, param)
def disablefirewall(self):
if not self.login():
raise OpTestError("Failed to login ASM page")
self.execommand('iptables -F')
self.logout()
def clearlogs(self):
if not self.login():
raise OpTestError("Failed to login ASM page")
param = {'form': '30',
'clear': "Clear all error/event log entries",
'CSRF_TOKEN': ''}
form = "form=30"
self.submit(form, param)
self.logout()
def powerstat(self):
form = "form=%s" % self.frms['pwr']
return self.getpage(form)
def start_debugvtty_session(self, partitionId='0', sessionId='0',
sessionTimeout='600'):
if not self.login():
raise OpTestError("Failed to login ASM page")
param = {'form': '81',
'p': partitionId,
's': sessionId,
't': sessionTimeout,
'Save settings': 'Save settings',
'CSRF_TOKEN': ''}
form = "form=81"
self.submit(form, param)
self.logout()
def enable_err_injct_policy(self):
if not self.login():
raise OpTestError("Failed to login ASM page")
param = {'form': '56',
'p': '1',
'submit': 'Save settings',
'CSRF_TOKEN': ''}
form = "form=56"
self.submit(form, param)
self.logout()
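# Usage sketch (the IP address and credentials below are placeholders, not real values):
#   asm = OpTestASM('10.0.0.10', 'user', 'passw0rd')
#   asm.disablefirewall()          # log in, flush the FSP iptables rules, log out
#   print(asm.powerstat())         # raw HTML of the power on/off status page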
| 1.640625 | 2 |
test/test_storage.py | jrabasco/PyPasser | 0 | 1619 | #!/usr/bin/python3.4
__author__ = "<NAME>"
import sys
import os
sys.path.append("..")
import unittest
from modules import storage
from modules.service import Service
from modules.database import Database
class TestStorage(unittest.TestCase):
def setUp(self):
self.service = Service()
self.database = Database()
open("test.service", "w+").close()
open("test.db", "w+").close()
def test_write_read_service(self):
self.service.service_name = "Hello"
self.service.username = "This"
self.service.password = "<PASSWORD>"
storage.write("test", self.service, "test.service")
service2 = Service()
storage.read("test", service2, "test.service")
self.assertEqual(service2.service_name, self.service.service_name)
self.assertEqual(service2.username, self.service.username)
self.assertEqual(service2.password, self.service.password)
def test_write_read_database(self):
self.database.add_service(Service())
self.database.add_service(Service())
self.database.name = "Hey"
storage.write("test", self.database, "test.db")
database2 = Database()
storage.read("test", database2, "test.db")
self.assertEqual(database2.name, self.database.name)
for i in range(len(self.database.services)):
self.assertEqual(database2.services[i].service_name, self.database.services[i].service_name)
self.assertEqual(database2.services[i].username, self.database.services[i].username)
self.assertEqual(database2.services[i].password, self.database.services[i].password)
def tearDown(self):
os.remove(os.getcwd() + "/test.service")
os.remove(os.getcwd() + "/test.db")
if __name__ == "__main__":
unittest.main() | 3.078125 | 3 |
virt/ansible-latest/lib/python2.7/site-packages/ansible/plugins/lookup/template.py | lakhlaifi/RedHat-Ansible | 1 | 1620 | <filename>virt/ansible-latest/lib/python2.7/site-packages/ansible/plugins/lookup/template.py
# Copyright: (c) 2012, <NAME> <<EMAIL>>
# Copyright: (c) 2012-17, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: template
author: <NAME> <<EMAIL>>
version_added: "0.9"
short_description: retrieve contents of file after templating with Jinja2
description:
- Returns a list of strings; for each template in the list of templates you pass in, returns a string containing the results of processing that template.
options:
_terms:
description: list of files to template
convert_data:
type: bool
description: whether to convert YAML into data. If False, strings that are YAML will be left untouched.
variable_start_string:
description: The string marking the beginning of a print statement.
default: '{{'
version_added: '2.8'
type: str
variable_end_string:
description: The string marking the end of a print statement.
default: '}}'
version_added: '2.8'
type: str
"""
EXAMPLES = """
- name: show templating results
debug:
msg: "{{ lookup('template', './some_template.j2') }}"
- name: show templating results with different variable start and end string
debug:
msg: "{{ lookup('template', './some_template.j2', variable_start_string='[%', variable_end_string='%]') }}"
"""
RETURN = """
_raw:
description: file(s) content after templating
"""
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_bytes, to_text
from ansible.template import generate_ansible_template_vars
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
convert_data_p = kwargs.get('convert_data', True)
lookup_template_vars = kwargs.get('template_vars', {})
ret = []
variable_start_string = kwargs.get('variable_start_string', None)
variable_end_string = kwargs.get('variable_end_string', None)
for term in terms:
display.debug("File lookup term: %s" % term)
lookupfile = self.find_file_in_search_path(variables, 'templates', term)
display.vvvv("File lookup using %s as file" % lookupfile)
if lookupfile:
b_template_data, show_data = self._loader._get_file_contents(lookupfile)
template_data = to_text(b_template_data, errors='surrogate_or_strict')
# set jinja2 internal search path for includes
searchpath = variables.get('ansible_search_path', [])
if searchpath:
# our search paths aren't actually the proper ones for jinja includes.
# We want to search into the 'templates' subdir of each search path in
# addition to our original search paths.
newsearchpath = []
for p in searchpath:
newsearchpath.append(os.path.join(p, 'templates'))
newsearchpath.append(p)
searchpath = newsearchpath
searchpath.insert(0, os.path.dirname(lookupfile))
self._templar.environment.loader.searchpath = searchpath
if variable_start_string is not None:
self._templar.environment.variable_start_string = variable_start_string
if variable_end_string is not None:
self._templar.environment.variable_end_string = variable_end_string
# The template will have access to all existing variables,
# plus some added by ansible (e.g., template_{path,mtime}),
# plus anything passed to the lookup with the template_vars=
# argument.
vars = variables.copy()
vars.update(generate_ansible_template_vars(lookupfile))
vars.update(lookup_template_vars)
self._templar.set_available_variables(vars)
# do the templating
res = self._templar.template(template_data, preserve_trailing_newlines=True,
convert_data=convert_data_p, escape_backslashes=False)
ret.append(res)
else:
raise AnsibleError("the template file %s could not be found for the lookup" % term)
return ret
| 2.546875 | 3 |
setup.py | ripiuk/fant_sizer | 0 | 1621 | from setuptools import setup, find_packages
from os.path import join, dirname
setup(
name="fant_sizer",
version="0.7",
author="<NAME>",
author_email="<EMAIL>",
description="fant_sizer command-line file-information",
url="https://github.com/ripiuk/fant_sizer",
keywords="file command-line information size tool recursively",
license="MIT",
classifiers=[
'Topic :: Utilities',
'Environment :: Console',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3.6'
],
packages=find_packages(),
long_description=open(join(dirname(__file__), "README.rst")).read(),
entry_points={
"console_scripts":
['fant_sizer = fant_sizer.fant_sizer:_main'],
},
)
| 1.445313 | 1 |
2018/Round 1A/A.py | elvisyjlin/google-code-jam | 0 | 1622 | def solve():
# Read input
R, C, H, V = map(int, input().split())
choco = []
for _ in range(R):
choco.append([0] * C)
choco_row, choco_col = [0]*R, [0]*C
num_choco = 0
for i in range(R):
row = input()
for j in range(C):
if row[j] == '@':
choco_col[j] += 1
choco[i][j] = 1
choco_row[i] = row.count('@')
num_choco += choco_row[i]
# Find H and V cuts
if num_choco == 0:
return 'POSSIBLE'
H_idx, V_idx = [], []
flag = True
if num_choco%(H+1)==0 and num_choco%(V+1)==0:
num_choco_h = num_choco/(H+1)
num_choco_v = num_choco/(V+1)
accum = 0
for i, r in enumerate(choco_row):
accum += r
if accum == num_choco_h:
accum = 0
H_idx.append(i)
elif accum > num_choco_h:
flag = False
break
if not flag:
return 'IMPOSSIBLE'
accum = 0
for i, c in enumerate(choco_col):
accum += c
if accum == num_choco_v:
accum = 0
V_idx.append(i)
elif accum > num_choco_v:
flag = False
break
if not flag:
return 'IMPOSSIBLE'
else:
return 'IMPOSSIBLE'
# Check each piece
r_from = 0
num_prev = None
for r in H_idx:
c_from = 0
for c in V_idx:
num = 0
for i in range(r_from, r+1):
for j in range(c_from, c+1):
num += choco[i][j]
if num_prev is None:
num_prev = num
elif num_prev != num:
return 'IMPOSSIBLE'
c_from = c+1
r_from = r+1
return 'POSSIBLE'
if __name__ == '__main__':
T = int(input())
for t in range(T):
print('Case #{}: {}'.format(t+1, solve()))
| 3.09375 | 3 |
desktop/libs/liboozie/src/liboozie/submittion_tests.py | vinaymundada27/Hue | 1 | 1623 | <reponame>vinaymundada27/Hue
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.contrib.auth.models import User
from nose.plugins.attrib import attr
from nose.tools import assert_equal, assert_true, assert_not_equal
from hadoop import cluster, pseudo_hdfs4
from hadoop.conf import HDFS_CLUSTERS, MR_CLUSTERS, YARN_CLUSTERS
from liboozie.submittion import Submission
from oozie.tests import OozieMockBase
from desktop.lib.test_utils import clear_sys_caches
from desktop.lib.django_test_util import make_logged_in_client
LOG = logging.getLogger(__name__)
@attr('requires_hadoop')
def test_copy_files():
cluster = pseudo_hdfs4.shared_cluster()
try:
c = make_logged_in_client()
user = User.objects.get(username='test')
prefix = '/tmp/test_copy_files'
if cluster.fs.exists(prefix):
cluster.fs.rmtree(prefix)
# Jars in various locations
deployment_dir = '%s/workspace' % prefix
external_deployment_dir = '%s/deployment' % prefix
jar_1 = '%s/udf1.jar' % prefix
jar_2 = '%s/lib/udf2.jar' % prefix
jar_3 = '%s/udf3.jar' % deployment_dir
jar_4 = '%s/lib/udf4.jar' % deployment_dir # Never move
cluster.fs.mkdir(prefix)
cluster.fs.create(jar_1)
cluster.fs.create(jar_2)
cluster.fs.create(jar_3)
cluster.fs.create(jar_4)
class MockNode():
def __init__(self, jar_path):
self.jar_path = jar_path
class MockJob():
def __init__(self):
self.node_list = [
MockNode(jar_1),
MockNode(jar_2),
MockNode(jar_3),
MockNode(jar_4),
]
def get_application_filename(self):
return 'workflow.xml'
submission = Submission(user, job=MockJob(), fs=cluster.fs, jt=cluster.jt)
submission._copy_files(deployment_dir, "<xml>My XML</xml>")
submission._copy_files(external_deployment_dir, "<xml>My XML</xml>")
# All sources still there
assert_true(cluster.fs.exists(jar_1))
assert_true(cluster.fs.exists(jar_2))
assert_true(cluster.fs.exists(jar_3))
assert_true(cluster.fs.exists(jar_4))
deployment_dir = deployment_dir + '/lib'
external_deployment_dir = external_deployment_dir + '/lib'
list_dir_workspace = cluster.fs.listdir(deployment_dir)
list_dir_deployement = cluster.fs.listdir(external_deployment_dir)
# All destinations there
assert_true(cluster.fs.exists(deployment_dir + '/udf1.jar'), list_dir_workspace)
assert_true(cluster.fs.exists(deployment_dir + '/udf2.jar'), list_dir_workspace)
assert_true(cluster.fs.exists(deployment_dir + '/udf3.jar'), list_dir_workspace)
assert_true(cluster.fs.exists(deployment_dir + '/udf4.jar'), list_dir_workspace)
assert_true(cluster.fs.exists(external_deployment_dir + '/udf1.jar'), list_dir_deployement)
assert_true(cluster.fs.exists(external_deployment_dir + '/udf2.jar'), list_dir_deployement)
assert_true(cluster.fs.exists(external_deployment_dir + '/udf3.jar'), list_dir_deployement)
assert_true(cluster.fs.exists(external_deployment_dir + '/udf4.jar'), list_dir_deployement)
stats_udf1 = cluster.fs.stats(deployment_dir + '/udf1.jar')
stats_udf2 = cluster.fs.stats(deployment_dir + '/udf2.jar')
stats_udf3 = cluster.fs.stats(deployment_dir + '/udf3.jar')
stats_udf4 = cluster.fs.stats(deployment_dir + '/udf4.jar')
submission._copy_files('%s/workspace' % prefix, "<xml>My XML</xml>")
assert_not_equal(stats_udf1['fileId'], cluster.fs.stats(deployment_dir + '/udf1.jar')['fileId'])
assert_not_equal(stats_udf2['fileId'], cluster.fs.stats(deployment_dir + '/udf2.jar')['fileId'])
assert_not_equal(stats_udf3['fileId'], cluster.fs.stats(deployment_dir + '/udf3.jar')['fileId'])
assert_equal(stats_udf4['fileId'], cluster.fs.stats(deployment_dir + '/udf4.jar')['fileId'])
finally:
try:
cluster.fs.rmtree(prefix)
except:
LOG.exception('failed to remove %s' % prefix)
class MockFs():
def __init__(self, logical_name=None):
self.fs_defaultfs = 'hdfs://curacao:8020'
self.logical_name = logical_name if logical_name else ''
class MockJt():
def __init__(self, logical_name=None):
self.logical_name = logical_name if logical_name else ''
class TestSubmission(OozieMockBase):
def test_get_properties(self):
submission = Submission(self.user, fs=MockFs())
assert_equal({}, submission.properties)
submission._update_properties('curacao:8032', '/deployment_dir')
assert_equal({
'jobTracker': 'curacao:8032',
'nameNode': 'hdfs://curacao:8020'
}, submission.properties)
def test_get_logical_properties(self):
submission = Submission(self.user, fs=MockFs(logical_name='fsname'), jt=MockJt(logical_name='jtname'))
assert_equal({}, submission.properties)
submission._update_properties('curacao:8032', '/deployment_dir')
assert_equal({
'jobTracker': 'jtname',
'nameNode': 'fsname'
}, submission.properties)
def test_update_properties(self):
finish = []
finish.append(MR_CLUSTERS.set_for_testing({'default': {}}))
finish.append(MR_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
finish.append(YARN_CLUSTERS.set_for_testing({'default': {}}))
finish.append(YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
try:
properties = {
'user.name': 'hue',
'test.1': 'http://localhost/test?test1=test&test2=test',
'nameNode': 'hdfs://curacao:8020',
'jobTracker': 'jtaddress'
}
final_properties = properties.copy()
submission = Submission(None, properties=properties, oozie_id='test', fs=MockFs())
assert_equal(properties, submission.properties)
submission._update_properties('jtaddress', 'deployment-directory')
assert_equal(final_properties, submission.properties)
clear_sys_caches()
fs = cluster.get_hdfs()
jt = cluster.get_next_ha_mrcluster()[1]
final_properties = properties.copy()
final_properties.update({
'jobTracker': 'jtaddress',
'nameNode': fs.fs_defaultfs
})
submission = Submission(None, properties=properties, oozie_id='test', fs=fs, jt=jt)
assert_equal(properties, submission.properties)
submission._update_properties('jtaddress', 'deployment-directory')
assert_equal(final_properties, submission.properties)
finish.append(HDFS_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('namenode'))
finish.append(MR_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('jobtracker'))
clear_sys_caches()
fs = cluster.get_hdfs()
jt = cluster.get_next_ha_mrcluster()[1]
final_properties = properties.copy()
final_properties.update({
'jobTracker': 'jobtracker',
'nameNode': 'namenode'
})
submission = Submission(None, properties=properties, oozie_id='test', fs=fs, jt=jt)
assert_equal(properties, submission.properties)
submission._update_properties('jtaddress', 'deployment-directory')
assert_equal(final_properties, submission.properties)
finally:
clear_sys_caches()
for reset in finish:
reset()
def test_get_external_parameters(self):
xml = """
<workflow-app name="Pig" xmlns="uri:oozie:workflow:0.4">
<start to="Pig"/>
<action name="Pig">
<pig>
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<prepare>
<delete path="${output}"/>
</prepare>
<script>aggregate.pig</script>
<argument>-param</argument>
<argument>INPUT=${input}</argument>
<argument>-param</argument>
<argument>OUTPUT=${output}</argument>
<configuration>
<property>
<name>mapred.input.format.class</name>
<value>org.apache.hadoop.examples.SleepJob$SleepInputFormat</value>
</property>
</configuration>
</pig>
<ok to="end"/>
<error to="kill"/>
</action>
<kill name="kill">
<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<end name="end"/>
</workflow-app>
"""
properties = """
#
# Licensed to the Hue
#
nameNode=hdfs://localhost:8020
jobTracker=localhost:8021
queueName=default
examplesRoot=examples
oozie.use.system.libpath=true
oozie.wf.application.path=${nameNode}/user/${user.name}/${examplesRoot}/apps/pig
"""
parameters = Submission(self.user)._get_external_parameters(xml, properties)
assert_equal({'oozie.use.system.libpath': 'true',
'input': '',
'jobTracker': 'localhost:8021',
'oozie.wf.application.path': '${nameNode}/user/${user.name}/${examplesRoot}/apps/pig',
'examplesRoot': 'examples',
'output': '',
'nameNode': 'hdfs://localhost:8020',
'queueName': 'default'
},
parameters)
| 1.710938 | 2 |
Training/train_baseHD.py | Wenyuan-Vincent-Li/SSL_Seg_GAN | 1 | 1624 | <gh_stars>1-10
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from Training import functions
from Training.imresize import imresize
import matplotlib.pyplot as plt
from Models.pix2pixHD_base import GANLoss, VGGLoss
from Models.pix2pixHD2 import mask2onehot
class Losses():
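    """Bundle of the GAN, L1 feature-matching, cross-entropy/BCE and optional VGG losses used in train_single_scale."""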
def __init__(self, opt):
self.criterionGAN = GANLoss(not opt.no_lsgan)
self.criterionFeat = nn.L1Loss()
if opt.contour:
self.crossEntropy = nn.BCEWithLogitsLoss()
else:
self.crossEntropy = nn.CrossEntropyLoss()
if not opt.no_vgg_loss:
self.criterionVGG = VGGLoss()
def train_single_scale(dataloader, netD, netG, netS, reals, Gs, Ss, in_s, in_s_S, NoiseAmp, NoiseAmpS, opt):
'''
:param netD: currD
:param netG: currG
:param netS: currS
:param reals: a list of image pyramid ## TODO: you can just pass image shape here
:param Gs: list of prev netG
:param Ss: list of prev netS
:param in_s: 0-> all zero [1, 3, 26, 26]
:param NoiseAmp: [] -> [1]
:param opt: config
:return:
'''
loss = Losses(opt)
real = reals[opt.scale_num] # find the current level image xn
opt.nzx = real[0]
opt.nzy = real[1]
# z_opt = 0 ## dummy z_opt
alpha = opt.alpha
# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr_d, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr_g, betas=(opt.beta1, 0.999))
optimizerS = optim.Adam(netS.parameters(), lr=opt.lr_s, betas=(opt.beta1, 0.999))
schedulerD = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerD, milestones=[opt.niter * 0.8], gamma=opt.gamma)
schedulerG = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerG, milestones=[opt.niter * 0.8], gamma=opt.gamma)
schedulerS = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerS, milestones=[opt.niter * 0.8],
gamma=opt.gamma)
errD2plot = []
errG2plot = []
D_real2plot = []
D_fake2plot = []
for epoch in range(opt.niter): # niter = 2000
if Gs == [] and Ss == []:
noise_ = functions.generate_noise([1, opt.nzx, opt.nzy], opt.batchSize) # [None, 1, 32, 32]
noise_ = noise_.expand(opt.batchSize, 3, opt.nzx, opt.nzy)
## Noise_: for generated false samples through generator
else:
noise_ = functions.generate_noise([1, opt.nzx, opt.nzy], opt.batchSize)
for j, data in enumerate(dataloader):
data['image'] = data['image'].to(opt.device)
data['label'] = data['label'].long().to(opt.device)
############################
# (1) Update D network: maximize D(x) + D(G(z))
###########################
# train with real
netD.zero_grad()
pred_real = netD(data['image'], data['label'][:,0:1,...])
loss_D_real = loss.criterionGAN(pred_real, True)
D_x = loss_D_real.item()
# train with fake
if (j == 0) & (epoch == 0): # first iteration training in this level
if Gs == [] and Ss == []:
prev = torch.full([opt.batchSize, opt.nc_z, opt.nzx, opt.nzy], 0, device=opt.device)
in_s = prev # full of 0 [None, 3, 32, 32]
prev_S = torch.full([opt.batchSize, opt.label_nc, opt.nzx, opt.nzy], 0, device=opt.device)
in_s_S = prev_S # full of 0 [None, 4, 32, 32]
mask = data['label'][:,0:1,...]
opt.noise_amp = opt.noise_amp_init
opt.noise_amp_S = opt.noise_amp_init
else:
prev = draw_concat(Gs, data['down_scale_label'], reals, NoiseAmp, in_s, 'generator', opt)
## given a new noise, prev is a image generated by previous Generator with bilinear upsampling [1, 3, 33, 33]
criterion = nn.MSELoss()
RMSE = torch.sqrt(criterion(data['image'], prev))
opt.noise_amp = opt.noise_amp_init * RMSE
prev_S = draw_concat(Ss, data['down_scale_image'], reals, NoiseAmpS, in_s_S, 'segment', opt) ## prob with [None, 4, 32, 32]
onehot_label = mask2onehot(data['label'][:,0:1,...], opt.label_nc)
RMSE_S = torch.sqrt(criterion(onehot_label, prev_S))
# RMSE_S = 0
opt.noise_amp_S = opt.noise_amp_init * RMSE_S
mask = data['label'][:,0:1,...]
else:
prev = draw_concat(Gs, data['down_scale_label'], reals, NoiseAmp, in_s, 'generator', opt)
prev_S = draw_concat(Ss, data['down_scale_image'], reals, NoiseAmpS, in_s_S, 'segment', opt)
mask = data['label'][:,0:1,...]
if Gs == []:
noise = noise_ ## Gausiaan noise for generating image [None, 3, 42, 42]
else:
noise = opt.noise_amp * noise_ + prev ## [None, 3, 43, 43] new noise is equal to the prev generated image plus the gaussian noise.
fake = netG(noise.detach(), prev, mask) # [None, 3, 32, 32] the same size with the input image
# detach() make sure that the gradients don't go to the noise.
# prev:[None, 3, 42, 42] -> [None, 3, 43, 43] first step prev = 0, second step prev = a image generated by previous Generator with bilinaer upsampling
pred_fake = netD(fake.detach(), data['label'][:,0:1,...]) # output shape [1, 1, 16, 16] -> [1, 1, 23, 23]
# print(len(pred_fake), len(pred_fake[0]))
loss_D_fake = loss.criterionGAN(pred_fake, False)
D_G_z = loss_D_fake.item()
# segment_logit, segment_mask = netS(data['image'], mask2onehot(prev_S, opt.label_nc))
# print(data['image'].shape, onehot.shape)
# print(epoch, j)
segment_logit, segment_prob, segment_mask = netS(data['image'], prev_S.detach())
pred_fake_S = netD(data['image'], segment_prob.detach())
loss_D_fake_S = loss.criterionGAN(pred_fake_S, False)
D_S_z = loss_D_fake_S.item()
errD = (loss_D_real + 0.5 * loss_D_fake + 0.5 * loss_D_fake_S) ## Todo: figure out a proper coefficient
errD.backward()
optimizerD.step()
errD2plot.append(errD.detach()) ## errD for each iteration
############################
# (2) Update G network: maximize D(G(z))
###########################
netG.zero_grad()
pred_fake = netD(fake, data['label'][:,0:1,...])
loss_G_GAN = 0.5 * loss.criterionGAN(pred_fake, True)
# GAN feature matching loss
loss_G_GAN_Feat = 0
if not opt.no_ganFeat_loss:
feat_weights = 4.0 / (opt.n_layers_D + 1)
D_weights = 1.0 / opt.num_D
for i in range(opt.num_D):
for j in range(len(pred_fake[i]) - 1):
loss_G_GAN_Feat += D_weights * feat_weights * \
loss.criterionFeat(pred_fake[i][j],
pred_real[i][j].detach()) * opt.lambda_feat
# VGG feature matching loss
loss_G_VGG = 0
if not opt.no_vgg_loss:
loss_G_VGG = loss.criterionVGG(fake, data['image']) * opt.lambda_feat
## reconstruction loss
if alpha != 0: ## alpha = 10 calculate the reconstruction loss
Recloss = nn.MSELoss()
rec_loss = alpha * Recloss(fake, data['image'])
else:
rec_loss = 0
errG = loss_G_GAN + loss_G_GAN_Feat + loss_G_VGG + rec_loss
errG.backward()
optimizerG.step()
############################
# (3) Update S network: maximize D(S(z))
###########################
netS.zero_grad()
pred_fake_S = netD(data['image'], segment_prob)
loss_G_GAN_S = 0.03 * loss.criterionGAN(pred_fake_S, True)
# Segmentation loss
if opt.contour:
loss_G_Seg = loss.crossEntropy(segment_logit, data['label'].float())
else:
loss_G_Seg = loss.crossEntropy(segment_prob, torch.squeeze(data['label'][:,0:1,...], dim =1))
# GAN feature matching loss
loss_G_GAN_Feat_S = 0
if not opt.no_ganFeat_loss:
feat_weights = 4.0 / (opt.n_layers_D + 1)
D_weights = 1.0 / opt.num_D
for i in range(opt.num_D):
for j in range(len(pred_fake_S[i]) - 1):
loss_G_GAN_Feat_S += D_weights * feat_weights * \
loss.criterionFeat(pred_fake_S[i][j],
pred_real[i][j].detach()) * opt.lambda_feat
errS = loss_G_GAN_S + loss_G_GAN_Feat_S + loss_G_Seg
errS.backward()
optimizerS.step()
## for every epoch, do the following:
errG2plot.append(errG.detach()) ## ErrG for each iteration
D_real2plot.append(D_x) ## discriminator loss on real
D_fake2plot.append(D_G_z + D_S_z) ## discriminator loss on fake
if epoch % 25 == 0 or epoch == (opt.niter - 1):
print('scale %d:[%d/%d]' % (opt.scale_num, epoch, opt.niter))
if epoch % 25 == 0 or epoch == (opt.niter - 1):
plt.imsave('%s/fake_sample_%d.png' % (opt.outf, epoch),
functions.convert_image_np(fake.detach()), vmin=0, vmax=1)
plt.imsave('%s/fake_sample_real_%d.png' % (opt.outf, epoch),
functions.convert_image_np(data['image']), vmin=0, vmax=1)
plt.imsave('%s/fake_sample_mask_%d.png' % (opt.outf, epoch),
functions.convert_mask_np(data['label'][:,0:1,...], num_classes= opt.label_nc))
plt.imsave('%s/segmentation_mask_%d.png' % (opt.outf, epoch),
functions.convert_mask_np(segment_mask.detach(), num_classes=opt.label_nc))
schedulerD.step()
schedulerG.step()
schedulerS.step()
functions.save_networks(netG, netD, netS, opt) ## save netG, netD, z_opt, opt is used to parser output path
return in_s, in_s_S, netG, netS
def draw_concat(Gs, masks, reals, NoiseAmp, in_s, mode, opt):
'''
:param Gs: [G0]
    :param masks: [down-scaled masks (or images, in 'segment' mode)]
:param reals: [image pyramid] only used to represent the image shape
:param NoiseAmp: [1]
:param in_s: all zeros [1, 3, 26, 26]
    :param mode: 'generator' or 'segment'
:param opt:
:return:
'''
G_z = in_s[:opt.batchSize, :, :, :] # [None, 3, 26, 26] all zeros, image input for the corest level
if len(Gs) > 0:
if mode == 'generator':
count = 0
for G, mask, real_curr, real_next, noise_amp in zip(Gs, masks, reals, reals[1:], NoiseAmp):
if count == 0:
z = functions.generate_noise([1, real_curr[0], real_curr[1]],
opt.batchSize)
z = z.expand(opt.batchSize, G_z.shape[1], z.shape[2], z.shape[3])
else:
z = functions.generate_noise(
[opt.nc_z, real_curr[0], real_curr[1]], opt.batchSize)
G_z = G_z[:, :, 0:real_curr[0], 0:real_curr[1]] ## G_z [None, 3, 32, 32]
z_in = noise_amp * z + G_z
G_z = G(z_in.detach(), G_z, mask) ## [1, 3, 26, 26] output of previous generator
G_z = imresize(G_z, real_next[1] / real_curr[1], opt)
G_z = G_z[:, :, 0:real_next[0],
0:real_next[1]] ## resize the image to be compatible with current G [1, 3, 33, 33]
count += 1
elif mode == 'segment':
count = 0
for G, mask, real_curr, real_next, noise_amp in zip(Gs, masks, reals, reals[1:], NoiseAmp):
G_z = G_z[:, :, 0:real_curr[0], 0:real_curr[1]] ## G_z [None, 3, 32, 32]
_, G_z, _ = G(mask, G_z) ## [1, 3, 26, 26] output of previous generator
if opt.contour:
G_z = torch.cat((G_z, 1-G_z), 1)
G_z = imresize(G_z, real_next[1] / real_curr[1], opt)
G_z = G_z[:, :, 0:real_next[0],
0:real_next[1]] ## resize the image to be compatible with current G [1, 3, 33, 33]
count += 1
return G_z
| 2.359375 | 2 |
tests/python/unittest/test_tir_pass_inject_double_buffer.py | 0xreza/tvm | 0 | 1625 | <filename>tests/python/unittest/test_tir_pass_inject_double_buffer.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_double_buffer():
dtype = 'int64'
n = 100
m = 4
tx = te.thread_axis("threadIdx.x")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
C = ib.pointer("float32", name="C")
ib.scope_attr(tx, "thread_extent", 1)
with ib.for_range(0, n) as i:
B = ib.allocate("float32", m, name="B", scope="shared")
with ib.new_scope():
ib.scope_attr(B.asobject(), "double_buffer_scope", 1)
with ib.for_range(0, m) as j:
B[j] = A[i * 4 + j]
with ib.for_range(0, m) as j:
C[j] = B[j] + 1
stmt = ib.get()
stmt = tvm.tir.ir_pass.InjectDoubleBuffer(stmt, 2)
stmt = tvm.tir.ir_pass.Simplify(stmt)
assert isinstance(stmt.body.body, tvm.tir.Allocate)
assert stmt.body.body.extents[0].value == 2
mod = tvm.IRModule({
"db" : tvm.tir.PrimFunc([A.asobject(), C.asobject()], stmt)
})
f = tvm.tir.transform.ThreadSync("shared")(mod)["db"]
count = [0]
def count_sync(op):
if isinstance(op, tvm.tir.Call) and op.name == "tvm_storage_sync":
count[0] += 1
tvm.tir.ir_pass.PostOrderVisit(f.body, count_sync)
assert count[0] == 4
if __name__ == "__main__":
test_double_buffer()
| 2.125 | 2 |
read_sensor.py | shivupoojar/openfaas-pi | 1 | 1626 | import requests
from sense_hat import SenseHat
import smbus
import time
while True:
try:
pressure=0
sense = SenseHat()
pressure = sense.get_pressure()
data = {'pressure':pressure}
print(pressure)
#send http request to sense serverless function with pressure
#data
r=requests.post('http://127.0.0.1:8080/function/sensor',data)
print(r.text)
sense=SenseHat()
sense.show_message(r.text)
except KeyboardInterrupt:
sys.exit()
| 2.734375 | 3 |
trabantsim/prototypes/space_invaders.py | highfestiva/life | 9 | 1627 | #!/usr/bin/env python3
# Space Invadersishkebab.
from trabant import *
# ASCII geometries.
shipascii = r'''
/\
/XXXXXXXX\
v v
'''
invader = r'''
/XXXXXX\
/XXXXXXXX\
XXXXXXXXXX
XX XX XX
\XXXXXXXX/
/XX XX\
/X/ \/ \X\
X/ \X
'''
cam(distance=250)
gravity((0,0,0))
ship = create_ascii_object(shipascii, pos=(0,0,-100), col='#070')
shots = []
invaderspeeds,isi = [(25,0,0), (0,0,-10), (-25,0,0), (0,0,-10)],0
invaders = set()
for y in range(2):
for x in range(8):
invaders.add(create_ascii_object(invader, pos=(x*25-130,0,100-y*20), col=rndvec().abs(), physmesh=True))
for invader in invaders:
invader.vel(invaderspeeds[0])
while loop():
# Steering.
vel = keydir()*50 + tapdir(ship.pos())*4
ship.vel((vel.x,0,0)) # Only move in X.
# Shooting.
is_tap_close = taps() and tapdir(ship.pos()).x < 3
is_shooting = 'Space' in keys() or 'LCtrl' in keys() or is_tap_close
if is_shooting and timeout(0.7, first_hit=True):
shots += [create_sphere(ship.pos()+vec3(0,0,10), vel=(0,0,200), col='#fff')]
sound(sound_bang, shots[-1].pos())
# Run invaders.
if timeout(3, timer='invaders'):
isi = (isi+1)%len(invaderspeeds)
[i.vel(invaderspeeds[isi]) for i in invaders]
# Check collisions, make explosions.
for o in collided_objects():
if o in invaders:
invaders.remove(o)
explode(o.pos(),o.vel(),5)
elif o == ship:
while loop():
pass
o.release()
| 2.59375 | 3 |
model/backbone/xception.py | Shang-XH/BAFTT | 4 | 1628 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from model.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
def fixed_padding(inputs, kernel_size, dilation):
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
return padded_inputs
class SeparableConv2d(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, BatchNorm=None):
super(SeparableConv2d, self).__init__()
self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0, dilation,
groups=inplanes, bias=bias)
self.bn = BatchNorm(inplanes)
self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
def forward(self, x):
x = fixed_padding(x, self.conv1.kernel_size[0], dilation=self.conv1.dilation[0])
x = self.conv1(x)
x = self.bn(x)
x = self.pointwise(x)
return x
class Block(nn.Module):
def __init__(self, inplanes, planes, reps, stride=1, dilation=1, BatchNorm=None,
start_with_relu=True, grow_first=True, is_last=False):
super(Block, self).__init__()
if planes != inplanes or stride != 1:
self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
self.skipbn = BatchNorm(planes)
else:
self.skip = None
self.relu = nn.ReLU(inplace=True)
rep = []
filters = inplanes
if grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(inplanes, planes, 3, 1, dilation, BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
filters = planes
for i in range(reps - 1):
rep.append(self.relu)
rep.append(SeparableConv2d(filters, filters, 3, 1, dilation, BatchNorm=BatchNorm))
rep.append(BatchNorm(filters))
if not grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(inplanes, planes, 3, 1, dilation, BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
if stride != 1:
rep.append(self.relu)
rep.append(SeparableConv2d(planes, planes, 3, 2, BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
if stride == 1 and is_last:
rep.append(self.relu)
rep.append(SeparableConv2d(planes, planes, 3, 1, BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
if not start_with_relu:
rep = rep[1:]
self.rep = nn.Sequential(*rep)
def forward(self, inp):
x = self.rep(inp)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x = x + skip
return x
class AlignedXception(nn.Module):
"""
    Modified Aligned Xception
"""
def __init__(self, output_stride, BatchNorm,
pretrained=True):
super(AlignedXception, self).__init__()
if output_stride == 16:
entry_block3_stride = 2
middle_block_dilation = 1
exit_block_dilations = (1, 2)
elif output_stride == 8:
entry_block3_stride = 1
middle_block_dilation = 2
exit_block_dilations = (2, 4)
else:
raise NotImplementedError
# Entry flow
self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1, bias=False)
self.bn1 = BatchNorm(32)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
self.bn2 = BatchNorm(64)
self.block1 = Block(64, 128, reps=2, stride=2, BatchNorm=BatchNorm, start_with_relu=False)
self.block2 = Block(128, 256, reps=2, stride=2, BatchNorm=BatchNorm, start_with_relu=False,
grow_first=True)
self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, BatchNorm=BatchNorm,
start_with_relu=True, grow_first=True, is_last=True)
# Middle flow
self.block4 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block5 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block6 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block7 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block8 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block9 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block10 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block11 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block12 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block13 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block14 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block15 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block16 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block17 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block18 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block19 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
# Exit flow
self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_dilations[0],
BatchNorm=BatchNorm, start_with_relu=True, grow_first=False, is_last=True)
self.conv3 = SeparableConv2d(1024, 1536, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
self.bn3 = BatchNorm(1536)
self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
self.bn4 = BatchNorm(1536)
self.conv5 = SeparableConv2d(1536, 2048, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
self.bn5 = BatchNorm(2048)
# Init weights
self._init_weight()
# Load pretrained model
if pretrained:
self._load_pretrained_model()
def forward(self, x):
# Entry flow
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.block1(x)
# add relu here
x = self.relu(x)
low_level_feat = x
x = self.block2(x)
x = self.block3(x)
# Middle flow
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
x = self.block12(x)
x = self.block13(x)
x = self.block14(x)
x = self.block15(x)
x = self.block16(x)
x = self.block17(x)
x = self.block18(x)
x = self.block19(x)
# Exit flow
x = self.block20(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = self.conv5(x)
x = self.bn5(x)
x = self.relu(x)
return x, low_level_feat
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _load_pretrained_model(self):
pretrain_dict = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth')
model_dict = {}
state_dict = self.state_dict()
for k, v in pretrain_dict.items():
if k in model_dict:
if 'pointwise' in k:
v = v.unsqueeze(-1).unsqueeze(-1)
if k.startswith('block11'):
model_dict[k] = v
model_dict[k.replace('block11', 'block12')] = v
model_dict[k.replace('block11', 'block13')] = v
model_dict[k.replace('block11', 'block14')] = v
model_dict[k.replace('block11', 'block15')] = v
model_dict[k.replace('block11', 'block16')] = v
model_dict[k.replace('block11', 'block17')] = v
model_dict[k.replace('block11', 'block18')] = v
model_dict[k.replace('block11', 'block19')] = v
elif k.startswith('block12'):
model_dict[k.replace('block12', 'block20')] = v
elif k.startswith('bn3'):
model_dict[k] = v
model_dict[k.replace('bn3', 'bn4')] = v
elif k.startswith('conv4'):
model_dict[k.replace('conv4', 'conv5')] = v
elif k.startswith('bn4'):
model_dict[k.replace('bn4', 'bn5')] = v
else:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
if __name__ == "__main__":
import torch
model = AlignedXception(BatchNorm=nn.BatchNorm2d, pretrained=True, output_stride=16)
input = torch.rand(1, 3, 512, 512)
output, low_level_feat = model(input)
print(output.size())
print(low_level_feat.size()) | 2.40625 | 2 |
Backend/autonomus/utils/mail.py | IrinaMBejan/Autonom | 2 | 1629 | <gh_stars>1-10
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail, Substitution
API_KEY = '<KEY>'
API_KEY_ID = '<KEY>'
ENCODING = "utf-8"
DEFAULT_MAIL="<EMAIL>"
def link(urlsafe):
return "https://develop-dot-autonomus.appspot.com/events/details?event_id=" + urlsafe
def send_newsletter(users, event1, event2):
for user in users:
send_mail(DEFAULT_MAIL, user.username, user.email, event1, event2)
def send_mail(from_mail, username, to_mails, event1, event2):
message = Mail(
from_email=from_mail,
to_emails=to_mails
)
message.dynamic_template_data = {
'name': username,
'title1' : event1.title,
'src1' : link(event1.urlsafe),
'loc1': event1.location,
'date1': event1.date.strftime('%d-%m-%Y %H:%M'),
'title2' : event2.title,
'src2' : link(event2.urlsafe),
'loc2': event2.location,
'date2': event2.date.strftime('%d-%m-%Y %H:%M')
}
print('before')
message.template_id = 'd-6607926b2aba4f8fba984dccdaa9ece6'
client = SendGridAPIClient(API_KEY)
response = client.send(message)
code = response.status_code
print('after')
was_successful = lambda ret_code: ret_code // 100 in (2, 3)
if not was_successful(code):
raise Exception("Couldn't send e-mail: {} {}".format(code, response.body))
| 2.53125 | 3 |
yellowbrick/features/pca.py | percygautam/yellowbrick | 1 | 1630 | <reponame>percygautam/yellowbrick
# -*- coding: utf-8 -*-
# yellowbrick.features.pca
# Decomposition based feature visualization with PCA.
#
# Author: <NAME>
# Author: <NAME>
# Author: <NAME>
# Created: Tue May 23 18:34:27 2017 -0400
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: pca.py [] <EMAIL> $
"""
Decomposition based feature visualization with PCA.
"""
##########################################################################
## Imports
##########################################################################
# NOTE: must import mplot3d to load the 3D projection
from mpl_toolkits.mplot3d import Axes3D  # noqa: registers the "3d" projection
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from yellowbrick.style import palettes
from yellowbrick.features.projection import ProjectionVisualizer
from yellowbrick.exceptions import YellowbrickValueError, NotFitted
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA as PCATransformer
from sklearn.preprocessing import StandardScaler
from sklearn.exceptions import NotFittedError
##########################################################################
# 2D and 3D PCA Visualizer
##########################################################################
class PCA(ProjectionVisualizer):
"""
Produce a two or three dimensional principal component plot of a data array
projected onto its largest sequential principal components. It is common
practice to scale the data array ``X`` before applying a PC decomposition.
Variable scaling can be controlled using the ``scale`` argument.
Parameters
----------
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in, the current axes
will be used (or generated if required).
features : list, default: None
The names of the features specified by the columns of the input dataset.
The length of this list must match the number of columns in X, otherwise
an exception will be raised on ``fit()``.
classes : list, default: None
The class labels for each class in y, ordered by sorted class index. These
names act as a label encoder for the legend, identifying integer classes
or renaming string labels. If omitted, the class labels will be taken from
the unique values in y.
Note that the length of this list must match the number of unique values in
y, otherwise an exception is raised. This parameter is only used in the
discrete target type case and is ignored otherwise.
scale : bool, default: True
Boolean that indicates if user wants to scale data.
projection : int or string, default: 2
The number of axes to project into, either 2d or 3d. To plot 3d plots
with matplotlib, please ensure a 3d axes is passed to the visualizer,
otherwise one will be created using the current figure.
proj_features : bool, default: False
Boolean that indicates if the user wants to project the features
in the projected space. If True the plot will be similar to a biplot.
colors : list or tuple, default: None
A single color to plot all instances as or a list of colors to color each
instance according to its class in the discrete case or as an ordered
colormap in the sequential case. If not enough colors per class are
specified then the colors are treated as a cycle.
colormap : string or cmap, default: None
The colormap used to create the individual colors. In the discrete case
it is used to compute the number of colors needed for each class and
in the continuous case it is used to create a sequential color map based
on the range of the target.
alpha : float, default: 0.75
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
random_state : int, RandomState instance or None, optional (default None)
This parameter sets the random state on this solver. If the input X is
larger than 500x500 and the number of components to extract is lower
than 80% of the smallest dimension of the data, then the more efficient
`randomized` solver is enabled.
colorbar : bool, default: True
If the target_type is "continuous", draw a colorbar to the right of the
scatter plot. The colorbar axes is accessible using the cax property.
heatmap : bool, default: False
Add a heatmap showing contribution of each feature in the principal components.
Also draws a colorbar for readability purpose. The heatmap is accessible
using lax property and colorbar using uax property.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
pca_components_ : ndarray, shape (n_features, n_components)
This describes the magnitude of each feature in the principal components.
This is primarily used to draw the biplots.
classes_ : ndarray, shape (n_classes,)
The class labels that define the discrete values in the target. Only
available if the target type is discrete. This is guaranteed to be
strings even if the classes are a different type.
features_ : ndarray, shape (n_features,)
The names of the features discovered or used in the visualizer that
can be used as an index to access or modify data in X. If a user passes
feature names in, those features are used. Otherwise the columns of a
DataFrame are used or just simply the indices of the data array.
range_ : (min y, max y)
A tuple that describes the minimum and maximum values in the target.
Only available if the target type is continuous.
Examples
--------
>>> from sklearn import datasets
>>> iris = datasets.load_iris()
>>> X = iris.data
>>> y = iris.target
>>> visualizer = PCA()
>>> visualizer.fit_transform(X, y)
>>> visualizer.show()
"""
def __init__(
self,
ax=None,
features=None,
classes=None,
scale=True,
projection=2,
proj_features=False,
colors=None,
colormap=None,
alpha=0.75,
random_state=None,
colorbar=True,
heatmap=False,
**kwargs
):
super(PCA, self).__init__(
ax=ax,
features=features,
classes=classes,
colors=colors,
colormap=colormap,
projection=projection,
alpha=alpha,
colorbar=colorbar,
**kwargs
)
# Data Parameters
self.scale = scale
self.proj_features = proj_features
# Create the PCA transformer
self.pca_transformer = Pipeline(
[
("scale", StandardScaler(with_std=self.scale)),
("pca", PCATransformer(self.projection, random_state=random_state)),
]
)
self.alpha = alpha
# Visual Parameters
self.heatmap = heatmap
self._uax, self._lax = None, None
# No heatmap can be drawn with 3d plots as they do not permit axes
# division.
if self.projection == 3 and self.heatmap:
raise YellowbrickValueError(
"heatmap and colorbar are not compatible with 3d projections"
)
@property
def uax(self):
"""
The axes of the colorbar, bottom of scatter plot. This is the colorbar
for heatmap and not for the scatter plot.
"""
if self._uax is None:
raise AttributeError("This visualizer does not have an axes for colorbar")
return self._uax
@property
def lax(self):
"""
The axes of the heatmap below scatter plot.
"""
if self._lax is None:
raise AttributeError("This visualizer does not have an axes for heatmap")
return self._lax
def layout(self, divider=None):
"""
Creates the layout for colorbar and heatmap, adding new axes for the heatmap
if necessary and modifying the aspect ratio. Does not modify the axes or the
layout if ``self.heatmap`` is ``False`` or ``None``.
Parameters
----------
divider: AxesDivider
An AxesDivider to be passed among all layout calls.
"""
# Ensure matplotlib version compatibility
if make_axes_locatable is None:
raise YellowbrickValueError(
(
"heatmap requires matplotlib 2.0.2 or greater "
"please upgrade matplotlib or set heatmap=False on the visualizer"
)
)
# Create the new axes for the colorbar and heatmap
if divider is None:
divider = make_axes_locatable(self.ax)
# Call to super class ensures that a colorbar is drawn when target is
# continuous.
super(PCA, self).layout(divider)
if self.heatmap:
# Axes for colorbar(for heatmap).
if self._uax is None:
self._uax = divider.append_axes("bottom", size="10%", pad=0.7)
# Axes for heatmap
if self._lax is None:
self._lax = divider.append_axes("bottom", size="15%", pad=0.5)
def fit(self, X, y=None, **kwargs):
"""
Fits the PCA transformer, transforms the data in X, then draws the
decomposition in either 2D or 3D space as a scatter plot.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features.
y : ndarray or Series of length n
An array or series of target or class values.
Returns
-------
self : visualizer
Returns self for use in Pipelines.
"""
# Call super fit to compute features, classes, colors, etc.
super(PCA, self).fit(X=X, y=y, **kwargs)
self.pca_transformer.fit(X)
self.pca_components_ = self.pca_transformer.named_steps["pca"].components_
return self
def transform(self, X, y=None, **kwargs):
"""
Calls the internal `transform` method of the scikit-learn PCA transformer, which
performs a dimensionality reduction on the input features ``X``. Next calls the
``draw`` method of the Yellowbrick visualizer, finally returning a new array of
transformed features of shape ``(len(X), projection)``.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features.
y : ndarray or Series of length n
An array or series of target or class values.
Returns
-------
Xp : ndarray or DataFrame of shape n x m
Returns a new array-like object of transformed features of shape
``(len(X), projection)``.
"""
try:
Xp = self.pca_transformer.transform(X)
self.draw(Xp, y)
return Xp
except NotFittedError:
raise NotFitted.from_estimator(self, "transform")
def draw(self, Xp, y):
"""
Plots a scatterplot of points that represented the decomposition,
`pca_features_`, of the original features, `X`, projected into either 2 or
3 dimensions.
If 2 dimensions are selected, a colorbar and heatmap can also be optionally
included to show the magnitude of each feature value to the component.
Parameters
----------
Xp : array-like of shape (n, 2) or (n, 3)
The matrix produced by the ``transform()`` method.
y : array-like of shape (n,), optional
The target, used to specify the colors of the points.
Returns
-------
self.ax : matplotlib Axes object
Returns the axes that the scatter plot was drawn on.
"""
# Call to super draw which draws the scatter plot.
super(PCA, self).draw(Xp, y)
if self.proj_features:
# Draws projection features in transformed space.
self._draw_projection_features(Xp, y)
if self.projection == 2:
if self.heatmap:
if not self.colormap:
self.colormap = palettes.DEFAULT_SEQUENCE
# TODO: change to pcolormesh instead of imshow per #615 spec
im = self.lax.imshow(
self.pca_components_,
interpolation="none",
cmap=self.colormap,
aspect="auto",
)
plt.colorbar(
im,
cax=self.uax,
orientation="horizontal",
ticks=[self.pca_components_.min(), 0, self.pca_components_.max()],
)
return self.ax
def _draw_projection_features(self, Xp, y):
"""
Draw the projection of features in the transformed space.
Parameters
----------
Xp : array-like of shape (n, 2) or (n, 3)
The matrix produced by the ``transform()`` method.
y : array-like of shape (n,), optional
The target, used to specify the colors of the points.
Returns
-------
self.ax : matplotlib Axes object
Returns the axes that the scatter plot was drawn on.
"""
x_vector = self.pca_components_[0]
y_vector = self.pca_components_[1]
max_x = max(Xp[:, 0])
max_y = max(Xp[:, 1])
if self.projection == 2:
for i in range(self.pca_components_.shape[1]):
self.ax.arrow(
x=0,
y=0,
dx=x_vector[i] * max_x,
dy=y_vector[i] * max_y,
color="r",
head_width=0.05,
width=0.005,
)
self.ax.text(
x_vector[i] * max_x * 1.05,
y_vector[i] * max_y * 1.05,
self.features_[i],
color="r",
)
elif self.projection == 3:
z_vector = self.pca_components_[2]
max_z = max(Xp[:, 2])  # use the z (third) component extent
for i in range(self.pca_components_.shape[1]):
self.ax.plot(
[0, x_vector[i] * max_x],
[0, y_vector[i] * max_y],
[0, z_vector[i] * max_z],
color="r",
)
self.ax.text(
x_vector[i] * max_x * 1.05,
y_vector[i] * max_y * 1.05,
z_vector[i] * max_z * 1.05,
self.features_[i],
color="r",
)
else:
raise YellowbrickValueError("Projection dimensions must be either 2 or 3")
return self.ax
def finalize(self, **kwargs):
"""
Draws the title, labels, legends, heatmap, and colorbar as specified by the
keyword arguments.
"""
super(PCA, self).finalize()
self.ax.set_title("Principal Component Plot")
self.ax.set_xlabel("$PC_1$")
self.ax.set_ylabel("$PC_2$")
if self.projection == 3:
self.ax.set_zlabel("$PC_3$")
if self.heatmap == True:
self.lax.set_xticks(np.arange(-0.5, len(self.features_)))
self.lax.set_xticklabels([])
# Makes the labels centered.
self.lax.set_xticks(np.arange(0, len(self.features_)), minor=True)
self.lax.set_xticklabels(
self.features_, rotation=90, fontsize=12, minor=True
)
self.lax.set_yticks(np.arange(0.5, 2))
self.lax.set_yticklabels(["$PC_1$", "$PC_2$"], va="bottom", fontsize=10)
self.fig.tight_layout()
##########################################################################
## Quick Method
##########################################################################
def pca_decomposition(
X,
y=None,
ax=None,
features=None,
classes=None,
scale=True,
projection=2,
proj_features=False,
colors=None,
colormap=None,
alpha=0.75,
random_state=None,
colorbar=True,
heatmap=False,
show=True,
**kwargs
):
"""
Produce a two or three dimensional principal component plot of the data array ``X``
projected onto its largest sequential principal components. It is common practice
to scale the data array ``X`` before applying a PC decomposition. Variable scaling
can be controlled using the ``scale`` argument.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features.
y : ndarray or Series of length n
An array or series of target or class values.
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in, the current axes
will be used (or generated if required).
features : list, default: None
The names of the features specified by the columns of the input dataset.
The length of this list must match the number of columns in X, otherwise
an exception will be raised on ``fit()``.
classes : list, default: None
The class labels for each class in y, ordered by sorted class index. These
names act as a label encoder for the legend, identifying integer classes
or renaming string labels. If omitted, the class labels will be taken from
the unique values in y.
Note that the length of this list must match the number of unique values in
y, otherwise an exception is raised. This parameter is only used in the
discrete target type case and is ignored otherwise.
scale : bool, default: True
Boolean that indicates if user wants to scale data.
projection : int or string, default: 2
The number of axes to project into, either 2d or 3d. To plot 3d plots
with matplotlib, please ensure a 3d axes is passed to the visualizer,
otherwise one will be created using the current figure.
proj_features : bool, default: False
Boolean that indicates if the user wants to project the features
in the projected space. If True the plot will be similar to a biplot.
colors : list or tuple, default: None
A single color to plot all instances as or a list of colors to color each
instance according to its class in the discrete case or as an ordered
colormap in the sequential case. If not enough colors per class are
specified then the colors are treated as a cycle.
colormap : string or cmap, default: None
The colormap used to create the individual colors. In the discrete case
it is used to compute the number of colors needed for each class and
in the continuous case it is used to create a sequential color map based
on the range of the target.
alpha : float, default: 0.75
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
random_state : int, RandomState instance or None, optional (default None)
This parameter sets the random state on this solver. If the input X is
larger than 500x500 and the number of components to extract is lower
than 80% of the smallest dimension of the data, then the more efficient
`randomized` solver is enabled.
colorbar : bool, default: True
If the target_type is "continuous", draw a colorbar to the right of the
scatter plot. The colorbar axes is accessible using the cax property.
heatmap : bool, default: False
Add a heatmap showing contribution of each feature in the principal components.
Also draws a colorbar for readability purpose. The heatmap is accessible
using lax property and colorbar using uax property.
show : bool, default: True
If True, calls ``show()``, which in turn calls ``plt.show()``; however, you cannot
call ``plt.savefig`` from this signature, nor ``clear_figure``. If False, simply
calls ``finalize()``.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
pca_components_ : ndarray, shape (n_features, n_components)
This describes the magnitude of each feature in the principal components.
This is primarily used to draw the biplots.
classes_ : ndarray, shape (n_classes,)
The class labels that define the discrete values in the target. Only
available if the target type is discrete. This is guaranteed to be
strings even if the classes are a different type.
features_ : ndarray, shape (n_features,)
The names of the features discovered or used in the visualizer that
can be used as an index to access or modify data in X. If a user passes
feature names in, those features are used. Otherwise the columns of a
DataFrame are used or just simply the indices of the data array.
range_ : (min y, max y)
A tuple that describes the minimum and maximum values in the target.
Only available if the target type is continuous.
Examples
--------
>>> from sklearn import datasets
>>> iris = datasets.load_iris()
>>> X = iris.data
>>> y = iris.target
>>> pca_decomposition(X, y, colors=['r', 'g', 'b'], projection=3)
"""
# Instantiate the visualizer
visualizer = PCA(
ax=ax,
features=features,
scale=scale,
projection=projection,
proj_features=proj_features,
colors=colors,
colormap=colormap,
alpha=alpha,
random_state=random_state,
colorbar=colorbar,
heatmap=heatmap,
**kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X, y)
visualizer.transform(X, y)
if show:
visualizer.show()
else:
visualizer.finalize()
# Returns the visualizer object.
return visualizer
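# Minimal sketch of the ``proj_features`` (biplot) and ``heatmap`` options described
# in the docstring above; the iris data mirrors the docstring examples and the call
# below is illustrative rather than part of the original module:
#
#   from sklearn import datasets
#   X, y = datasets.load_iris(return_X_y=True)
#   pca_decomposition(X, y, proj_features=True, heatmap=True, show=False)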
# Alias for PCA
PCADecomposition = PCA
| 2.421875 | 2 |
k2/python/host/k2host/properties.py | Jarvan-Wang/k2 | 144 | 1631 | <reponame>Jarvan-Wang/k2<filename>k2/python/host/k2host/properties.py
# Copyright (c) 2020 Xiaomi Corporation (author: <NAME>)
# See ../../../LICENSE for clarification regarding multiple authors
import torch
from torch.utils.dlpack import to_dlpack
from .fsa import Fsa
from _k2host import _is_valid
from _k2host import _is_top_sorted
from _k2host import _is_arc_sorted
from _k2host import _has_self_loops
from _k2host import _is_acyclic
from _k2host import _is_deterministic
from _k2host import _is_epsilon_free
from _k2host import _is_connected
from _k2host import _is_empty
def is_valid(fsa: Fsa) -> bool:
return _is_valid(fsa.get_base())
def is_top_sorted(fsa: Fsa) -> bool:
return _is_top_sorted(fsa.get_base())
def is_arc_sorted(fsa: Fsa) -> bool:
return _is_arc_sorted(fsa.get_base())
def has_self_loops(fsa: Fsa) -> bool:
return _has_self_loops(fsa.get_base())
def is_acyclic(fsa: Fsa) -> bool:
return _is_acyclic(fsa.get_base())
def is_deterministic(fsa: Fsa) -> bool:
return _is_deterministic(fsa.get_base())
def is_epsilon_free(fsa: Fsa) -> bool:
return _is_epsilon_free(fsa.get_base())
def is_connected(fsa: Fsa) -> bool:
return _is_connected(fsa.get_base())
def is_empty(fsa: Fsa) -> bool:
return _is_empty(fsa.get_base())
| 1.773438 | 2 |
origamibot/core/teletypes/poll_option.py | cmd410/OrigamiBot | 4 | 1632 | from .base import TelegramStructure, Field
class PollOption(TelegramStructure):
text = Field()
voter_count = Field()
def __init__(self,
text: str,
voter_count: int
):
self.text = \
Field(text, [str])
self.voter_count = \
Field(voter_count, [int])
| 2.3125 | 2 |
var/spack/repos/builtin/packages/r-viridislite/package.py | xiki-tempula/spack | 9 | 1633 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RViridislite(RPackage):
"""viridisLite: Default Color Maps from 'matplotlib' (Lite Version)"""
homepage = "https://github.com/sjmgarnier/viridisLite"
url = "https://cloud.r-project.org/src/contrib/viridisLite_0.2.0.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/viridisLite"
version('0.3.0', sha256='780ea12e7c4024d5ba9029f3a107321c74b8d6d9165262f6e64b79e00aa0c2af')
version('0.2.0', sha256='2d4d909f21c51e720bd685f05041ba158294e0a4064e0946d0bd916709818694')
depends_on('[email protected]:', type=('build', 'run'))
| 1.429688 | 1 |
shiSock-0.2.0/test_two/PySock/server.py | AnanyaRamanA/shiSock | 0 | 1634 | <reponame>AnanyaRamanA/shiSock
from re import S
import select
import socket
import queue
import threading
import sys
import pickle
import base64
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.serialization import load_ssh_public_key
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.backends import default_backend
import hashlib
import yaml
import random
import time
class IPNC():
def __init__(self):
pass
def _read_yml(self,file = None):
with open(file) as file:
documents = yaml.full_load(file)
return documents
def _write_yml(self,file = None, dict_data = None,mode = "a+"):
with open(file, mode) as file:
yaml.dump(dict_data, file)
def _add_node(self,file = None, node = None):
try:
read = self._read_yml(file)
if read != None:
read[node[0]]
self._change_node_value(file,node)
else:
raise KeyError
except KeyError:
node_dict = {
node[0] : node[1]
}
self._write_yml(file, node_dict)
def _change_node_value(self,file = None, node = None):
r_yml = self._read_yml(file)
r_yml[node[0]] = node[1]
self._write_yml(file = file, dict_data = r_yml, mode = "w")
def _get_node(self,file = None, key = None, wait = True):
if key == None:
return self._read_yml(file)
if wait:
while True:
r_yml = self._read_yml(file)
try:
value = r_yml[key]
return value
except KeyError:
pass
except TypeError:
pass
else:
r_yml = self._read_yml(file)
try:
value = r_yml[key]
return value
except KeyError:
return None
except TypeError:
pass
def _remove_node(self,file,node):
try:
r_yml = self._read_yml(file = file)
r_yml[node]
r_yml.pop(node)
self._write_yml(file = file, dict_data = r_yml, mode = "w")
except KeyError:
return False
except:
pass
def _name_generator(self,_len_ = 16, onlyText = False):
lower_case = list("abcdefghijklmnopqrstuvwxyz")
upper_case = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
special = list("!@#$%&*?")
number = list("0123456789")
if onlyText:
_all_ = lower_case + upper_case
else:
_all_ = lower_case + upper_case + special + number
random.shuffle(_all_)
return "".join(random.sample(_all_,_len_))
class DSP():
def __init__(
self,
msg : str = None,
DSP_type : str = None,
device_id : int = None,
universalAesKey : bytes = None,
nonce : bytes = None,
aad : str = None,
):
if msg is not None:
self.msg = msg
else:
self.msg = msg
self.DSP_type = DSP_type
self.device_id = device_id
if universalAesKey is not None:
self.UNIVERSAL_AES_KEY = universalAesKey
else:
self.UNIVERSAL_AES_KEY = b'<KEY>'
if nonce is not None:
self.NONCE = nonce
else:
self.NONCE = b'\xfe\x1e1\xc0\xfc`s\xbc6\x9fQ\xb2'
if aad is not None:
self.AAD = aad
else:
self.AAD = b"au$tica&tedbut@u32nencr#cdscypteddatafdrj"
def _messanger(self,MSG = None):
if MSG is not None:
self.msg = MSG
data = f'DSP("{self.msg}","{self.DSP_type}")'
data = pickle.dumps(data)
pickled_data = data
encrypted_data = [self.device_id, self.__encrypt(pickled_data)]
p_e_d = pickle.dumps(encrypted_data)
ret = base64.b64encode(p_e_d)
return ret
def __repr__(self):
return "_main.DSP._"
def __encrypt(self,data):
aesgcm = AESGCM(self.UNIVERSAL_AES_KEY,)
ct = aesgcm.encrypt(
self.NONCE,
data,
self.AAD
)
return ct
def _convert_to_class(self,OBJECT : bytes = None,secure : bool = True, secure_dict : list = None):
try:
OBJECT = base64.b64decode(OBJECT)
OBJECT = pickle.loads(OBJECT)
if secure == True:
if secure_dict is None:
raise TypeError(
"convert_to_class() missing 1 required positional argument: 'secure_lst'")
else:
secure_dict = pickle.loads(base64.b64decode(secure_dict))
aesgcm = AESGCM(secure_dict["aes_key"])
ct = aesgcm.decrypt(
secure_dict["nonce"], OBJECT[-1], secure_dict["aad"])
ct = pickle.loads(ct)
return eval(ct)
else:
aesgcm = AESGCM(self.UNIVERSAL_AES_KEY)
ct = aesgcm.decrypt(self.NONCE, OBJECT[-1], self.AAD)
ct = pickle.loads(ct)
return eval(ct)
except TypeError:
sys.exit()
except ValueError:
print("sender has not done the handshake")
class MAIN(IPNC):
def __init__(self,secure : bool = True,file = None):
"""async_server initializer class that will create the a asyncronouse tcp server.
"""
IPNC.__init__(self)
self.__secure = secure
self.__file_location = file
self.READABLE = []
self.WRITABLE = []
self.INPUTS = []
self.OUTPUTS = []
self.MESSAGE_QUEUES = {}
self.REQUEST_LIST = []
self.REQUEST_RESPONSE_LIST = []
self.MESSAGE_LIST = []
self.__VARIFIED_DEVICES = []
self.__CLIENT_KEYS = {}
self.__CUSTOM_CHANNEL = []
self.__CUSTOM_CHANNEL_MSG_REC = []
self.__CUSTOM_CHANNEL_MSG_SEND = []
self.__VARIFIER_LIST = []
self.__CALLBACK_LOOP = []
self.__RECEIVING_MSG = []
get = self._get_node(file = self.__file_location,key = hashlib.sha256(bytes("key", "utf-8")).digest(), wait = False)
if get is not None:
self.__CLIENT_KEYS = get
self.__VARIFIED_DEVICES.extend(list(get.keys()))
def SERVER(self,address : str = None, port : int = None, listeners : int = None):
self.address = address
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1 )
self.sock.setblocking(0)
self.sock.bind((self.address,self.port))
self.sock.listen(listeners)
print("[SERVER IS ACTIVATED | LISTENING]")
self.INPUTS.append(self.sock)
thread1 = threading.Thread(
target = self.receive_func,
args = (
self.__RECEIVING_MSG,
self.__VARIFIED_DEVICES,
self.__VARIFIER_LIST,
self.__CLIENT_KEYS,
self.OUTPUTS,
self.REQUEST_LIST,
self.REQUEST_RESPONSE_LIST,
self.MESSAGE_LIST,
self.__CUSTOM_CHANNEL_MSG_REC,
)
)
thread2 = threading.Thread(
target = self.send_func,
args = (
self.WRITABLE,
self.MESSAGE_QUEUES,
self.MESSAGE_LIST,
self.REQUEST_LIST,
self.REQUEST_RESPONSE_LIST,
self.__VARIFIER_LIST,
self.__CUSTOM_CHANNEL_MSG_SEND
)
)
thread3 = threading.Thread(
target = self.__callback_loop,
args = (
self.__CALLBACK_LOOP,
)
)
# thread1.daemon = True
thread1.start()
# thread2.daemon = True
thread2.start()
# thread3.daemon = True
thread3.start()
thread = threading.Thread(target = self.__server)
# thread.daemon = True
thread.start()
def __server(self):
data_recv_len = []
while True:
readable, writable, exceptions = select.select(self.INPUTS, self.OUTPUTS, self.INPUTS)
# handling the inputs
for r in readable:
if r is self.sock:
connection,addr = r.accept()
connection.setblocking(0)
self.INPUTS.append(connection)
self.MESSAGE_QUEUES[connection] = queue.Queue()
else:
ini = list(zip(*data_recv_len))
if len(ini) == 0 or r not in ini[0]:
try:
data_len = pickle.loads(base64.b64decode(r.recv(32).decode().strip("0").encode("utf-8")))
except ConnectionResetError:
print("Client Disconnected")
if r in self.OUTPUTS:
self.OUTPUTS.remove(r)
if r in self.WRITABLE:
self.WRITABLE.remove(r)
self.INPUTS.remove(r)
r.close()
del self.MESSAGE_QUEUES[r]
continue
except Exception as e:
pass
if data_len:
if type(data_len) == type([]):
data_recv_len.append(
[
r,
data_len[0]
]
)
else:
print("User Disconnected")
if r in self.OUTPUTS:
self.OUTPUTS.remove(r)
self.INPUTS.remove(r)
if r in self.WRITABLE:
self.WRITABLE.remove(r)
r.close()
del self.MESSAGE_QUEUES[r]
continue
else:
qwe = list(zip(*data_recv_len))
INDEX = qwe[0].index(r)
try:
recv_len = data_recv_len.pop(INDEX)[1]
data = r.recv(recv_len)
try:
data = data.decode().strip("0").encode("utf-8")
except:
print("Error in decoding")
self.__RECEIVING_MSG.append(data)
self.MESSAGE_QUEUES[r].put(pickle.loads(base64.b64decode(data))[0])
if r not in self.OUTPUTS:
self.OUTPUTS.append(r)
except Exception as e:
print("User Disconnected")
readable.remove(r)
self.INPUTS.remove(r)
writable.remove(r)
self.OUTPUTS.remove(r)
if r in self.WRITABLE:
self.WRITABLE.remove(r)
del self.MESSAGE_QUEUES[r]
continue
# handling the outputs
for w in writable:
if w not in self.WRITABLE:
self.WRITABLE.append(w)
# handling the errors
for e in exceptions:
self.INPUTS.remove(e)
if e in self.OUTPUTS:
self.OUTPUTS.remove(e)
e.close()
del self.MESSAGE_QUEUES[e]
def receive_func(self, __receiving_msg,__varified_devices, __varifier_lst, __client_keys, __outputs, __request_lst, __request_res_lst, __message_lst, __custom_c_m_r):
# __receiving_msg = self.__RECEIVING_MSG,
# __varified_devices = self.__VARIFIED_DEVICES,
# __varifier_lst = self.__VARIFIER_LIST,
# __client_keys = self.__CLIENT_KEYS,
# __outputs = self.OUTPUTS,
# __request_lst = self.REQUEST_LIST
# __request_res_lst = self.REQUEST_RESPONSE_LIST
# __message_lst = self.MESSAGE_LIS
# __custom_c_m_r = self.__CUSTOM_CHANNEL_MSG_REC
while True:
try:
for INDEX,_data_ in enumerate(__receiving_msg):
data = pickle.loads(base64.b64decode(_data_))
# print(f"data[0] : {data[0]}")
# print(f"__varified_devices : {__varified_devices}")
if data[0] not in __varified_devices:
_recv_ = DSP()._convert_to_class(_data_, secure = False)
if _recv_.DSP_type == "username_secure":
resolved_data = eval(_recv_.msg)
aes_key = AESGCM.generate_key(256)
nonce = os.urandom(32)
aad = bytes(self._name_generator(),"utf-8")
qw = {
"aes_key" : aes_key,
"nonce" : nonce,
"aad" : aad,
}
pickle_qw = pickle.dumps(qw)
b64_aes_key_pack = base64.b64encode(pickle_qw)
key = load_ssh_public_key(
bytes(
resolved_data["data"],
"utf-8"
),
backend=default_backend()
)
ciphertext = key.encrypt(
b64_aes_key_pack,
padding.OAEP(
mgf = padding.MGF1(algorithm = hashes.SHA256()),
algorithm = hashes.SHA256(),
label = None
)
)
ciphertext = base64.b64encode(ciphertext)
prepare_data = {"key" : ciphertext}
dsp_data = DSP(
DSP_type="username_secure_response"
)._messanger(
MSG = prepare_data
)
dsp_data = [resolved_data["username"],dsp_data]
__varifier_lst.append(dsp_data)
__varified_devices.append(resolved_data["username"])
__client_keys[resolved_data["username"]] = b64_aes_key_pack
get = self._get_node(
file = self.__file_location,
key = hashlib.sha256(bytes("key","utf-8")).digest(),
wait = False
)
if get is not None:
get[resolved_data["username"]] = b64_aes_key_pack
self._add_node(
file = self.__file_location,
node = [
hashlib.sha256(bytes("key","utf-8")).digest(),
get
]
)
else:
self._add_node(
file = self.__file_location,
node = [
hashlib.sha256(bytes("key","utf-8")).digest(),
{
resolved_data["username"] : b64_aes_key_pack
}
]
)
__receiving_msg.pop(INDEX)
else:
aes_key_pack = __client_keys[data[0]]
_recv_ = DSP()._convert_to_class(
OBJECT = _data_,
secure = True,
secure_dict = aes_key_pack
)
if _recv_.DSP_type == "DSP_REQ":
try:
resolved_data = eval(_recv_.msg)
resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
__request_lst.append(
[
resolved_data["target_name"],
_recv_.msg
]
)
__receiving_msg.remove(_data_)
except:
pass
elif _recv_.DSP_type == "DSP_REQ_RES":
try:
resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
__request_res_lst.append(
[
resolved_data["target_name"],
_recv_.msg
]
)
__receiving_msg.remove(_data_)
except:
pass
elif _recv_.DSP_type == "DSP_MSG":
try:
resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
__message_lst.append(
[
resolved_data['target_name'],
_recv_.msg
]
)
__receiving_msg.remove(_data_)
except:
pass
elif _recv_.DSP_type in self.__CUSTOM_CHANNEL:
try:
resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
__custom_c_m_r.append(resolved_data)
__receiving_msg.remove(_data_)
except:
pass
except:
pass
def send_func(self,Writable,message_q,message_list,requestList,requestResList,varifierList,customChannelMessageSend):
while True:
# print(f"Writable : {Writable}")
# time.sleep(2)
for s in Writable:
if s._closed == True and s.fileno() == -1:
Writable.remove(s)
# try:
try:
username = message_q[s].get_nowait()
message_q[s].put(username)
msg_lst = list(list(zip(*message_list)))
req_lst = list(list(zip(*requestList)))
req_res_lst = list(list(zip(*requestResList)))
vari_lst = list(list(zip(*varifierList)))
send_c_msg = list(zip(*customChannelMessageSend))
except KeyError:
pass
if len(msg_lst) > 0:
if username in msg_lst[0]:
INDEX = msg_lst[0].index(username)
aes_key_pack = self.__CLIENT_KEYS[username]
aes_key_pack = pickle.loads(base64.b64decode(aes_key_pack))
dsp_data = DSP(
DSP_type = "DSP_MSG",
universalAesKey = aes_key_pack["aes_key"],
nonce = aes_key_pack["nonce"],
aad = aes_key_pack["aad"]
)._messanger(
MSG = f"{msg_lst[1][INDEX]}"
).decode().center(len(msg_lst[1][INDEX]) + 100, "|").encode("utf-8")
try:
s.send(bytes(f"{len(dsp_data)}".center(16,"|"),"utf-8"))
s.send(
dsp_data
)
message_list.pop(INDEX)
except OSError:
pass
if len(req_lst) > 0:
if username in req_lst[0]:
INDEX = req_lst[0].index(username)
try:
aes_key_pack = self.__CLIENT_KEYS[username]
except KeyError:
continue
aes_key_pack = pickle.loads(base64.b64decode(aes_key_pack))
dsp_data = DSP(
DSP_type = "DSP_handshake_request",
universalAesKey = aes_key_pack["aes_key"],
nonce = aes_key_pack["nonce"],
aad = aes_key_pack["aad"]
)._messanger(
MSG = f"{req_lst[1][INDEX]}"
).decode().center(len(req_lst[1][INDEX]) + 100, "|").encode("utf-8")
s.send(bytes(f"{len(dsp_data)+100}".center(16,"|"),"utf-8"))
s.send(
dsp_data
)
requestList.pop(INDEX)
if len(req_res_lst) > 0:
if username in req_res_lst[0]:
INDEX = req_res_lst[0].index(username)
aes_key_pack = self.__CLIENT_KEYS[username]
aes_key_pack = pickle.loads(base64.b64decode(aes_key_pack))
dsp_data = DSP(
DSP_type = "DSP_handshake_request_res",
universalAesKey = aes_key_pack["aes_key"],
nonce = aes_key_pack["nonce"],
aad = aes_key_pack["aad"]
)._messanger(
MSG = f"{req_res_lst[1][INDEX]}"
).decode().center(len(req_res_lst[1][INDEX]) + 100, "|").encode("utf-8")
s.send(bytes(f"{len(dsp_data)+100}".center(16,"|"),"utf-8"))
s.send(
dsp_data
)
requestResList.pop(INDEX)
if len(vari_lst) > 0:
if username in vari_lst[0]:
INDEX = vari_lst[0].index(username)
s.send(bytes(f"{len(vari_lst[1][INDEX])}".center(16,"|"),"utf-8"))
s.send(
vari_lst[1][INDEX]
)
varifierList.pop(INDEX)
if len(send_c_msg) > 0:
if username in send_c_msg[0]:
INDEX = send_c_msg[0].index(username)
s.send(bytes(f"{len(send_c_msg[1][INDEX])}".center(16,"|"),"utf-8"))
s.send(send_c_msg[1][INDEX])
customChannelMessageSend.pop(INDEX)
# except:
# pass
def CREATE_CHANNEL(self,channel_name = None, multiple : bool = False):
if multiple:
if type(channel_name) == type([]):
for channel in channel_name:
if channel not in self.__CUSTOM_CHANNEL:
self.__CUSTOM_CHANNEL.append(channel)
else:
print(f"Channel : {channel} already exists.")
else:
raise TypeError("When 'mutliple' is to True then channel_name should be a list of multiple channel names")
else:
if channel_name not in self.__CUSTOM_CHANNEL:
self.__CUSTOM_CHANNEL.append(channel_name)
def LISTEN(self,channel : str = None,function : object = None,args = None):
if channel is not None:
found = False
index = None
if channel in self.__CUSTOM_CHANNEL:
for i,d in enumerate(self.__CUSTOM_CHANNEL_MSG_REC):
if d["channel"] == channel:
found = True
index = i
break
if found:
if args is None:
p_data = self.__CUSTOM_CHANNEL_MSG_REC.pop(index)
self.__CALLBACK_LOOP.append([function,[p_data]])
else:
p_data = self.__CUSTOM_CHANNEL_MSG_REC.pop(index)
args = list(args)
args.insert(0,p_data)
self.__CALLBACK_LOOP.append([function,args])
else:
raise TypeError("'channel' should not be None")
def __callback_loop(self,__callback_loop):
while True:
for index,func in enumerate(__callback_loop):
__callback_loop.pop(index)
func[0](*func[1])
def SEND(self,channel_name,target_name,data):
if channel_name in self.__CUSTOM_CHANNEL:
key_pack = self.__CLIENT_KEYS[target_name]
key_pack = pickle.loads(base64.b64decode(key_pack))
dsp_data = DSP(
DSP_type = channel_name,
universalAesKey=key_pack["aes_key"],
nonce = key_pack["nonce"],
aad= key_pack["aad"]
)._messanger(
MSG = base64.b64encode(pickle.dumps(data))
)
self.__CUSTOM_CHANNEL_MSG_SEND.append(
[
target_name,
dsp_data
]
)
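# Minimal usage sketch for the ``server`` wrapper defined below. This is a hedged
# illustration: the YAML filename, channel name and handler are assumptions, and the
# keys inside the received packet dict depend on what the connected client sends.
#
#   srv = server(file="keys.yaml")
#   srv.SERVER(address="0.0.0.0", port=5555, listeners=5)
#   srv.CREATE_CHANNEL("chat")
#
#   def on_chat(packet):
#       # ``packet`` is the dict received on the "chat" channel
#       srv.SEND("chat", packet.get("sender_name"), {"echo": packet.get("data")})
#
#   while True:
#       srv.LISTEN("chat", on_chat)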
class server():
def __init__(self, file = None, debug : bool = False, MTCL : bool = True, MPCL : bool = False, safeMode : bool = True):
"""
This class allows the user to create a multi-client server.
args:
    file : str = None -> path to a YAML file that stores all the keys and configurations.
        If it is not specified, a TypeError is raised.
    debug, MTCL, MPCL, safeMode -> optional flags; they are currently not forwarded to MAIN().
"""
if not file:
raise TypeError("asyncServer() missing 1 required positional argument: 'file'")
__parent = MAIN(file=file)  # MAIN() only accepts 'secure' and 'file'; the other flags are not forwarded
self.SERVER = __parent.SERVER
self.CREATE_CHANNEL = __parent.CREATE_CHANNEL
self.LISTEN = __parent.LISTEN
self.SEND = __parent.SEND
| 2.1875 | 2 |
server/www/packages/packages-windows/x86/ldap3/utils/asn1.py | zhoulhb/teleport | 640 | 1635 | """
"""
# Created on 2015.08.19
#
# Author: <NAME>
#
# Copyright 2015 - 2018 <NAME>
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from pyasn1 import __version__ as pyasn1_version
from pyasn1.codec.ber import decoder # for usage in other modules
from pyasn1.codec.ber.encoder import Encoder # for monkeypatching of boolean value
from ..core.results import RESULT_CODES
from ..utils.conv import to_unicode
from ..protocol.convert import referrals_to_list
CLASSES = {(False, False): 0, # Universal
(False, True): 1, # Application
(True, False): 2, # Context
(True, True): 3} # Private
# Monkeypatching of pyasn1 for encoding Boolean with the value 0xFF for TRUE
# THIS IS NOT PART OF THE FAST BER DECODER
if pyasn1_version == 'xxx0.2.3':
from pyasn1.codec.ber.encoder import tagMap, BooleanEncoder, encode
from pyasn1.type.univ import Boolean
from pyasn1.compat.octets import ints2octs
class BooleanCEREncoder(BooleanEncoder):
_true = ints2octs((255,))
tagMap[Boolean.tagSet] = BooleanCEREncoder()
else:
from pyasn1.codec.ber.encoder import tagMap, typeMap, AbstractItemEncoder
from pyasn1.type.univ import Boolean
from copy import deepcopy
class LDAPBooleanEncoder(AbstractItemEncoder):
supportIndefLenMode = False
if pyasn1_version <= '0.2.3':
from pyasn1.compat.octets import ints2octs
_true = ints2octs((255,))
_false = ints2octs((0,))
def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
return value and self._true or self._false, 0
elif pyasn1_version <= '0.3.1':
def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
return value and (255,) or (0,), False, False
elif pyasn1_version <= '0.3.4':
def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
return value and (255,) or (0,), False, False
elif pyasn1_version <= '0.3.7':
def encodeValue(self, value, encodeFun, **options):
return value and (255,) or (0,), False, False
else:
def encodeValue(self, value, asn1Spec, encodeFun, **options):
return value and (255,) or (0,), False, False
customTagMap = deepcopy(tagMap)
customTypeMap = deepcopy(typeMap)
customTagMap[Boolean.tagSet] = LDAPBooleanEncoder()
customTypeMap[Boolean.typeId] = LDAPBooleanEncoder()
encode = Encoder(customTagMap, customTypeMap)
# end of monkey patching
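# Rough illustration of what the patched encoder changes. The byte strings shown are
# the expected BER TLVs; the exact behaviour depends on the installed pyasn1 version:
#
#   from pyasn1.type.univ import Boolean
#   encode(Boolean(True))    # -> b'\x01\x01\xff'  (TRUE as 0xFF, as LDAP servers expect)
#   # the unpatched BER encoder would emit b'\x01\x01\x01' for the same value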
# a fast BER decoder for LDAP responses only
def compute_ber_size(data):
"""
Compute size according to BER definite length rules
Returns size of value and value offset
"""
if data[1] <= 127: # BER definite length - short form. Highest bit of byte 1 is 0, message length is in the last 7 bits - Value can be up to 127 bytes long
return data[1], 2
else: # BER definite length - long form. Highest bit of byte 1 is 1, last 7 bits counts the number of following octets containing the value length
bytes_length = data[1] - 128
value_length = 0
cont = bytes_length
for byte in data[2: 2 + bytes_length]:
cont -= 1
value_length += byte * (256 ** cont)
return value_length, bytes_length + 2
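# Worked examples of the definite-length rules above (illustrative only):
#   compute_ber_size(get_bytes(b'\x30\x05\x02\x01\x01'))        # -> (5, 2)
#       0x05 <= 127, so the short form applies: value is 5 bytes, offset 2
#   compute_ber_size(get_bytes(b'\x30\x82\x01\x00' + b'\x00'))  # -> (256, 4)
#       0x82 means 2 length octets follow: 0x01 0x00 -> 256, offset 2 + 2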
def decode_message_fast(message):
ber_len, ber_value_offset = compute_ber_size(get_bytes(message[:10])) # get start of sequence, at maximum 3 bytes for length
decoded = decode_sequence(message, ber_value_offset, ber_len + ber_value_offset, LDAP_MESSAGE_CONTEXT)
return {
'messageID': decoded[0][3],
'protocolOp': decoded[1][2],
'payload': decoded[1][3],
'controls': decoded[2][3] if len(decoded) == 3 else None
}
def decode_sequence(message, start, stop, context_decoders=None):
decoded = []
while start < stop:
octet = get_byte(message[start])
ber_class = CLASSES[(bool(octet & 0b10000000), bool(octet & 0b01000000))]
ber_constructed = bool(octet & 0b00100000)
ber_type = octet & 0b00011111
ber_decoder = DECODERS[(ber_class, octet & 0b00011111)] if ber_class < 2 else None
ber_len, ber_value_offset = compute_ber_size(get_bytes(message[start: start + 10]))
start += ber_value_offset
if ber_decoder:
value = ber_decoder(message, start, start + ber_len, context_decoders) # call value decode function
else:
# try:
value = context_decoders[ber_type](message, start, start + ber_len) # call value decode function for context class
# except KeyError:
# if ber_type == 3: # Referral in result
# value = decode_sequence(message, start, start + ber_len)
# else:
# raise # re-raise, should never happen
decoded.append((ber_class, ber_constructed, ber_type, value))
start += ber_len
return decoded
def decode_integer(message, start, stop, context_decoders=None):
first = message[start]
value = -1 if get_byte(first) & 0x80 else 0
for octet in message[start: stop]:
value = value << 8 | get_byte(octet)
return value
def decode_octet_string(message, start, stop, context_decoders=None):
return message[start: stop]
def decode_boolean(message, start, stop, context_decoders=None):
return False if message[start: stop] == 0 else True
def decode_bind_response(message, start, stop, context_decoders=None):
return decode_sequence(message, start, stop, BIND_RESPONSE_CONTEXT)
def decode_extended_response(message, start, stop, context_decoders=None):
return decode_sequence(message, start, stop, EXTENDED_RESPONSE_CONTEXT)
def decode_intermediate_response(message, start, stop, context_decoders=None):
return decode_sequence(message, start, stop, INTERMEDIATE_RESPONSE_CONTEXT)
def decode_controls(message, start, stop, context_decoders=None):
return decode_sequence(message, start, stop, CONTROLS_CONTEXT)
def ldap_result_to_dict_fast(response):
response_dict = dict()
response_dict['result'] = int(response[0][3]) # resultCode
response_dict['description'] = RESULT_CODES[response_dict['result']]
response_dict['dn'] = to_unicode(response[1][3], from_server=True) # matchedDN
response_dict['message'] = to_unicode(response[2][3], from_server=True) # diagnosticMessage
if len(response) == 4:
response_dict['referrals'] = referrals_to_list([to_unicode(referral[3], from_server=True) for referral in response[3][3]]) # referrals
else:
response_dict['referrals'] = None
return response_dict
######
if str is not bytes: # Python 3
def get_byte(x):
return x
def get_bytes(x):
return x
else: # Python 2
def get_byte(x):
return ord(x)
def get_bytes(x):
return bytearray(x)
DECODERS = {
# Universal
(0, 1): decode_boolean, # Boolean
(0, 2): decode_integer, # Integer
(0, 4): decode_octet_string, # Octet String
(0, 10): decode_integer, # Enumerated
(0, 16): decode_sequence, # Sequence
(0, 17): decode_sequence, # Set
# Application
(1, 1): decode_bind_response, # Bind response
(1, 4): decode_sequence, # Search result entry
(1, 5): decode_sequence, # Search result done
(1, 7): decode_sequence, # Modify response
(1, 9): decode_sequence, # Add response
(1, 11): decode_sequence, # Delete response
(1, 13): decode_sequence, # ModifyDN response
(1, 15): decode_sequence, # Compare response
(1, 19): decode_sequence, # Search result reference
(1, 24): decode_extended_response, # Extended response
(1, 25): decode_intermediate_response, # intermediate response
(2, 3): decode_octet_string #
}
BIND_RESPONSE_CONTEXT = {
7: decode_octet_string # SaslCredentials
}
EXTENDED_RESPONSE_CONTEXT = {
10: decode_octet_string, # ResponseName
11: decode_octet_string # Response Value
}
INTERMEDIATE_RESPONSE_CONTEXT = {
0: decode_octet_string, # IntermediateResponseName
1: decode_octet_string # IntermediateResponseValue
}
LDAP_MESSAGE_CONTEXT = {
0: decode_controls, # Controls
3: decode_sequence # Referral
}
CONTROLS_CONTEXT = {
0: decode_sequence # Control
}
| 1.789063 | 2 |
lib/utils/arg_scope.py | SimeonZhang/detectron2_tensorflow | 3 | 1636 | import copy
from contextlib import contextmanager
from functools import wraps
from collections import defaultdict
import tensorflow as tf
_ArgScopeStack = []
@contextmanager
def arg_scope(layers, **kwargs):
"""
Args:
layers (list or layer): layer or list of layers to apply the arguments.
Returns:
a context where all appearance of these layer will by default have the
arguments specified by kwargs.
Example:
.. code-block:: python
with arg_scope(Conv2D, kernel_shape=3, nl=tf.nn.relu, out_channel=32):
x = Conv2D('conv0', x)
x = Conv2D('conv1', x)
x = Conv2D('conv2', x, out_channel=64) # override argscope
"""
if not isinstance(layers, list):
layers = [layers]
for l in layers:
assert hasattr(l, '__arg_scope_enabled__'), "Argscope not supported for {}".format(l)
# need to deepcopy so that changes to new_scope do not affect the outer scope
new_scope = copy.deepcopy(get_arg_scope())
for l in layers:
new_scope[l.__name__].update(kwargs)
_ArgScopeStack.append(new_scope)
yield
del _ArgScopeStack[-1]
def get_arg_scope():
"""
Returns:
dict: the current argscope.
An argscope is a dict of dict: ``dict[layername] = {arg: val}``
"""
if len(_ArgScopeStack) > 0:
return _ArgScopeStack[-1]
else:
return defaultdict(dict)
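# Minimal combined sketch of arg_scope with an @add_arg_scope-decorated layer.
# ``MyConv`` and ``x`` are hypothetical names, not defined in this module:
#
#   @add_arg_scope
#   class MyConv:
#       def __init__(self, name, x, kernel_shape=1, out_channel=16):
#           ...
#
#   with arg_scope([MyConv], kernel_shape=3, out_channel=32):
#       y = MyConv('conv0', x)                   # picks up kernel_shape=3, out_channel=32
#       z = MyConv('conv1', y, out_channel=64)   # explicit kwargs still override the scope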
def add_arg_scope(cls):
"""Decorator for function to support argscope
Example:
.. code-block:: python
from mylib import MyClass
myfunc = add_arg_scope(MyClass)
Args:
    cls: A class whose ``__init__`` should pick up default keyword arguments
        from the current argscope.
Remarks:
    Only keyword arguments registered in the current argscope for ``cls`` are
    injected; keyword arguments passed explicitly always take precedence.
Returns:
    The decorated class.
"""
original_init = cls.__init__
@wraps(original_init)
def wrapped_init(self, *args, **kwargs):
actual_args = copy.copy(get_arg_scope()[cls.__name__])
actual_args.update(kwargs)
instance = original_init(self, *args, **actual_args)
return instance
cls.__arg_scope_enabled__ = True
cls.__init__ = wrapped_init
return cls
| 2.765625 | 3 |
CORN-TEST/textfsm_parse.py | AnkitDeshwal89/NETMIKO | 0 | 1637 | <filename>CORN-TEST/textfsm_parse.py
import textfsm
import subprocess
import random
res = subprocess.run('ifconfig',stdout=subprocess.PIPE)
intstatus = res.stdout.decode('ascii')
with open("datafile","w+") as a:
a.write(intstatus)
a.close()
template_file= "ifconfig-template.template"
template = open(template_file)
with open("datafile") as f:
raw_data = f.read()
re_table = textfsm.TextFSM(template)
data = re_table.ParseText(raw_data)
print(data)
NL = []
for x in data:
NLD = {
'Interface' : x[0].split(':')[0],
'TX' : int(x[1])+int(random.randint(1,100))
}
NL.append(NLD)
print(NL)
import json
print('#'*12)
print(json.dumps(NL))
#Enter template FileName :ifconfig-template.template
#Input Data file : ifconfig_output.txt
| 2.40625 | 2 |
classes.py | jared-jorgenson/mini_game | 0 | 1638 | import pygame
class Player(pygame.sprite.Sprite):
death = [pygame.image.load('Images/death1.png'), pygame.image.load('Images/death2.png'),
pygame.image.load('Images/death3.png'),
pygame.image.load('Images/death4.png'), pygame.image.load('Images/death5.png'),
pygame.image.load('Images/death6.png'),
pygame.image.load('Images/death7.png'), pygame.image.load('Images/death8.png'),
pygame.image.load('Images/death9.png'),
pygame.image.load('Images/death10.png'), pygame.image.load('Images/death11.png'),
pygame.image.load('Images/death12.png'),
pygame.image.load('Images/death13.png'), pygame.image.load('Images/death14.png'),
pygame.image.load('Images/death15.png'),
pygame.image.load('Images/death16.png'), pygame.image.load('Images/death17.png'),
pygame.image.load('Images/death18.png'),pygame.image.load('Images/death19.png'),
pygame.image.load('Images/death20.png')]
p1walkLeft = [pygame.image.load('Images/p1left.png'), pygame.image.load('Images/p1left1.png'),
pygame.image.load('Images/p1left2.png'),
pygame.image.load('Images/p1left3.png'), pygame.image.load('Images/p1left2.png'),
pygame.image.load('Images/p1left1.png'), pygame.image.load('Images/p1left.png')]
p1walkLeftshield = [pygame.image.load('Images/p1leftshield.png'), pygame.image.load('Images/p1left1shield.png'),
pygame.image.load('Images/p1left2shield.png'),
pygame.image.load('Images/p1left3shield.png'), pygame.image.load('Images/p1left2shield.png'),
pygame.image.load('Images/p1left1shield.png'), pygame.image.load('Images/p1leftshield.png')]
p1walkRight = [pygame.image.load('Images/p1right.png'), pygame.image.load('Images/p1right1.png'),
pygame.image.load('Images/p1right2.png'),
pygame.image.load('Images/p1right3.png'), pygame.image.load('Images/p1right2.png'),
pygame.image.load('Images/p1right1.png'), pygame.image.load('Images/p1right.png')]
p1walkRightshield = [pygame.image.load('Images/p1rightshield.png'), pygame.image.load('Images/p1right1shield.png'),
pygame.image.load('Images/p1right2shield.png'),
pygame.image.load('Images/p1right3shield.png'), pygame.image.load('Images/p1right2shield.png'),
pygame.image.load('Images/p1right1shield.png'), pygame.image.load('Images/p1rightshield.png')]
p1walkFront = [pygame.image.load('Images/p1front.png'), pygame.image.load('Images/p1front1.png'),
pygame.image.load('Images/p1front2.png'),
pygame.image.load('Images/p1front3.png'), pygame.image.load('Images/p1front2.png'),
pygame.image.load('Images/p1front1.png'), pygame.image.load('Images/p1front.png')]
p1walkFrontshield = [pygame.image.load('Images/p1frontshield.png'), pygame.image.load('Images/p1front1shield.png'),
pygame.image.load('Images/p1front2shield.png'),
pygame.image.load('Images/p1front3shield.png'), pygame.image.load('Images/p1front2shield.png'),
pygame.image.load('Images/p1front1shield.png'), pygame.image.load('Images/p1frontshield.png')]
p1walkBack = [pygame.image.load('Images/p1back.png'), pygame.image.load('Images/p1back1.png'),
pygame.image.load('Images/p1back2.png'),
pygame.image.load('Images/p1back3.png'), pygame.image.load('Images/p1back2.png'),
pygame.image.load('Images/p1back1.png'), pygame.image.load('Images/p1back.png')]
p1walkBackshield = [pygame.image.load('Images/p1backshield.png'), pygame.image.load('Images/p1back1shield.png'),
pygame.image.load('Images/p1back2shield.png'),
pygame.image.load('Images/p1back3shield.png'), pygame.image.load('Images/p1back2shield.png'),
pygame.image.load('Images/p1back1shield.png'), pygame.image.load('Images/p1backshield.png')]
p2walkLeft = [pygame.image.load('Images/p2left.png'), pygame.image.load('Images/p2left1.png'),
pygame.image.load('Images/p2left2.png'),
pygame.image.load('Images/p2left3.png'), pygame.image.load('Images/p2left2.png'),
pygame.image.load('Images/p2left1.png'), pygame.image.load('Images/p2left.png')]
p2walkRight = [pygame.image.load('Images/p2right.png'), pygame.image.load('Images/p2right1.png'),
pygame.image.load('Images/p2right2.png'),
pygame.image.load('Images/p2right3.png'), pygame.image.load('Images/p2right2.png'),
pygame.image.load('Images/p2right1.png'), pygame.image.load('Images/p2right.png')]
p2walkFront = [pygame.image.load('Images/p2front.png'), pygame.image.load('Images/p2front1.png'),
pygame.image.load('Images/p2front2.png'),
pygame.image.load('Images/p2front3.png'), pygame.image.load('Images/p2front2.png'),
pygame.image.load('Images/p2front1.png'), pygame.image.load('Images/p2front.png')]
p2walkBack = [pygame.image.load('Images/p2back.png'), pygame.image.load('Images/p2back1.png'),
pygame.image.load('Images/p2back2.png'),
pygame.image.load('Images/p2back3.png'), pygame.image.load('Images/p2back2.png'),
pygame.image.load('Images/p2back1.png'), pygame.image.load('Images/p2back.png')]
p2walkLeftshield = [pygame.image.load('Images/p2leftshield.png'), pygame.image.load('Images/p2left1shield.png'),
pygame.image.load('Images/p2left2shield.png'),
pygame.image.load('Images/p2left3shield.png'), pygame.image.load('Images/p2left2shield.png'),
pygame.image.load('Images/p2left1shield.png'), pygame.image.load('Images/p2leftshield.png')]
p2walkRightshield = [pygame.image.load('Images/p2rightshield.png'), pygame.image.load('Images/p2right1shield.png'),
pygame.image.load('Images/p2right2shield.png'),
pygame.image.load('Images/p2right3shield.png'), pygame.image.load('Images/p2right2shield.png'),
pygame.image.load('Images/p2right1shield.png'), pygame.image.load('Images/p2rightshield.png')]
p2walkFrontshield = [pygame.image.load('Images/p2frontshield.png'), pygame.image.load('Images/p2front1shield.png'),
pygame.image.load('Images/p2front2shield.png'),
pygame.image.load('Images/p2front3shield.png'), pygame.image.load('Images/p2front2shield.png'),
pygame.image.load('Images/p2front1shield.png'), pygame.image.load('Images/p2frontshield.png')]
p2walkBackshield = [pygame.image.load('Images/p2backshield.png'), pygame.image.load('Images/p2back1shield.png'),
pygame.image.load('Images/p2back2shield.png'),
pygame.image.load('Images/p2back3shield.png'), pygame.image.load('Images/p2back2shield.png'),
pygame.image.load('Images/p2back1shield.png'), pygame.image.load('Images/p2backshield.png')]
# Constructor function
def __init__(self, x, y, number):
super().__init__()
self.image = pygame.Surface([24, 28])
self.image.fill((0,0,0))
self.rect = self.image.get_rect()
self.rect.y = y
self.rect.x = x
self.front = True
self.back = False
self.left = False
self.right = False
self.number = number
self.change_x = 0
self.change_y = 0
self.walkCount = 0
self.walls = None
self.alive = True
self.canmove = True
self.deathCount = 0
self.gotomenu=False
self.speed=3
self.superspeed=False
self.superspeedcount=0
self.shield=False
self.shieldcount=0
self.megabombs=False
self.megabombcount = 0
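    # changespeed() below accumulates movement deltas from key press/release
    # events. While the superspeed powerup is active it bumps the movement
    # speed from 3 to 6 (applied once both deltas are zero, i.e. between
    # moves), and reverts to 3 once superspeedcount -- incremented elsewhere,
    # presumably by the game loop -- reaches 150.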
def changespeed(self, x, y):
self.change_x += x
self.change_y += y
if self.superspeed and self.change_x==0 and self.change_y==0:
self.speed=6
if self.superspeedcount>=150:
self.superspeed = False
self.speed=3
self.superspeedcount=0
def update(self):
if self.canmove:
self.rect.x += self.change_x
if self.change_x <0:
self.left=True
self.right=False
self.front=False
self.back=False
elif self.change_x >0:
self.left=False
self.right=True
self.front=False
self.back=False
block_hit_list = pygame.sprite.spritecollide(self, self.walls, False)
for block in block_hit_list:
if self.change_x > 0:
self.rect.right = block.rect.left
else:
self.rect.left = block.rect.right
self.rect.y += self.change_y
if self.change_y <0:
self.left=False
self.right=False
self.front=False
self.back=True
elif self.change_y >0:
self.left=False
self.right=False
self.front=True
self.back=False
block_hit_list = pygame.sprite.spritecollide(self, self.walls, False)
for block in block_hit_list:
if self.change_y > 0:
self.rect.bottom = block.rect.top
else:
self.rect.top = block.rect.bottom
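    # draw() picks the sprite for the current facing direction. Each walk-cycle
    # list above holds 7 preloaded frames; walkCount wraps at 21, so
    # walkCount // 3 advances one frame every 3 ticks. Note that the standing
    # poses are re-read from disk with pygame.image.load on every frame --
    # preloading them once, as the walk lists do, would avoid repeated disk reads.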
def draw(self, screen):
if self.number == 1:
screen.blit(pygame.image.load('Images2/'+str(self.megabombcount)+'megabombs.png'), (2, 0))
if self.alive:
if self.front:
if self.shield:
if self.change_y == 0:
screen.blit(pygame.image.load('Images/p1frontshield.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p1walkFrontshield[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
else:
if self.change_y == 0:
screen.blit(pygame.image.load('Images/p1front.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p1walkFront[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
elif self.back:
if self.shield:
if self.change_y == 0:
screen.blit(pygame.image.load('Images/p1backshield.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p1walkBackshield[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
else:
if self.change_y == 0:
screen.blit(pygame.image.load('Images/p1back.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p1walkBack[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
elif self.left:
if self.shield:
if self.change_x == 0:
screen.blit(pygame.image.load('Images/p1leftshield.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p1walkLeftshield[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
else:
if self.change_x == 0:
screen.blit(pygame.image.load('Images/p1left.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p1walkLeft[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
elif self.right:
if self.shield:
if self.change_x == 0:
screen.blit(pygame.image.load('Images/p1rightshield.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p1walkRightshield[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
else:
if self.change_x == 0:
screen.blit(pygame.image.load('Images/p1right.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p1walkRight[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
elif self.number == 2:
screen.blit(pygame.image.load('Images2/'+str(self.megabombcount)+'megabombs.png'), (415, 0))
if self.alive:
if self.front:
if self.shield:
if self.change_y == 0:
screen.blit(pygame.image.load('Images/p2frontshield.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p2walkFrontshield[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
else:
if self.change_y == 0:
screen.blit(pygame.image.load('Images/p2front.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p2walkFront[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
elif self.back:
if self.shield:
if self.change_y == 0:
screen.blit(pygame.image.load('Images/p2backshield.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p2walkBackshield[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
else:
if self.change_y == 0:
screen.blit(pygame.image.load('Images/p2back.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p2walkBack[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
elif self.left:
if self.shield:
if self.change_x == 0:
screen.blit(pygame.image.load('Images/p2leftshield.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p2walkLeftshield[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
else:
if self.change_x == 0:
screen.blit(pygame.image.load('Images/p2left.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p2walkLeft[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
elif self.right:
if self.shield:
if self.change_x == 0:
screen.blit(pygame.image.load('Images/p2rightshield.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p2walkRightshield[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
else:
if self.change_x == 0:
screen.blit(pygame.image.load('Images/p2right.png'),
(self.rect.x, self.rect.y))
else:
if self.walkCount + 1 >= 21:
self.walkCount = 0
screen.blit(self.p2walkRight[self.walkCount // 3], (self.rect.x, self.rect.y))
self.walkCount += 1
if self.alive == False and self.deathCount < 200:
screen.blit(self.death[self.deathCount // 10], (self.rect.x, self.rect.y))
self.deathCount += 1
if self.deathCount >= 200:
self.rect.x = 1000
self.gotomenu=True
def reset(self,x,y):
self.gotomenu = False
self.alive = True
self.deathCount = 0
self.rect.x = x
self.rect.y = y
self.canmove = True
self.front = True
self.change_x=0
self.change_y=0
self.superspeed=False
self.speed=3
self.shield=False
self.megabombs=False
self.megabombcount=0
class Wall(pygame.sprite.Sprite):
def __init__(self, x, y, width, height):
super().__init__()
self.image = pygame.Surface([width,height], pygame.SRCALPHA, 32)
        self.image = self.image.convert_alpha()
self.rect = self.image.get_rect()
self.rect.y = y
self.rect.x = x
class powerup(pygame.sprite.Sprite):
superspeedanimation=[pygame.image.load('Images/superspeed1.png'), pygame.image.load('Images/superspeed2.png'),
pygame.image.load('Images/superspeed3.png'), pygame.image.load('Images/superspeed3.png'),
pygame.image.load('Images/superspeed2.png'), pygame.image.load('Images/superspeed1.png')]
shieldanimation = [pygame.image.load('Images/shield1.png'), pygame.image.load('Images/shield2.png'),
pygame.image.load('Images/shield3.png'), pygame.image.load('Images/shield3.png'),
pygame.image.load('Images/shield2.png'), pygame.image.load('Images/shield1.png')]
megabombanimation = [pygame.image.load('Images2/megabombicon1.png'), pygame.image.load('Images2/megabombicon2.png'),
pygame.image.load('Images2/megabombicon3.png'), pygame.image.load('Images2/megabombicon3.png'),
pygame.image.load('Images2/megabombicon2.png'), pygame.image.load('Images2/megabombicon1.png')]
def __init__(self, x, y, number):
super().__init__()
self.image = pygame.Surface([22, 28], pygame.SRCALPHA, 32)
        self.image = self.image.convert_alpha()
self.rect = self.image.get_rect()
self.rect.y = y
self.rect.x = x
self.number = number
self.spawntimer=0
self.respawntimer=0
self.exists=True
self.animationcount=0
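    # number selects the powerup type (1 = superspeed, 2 = shield, anything
    # else = megabomb). draw() only shows the icon once spawntimer has passed
    # 50 ticks (spawntimer/respawntimer are presumably advanced by the game
    # loop), and animationcount // 5 steps through the 6-frame icon animation,
    # wrapping at 30.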
def draw(self, screen):
if self.number==1:
if self.exists and self.spawntimer>50:
if self.animationcount + 1 >= 30:
self.animationcount = 0
screen.blit(self.superspeedanimation[self.animationcount // 5], (self.rect.x, self.rect.y))
self.animationcount += 1
elif self.number==2:
if self.exists and self.spawntimer > 50:
if self.animationcount + 1 >= 30:
self.animationcount = 0
screen.blit(self.shieldanimation[self.animationcount // 5], (self.rect.x, self.rect.y))
self.animationcount += 1
else:
if self.exists and self.spawntimer > 50:
if self.animationcount + 1 >= 30:
self.animationcount = 0
screen.blit(self.megabombanimation[self.animationcount // 5], (self.rect.x, self.rect.y))
self.animationcount += 1
def reset(self):
self.spawntimer=0
self.respawntimer=0
self.exists=True
class bomb(pygame.sprite.Sprite):
def __init__(self, x, y, width, height, bomb_count, bomb_type):
super().__init__()
self.image = pygame.Surface([22, 28], pygame.SRCALPHA, 32)
        self.image = self.image.convert_alpha()
self.rect = self.image.get_rect()
self.rect.y = y
self.rect.x = x
self.width = width
self.height = height
self.bomb_count = bomb_count
self.bomb_type = bomb_type
self.walls = None
self.leftcheck = self.rect.x - 32
self.rightcheck = self.rect.x + self.width
self.upcheck = self.rect.y - 32
self.downcheck = self.rect.y + self.height
self.expleft = True
self.doubleexpleft = True
self.expright = True
self.doubleexpright = True
self.expup = True
self.doubleexpup = True
self.expdown = True
self.doubleexpdown = True
self.expboxlist = []
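    # bomb_count (presumably ticked by the game loop) drives the fuse animation
    # in three 30-tick stages; from 90 onwards draw() renders the explosion.
    # The exp*/doubleexp* flags are cleared when a wall blocks that direction
    # (megabombs, bomb_type 1, reach two tiles out), and expboxlist collects
    # the Rect of every blast tile so the game can test them against the players.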
def draw(self, screen):
if self.bomb_count < 30:
if self.bomb_type==0:
screen.blit(pygame.image.load('Images/bomb3.png'), (self.rect.x, self.rect.y))
else:
screen.blit(pygame.image.load('Images2/megabomb3.png'), (self.rect.x, self.rect.y))
elif self.bomb_count < 60:
if self.bomb_type == 0:
screen.blit(pygame.image.load('Images/bomb2.png'), (self.rect.x, self.rect.y))
else:
screen.blit(pygame.image.load('Images2/megabomb2.png'), (self.rect.x, self.rect.y))
elif self.bomb_count < 90:
if self.bomb_type == 0:
screen.blit(pygame.image.load('Images/bomb1.png'), (self.rect.x, self.rect.y))
else:
screen.blit(pygame.image.load('Images2/megabomb1.png'), (self.rect.x, self.rect.y))
elif self.bomb_count < 120:
if self.bomb_type==0:
for i in self.walls:
if i.rect.collidepoint(self.leftcheck,self.rect.y):
self.expleft = False
if i.rect.collidepoint(self.rightcheck,self.rect.y):
self.expright = False
if i.rect.collidepoint(self.rect.x,self.upcheck):
self.expup = False
if i.rect.collidepoint(self.rect.x,self.downcheck):
self.expdown = False
screen.blit(pygame.image.load('Images/explosion.png'), (self.rect.x, self.rect.y))
self.expboxlist.append(pygame.Rect(self.rect.x, self.rect.y, 32, 32))
if self.expleft:
screen.blit(pygame.image.load('Images/explosion.png'), (self.leftcheck, self.rect.y))
self.expboxlist.append(pygame.Rect(self.leftcheck, self.rect.y, 32, 32))
screen.blit(pygame.image.load('Images/explosion.png'), (self.leftcheck+16, self.rect.y))
self.expboxlist.append(pygame.Rect(self.leftcheck+16, self.rect.y, 32, 32))
if self.expright:
screen.blit(pygame.image.load('Images/explosion.png'), (self.rightcheck, self.rect.y))
self.expboxlist.append(pygame.Rect(self.rightcheck, self.rect.y, 32, 32))
screen.blit(pygame.image.load('Images/explosion.png'), (self.rightcheck-16, self.rect.y))
self.expboxlist.append(pygame.Rect(self.rightcheck-16, self.rect.y, 32, 32))
if self.expup:
screen.blit(pygame.image.load('Images/explosion.png'), (self.rect.x, self.upcheck))
self.expboxlist.append(pygame.Rect(self.rect.x, self.upcheck, 32, 32))
screen.blit(pygame.image.load('Images/explosion.png'), (self.rect.x, self.upcheck+16))
self.expboxlist.append(pygame.Rect(self.rect.x, self.upcheck+16, 32, 32))
if self.expdown:
screen.blit(pygame.image.load('Images/explosion.png'), (self.rect.x, self.downcheck))
self.expboxlist.append(pygame.Rect(self.rect.x, self.downcheck, 32, 32))
screen.blit(pygame.image.load('Images/explosion.png'), (self.rect.x, self.downcheck-16))
self.expboxlist.append(pygame.Rect(self.rect.x, self.downcheck-16, 32, 32))
elif self.bomb_type==1:
for i in self.walls:
if i.rect.collidepoint(self.leftcheck, self.rect.y):
self.expleft = False
if i.rect.collidepoint(self.leftcheck-32, self.rect.y):
self.doubleexpleft = False
if i.rect.collidepoint(self.rightcheck, self.rect.y):
self.expright = False
if i.rect.collidepoint(self.rightcheck+32, self.rect.y):
self.doubleexpright = False
if i.rect.collidepoint(self.rect.x, self.upcheck):
self.expup = False
if i.rect.collidepoint(self.rect.x, self.upcheck-32):
self.doubleexpup = False
if i.rect.collidepoint(self.rect.x, self.downcheck):
self.expdown = False
if i.rect.collidepoint(self.rect.x, self.downcheck+32):
self.doubleexpdown = False
screen.blit(pygame.image.load('Images/explosion.png'), (self.rect.x, self.rect.y))
self.expboxlist.append(pygame.Rect(self.rect.x, self.rect.y, 32, 32))
if self.expleft:
screen.blit(pygame.image.load('Images/explosion.png'), (self.leftcheck, self.rect.y))
self.expboxlist.append(pygame.Rect(self.leftcheck, self.rect.y, 32, 32))
screen.blit(pygame.image.load('Images/explosion.png'), (self.leftcheck + 16, self.rect.y))
self.expboxlist.append(pygame.Rect(self.leftcheck + 16, self.rect.y, 32, 32))
if self.doubleexpleft:
screen.blit(pygame.image.load('Images/explosion.png'), (self.leftcheck-32, self.rect.y))
self.expboxlist.append(pygame.Rect(self.leftcheck-32, self.rect.y, 32, 32))
screen.blit(pygame.image.load('Images/explosion.png'), (self.leftcheck-16, self.rect.y))
self.expboxlist.append(pygame.Rect(self.leftcheck-16, self.rect.y, 32, 32))
if self.expright:
screen.blit(pygame.image.load('Images/explosion.png'), (self.rightcheck, self.rect.y))
self.expboxlist.append(pygame.Rect(self.rightcheck, self.rect.y, 32, 32))
screen.blit(pygame.image.load('Images/explosion.png'), (self.rightcheck - 16, self.rect.y))
self.expboxlist.append(pygame.Rect(self.rightcheck - 16, self.rect.y, 32, 32))
if self.doubleexpright:
screen.blit(pygame.image.load('Images/explosion.png'), (self.rightcheck+32, self.rect.y))
self.expboxlist.append(pygame.Rect(self.rightcheck+32, self.rect.y, 32, 32))
screen.blit(pygame.image.load('Images/explosion.png'), (self.rightcheck+16, self.rect.y))
self.expboxlist.append(pygame.Rect(self.rightcheck+16, self.rect.y, 32, 32))
if self.expup:
screen.blit(pygame.image.load('Images/explosion.png'), (self.rect.x, self.upcheck))
self.expboxlist.append(pygame.Rect(self.rect.x, self.upcheck, 32, 32))
screen.blit(pygame.image.load('Images/explosion.png'), (self.rect.x, self.upcheck + 16))
self.expboxlist.append(pygame.Rect(self.rect.x, self.upcheck + 16, 32, 32))
if self.doubleexpup:
screen.blit(pygame.image.load('Images/explosion.png'), (self.rect.x, self.upcheck-32))
self.expboxlist.append(pygame.Rect(self.rect.x, self.upcheck-32, 32, 32))
screen.blit(pygame.image.load('Images/explosion.png'), (self.rect.x, self.upcheck-16))
self.expboxlist.append(pygame.Rect(self.rect.x, self.upcheck-16, 32, 32))
if self.expdown:
screen.blit(pygame.image.load('Images/explosion.png'), (self.rect.x, self.downcheck))
self.expboxlist.append(pygame.Rect(self.rect.x, self.downcheck, 32, 32))
screen.blit(pygame.image.load('Images/explosion.png'), (self.rect.x, self.downcheck - 16))
self.expboxlist.append(pygame.Rect(self.rect.x, self.downcheck - 16, 32, 32))
if self.doubleexpdown:
screen.blit(pygame.image.load('Images/explosion.png'), (self.rect.x, self.downcheck+32))
self.expboxlist.append(pygame.Rect(self.rect.x, self.downcheck+32, 32, 32))
screen.blit(pygame.image.load('Images/explosion.png'), (self.rect.x, self.downcheck+16))
self.expboxlist.append(pygame.Rect(self.rect.x, self.downcheck+16, 32, 32)) | 2.40625 | 2 |
installSynApps/data_model/install_config.py | NSLS-II/installSynApps | 0 | 1639 | """A file containing representations of install configurations.
The core Data representation for installSynApps. An InstallConfiguration object
is parsed from a configuration, and is then used throughout the build process.
InjectorFile objects are used for representing text that need to be injected
into configuration files prior to builds.
"""
import os
import installSynApps
from installSynApps.data_model.install_module import InstallModule as IM
class InstallConfiguration:
"""
Class that represents an Install Configuration for installSynApps
It stores the top level install_location, the path to the configuration files,
any OS specific configurations, and the actual list of modules that will be
installed.
Attributes
----------
install_location : str
path to top level install location
path_to_configure : str
path to configure folder of installSynApps
modules : List of InsallModule
list of InstallModule objects representing the modules that will be installed
base_path : str
abs path to install location of EPICS base
support_path : str
abs path to install location of EPICS support modules
ad_path : str
abs path to install location of EPICS area detector
motor_path : str
abs path to install location of EPICS motor
module_map : dict of str -> int
Dictionary storing relation of module names to build index
injector_files : list of InjectorFile
list of injector files loaded by install configuration
build_flags : list of list of str
list of macro-value pairs enforced at build time
"""
def __init__(self, install_location, path_to_configure):
"""Constructor for the InstallConfiguration object
"""
# Paths to configure and output locations
self.path_to_configure = path_to_configure
self.install_location = os.path.abspath(install_location)
# Modules loaded into install config
self.modules = []
# Dict that maps module name to index in module list for easier searching.
self.module_map = {}
self.injector_files = []
self.build_flags = []
# Paths to the three install location paths used for relative path correction
self.base_path = None
self.support_path = None
self.ad_path = None
self.motor_path = None
self.extensions_path = None
def is_install_valid(self):
"""Function that checks if given install location is valid
Parameters
----------
self : InstallConfiguration
Self object
Returns
-------
bool
True if install location is valid, false otherwise
str
Error message if applicable, None otherwise
"""
valid = True
message = None
target = self.install_location
if not os.path.exists(target):
target = os.path.dirname(self.install_location)
if not os.path.exists(target):
valid = False
message = 'Install location and parent directory do not exist'
elif not os.access(target, os.W_OK | os.X_OK):
valid = False
message = 'Permission Error: {}'.format(target)
return valid, message
def add_module(self, module):
"""Function that adds a module to the InstallConfiguration module list
        First checks if the parameter is a valid InstallModule, then sets its absolute path.
        If it is one of the key modules to track (base, support, areaDetector, motor,
        extensions), the corresponding path attribute is also set. Finally, the module is
        added to the module map, which keeps track of each module's position in the
        list/build order.
Parameters
----------
module : InstallModule
new installation module being added.
"""
if isinstance(module, IM):
# Updates the abs path
module.abs_path = self.convert_path_abs(module.rel_path)
# Key paths to track
if module.name == "EPICS_BASE":
self.base_path = module.abs_path
elif module.name == "SUPPORT":
self.support_path = module.abs_path
elif module.name == "AREA_DETECTOR":
self.ad_path = module.abs_path
elif module.name == "MOTOR":
self.motor_path = module.abs_path
elif module.name == "EXTENSIONS":
self.extensions_path = module.abs_path
self.module_map[module.name] = len(self.modules)
self.modules.append(module)
def add_injector_file(self, name, contents, target):
"""Function that adds a new injector file to the install_config object
Parameters
----------
name : str
name of the file
contents : str
The contents of the file
target : str
The target location file into which contents will be injected.
"""
new_injector = InjectorFile(self.path_to_configure, name, contents, target)
self.injector_files.append(new_injector)
def add_macros(self, macro_list):
"""Function that adds macro-value pairs to a list of macros
Parameters
----------
macro_list : list of [str, str]
list of new macros to append
"""
self.build_flags = self.build_flags + macro_list
def get_module_list(self):
"""Function that gets the list of modules in the configuration
Returns
-------
List
self.modules - list of modules to install in this install configuration
"""
return self.modules
def get_module_by_name(self, name):
"""Function that returns install module object given module name
Uses module name as a key in a dictionary to return reference to given module object.
Parameters
----------
name : str
Module name
Returns
-------
obj - InstallModule
Return matching module, or None if not found.
"""
if name in self.module_map.keys():
return self.modules[self.module_map[name]]
else:
return None
def get_module_build_index(self, name):
"""Function that returns the index in the build order for the module
Used for ensuring dependencies are built before lower level packages.
Parameters
----------
name : str
Module name
Returns
-------
int
Index of module in build order if found, otherwise -1
"""
if name in self.module_map.keys():
return self.module_map[name]
else:
return -1
def get_core_version(self):
"""Funciton that returns selected version of ADCore
"""
return self.get_module_by_name('ADCORE').version
def swap_module_positions(self, module_A, module_B):
"""Swaps build order of modules
Used to ensure dependencies are built before lower level packages
Parameters
----------
module_A : str
Name of first module
module_B : str
Name of second module
"""
index_A = self.get_module_build_index(module_A)
index_B = self.get_module_build_index(module_B)
if index_A >= 0 and index_B >= 0:
temp_A = self.get_module_by_name(module_B)
temp_B = self.get_module_by_name(module_A)
self.modules[index_A] = temp_A
self.modules[index_B] = temp_B
self.module_map[module_A] = index_B
self.module_map[module_B] = index_A
def convert_path_abs(self, rel_path):
"""Function that converts a given modules relative path to an absolute path
If the macro name can be found in the list of accounted for modules, replace it with that module's absolute path
Parameters
----------
rel_path : str
The relative installation path for the given module
Returns
-------
str
The absolute installation path for the module. (Macros are replaced)
"""
temp = rel_path.split('/', 1)[-1]
if "$(INSTALL)" in rel_path and self.install_location != None:
return installSynApps.join_path(self.install_location, temp)
elif "$(EPICS_BASE)" in rel_path and self.base_path != None:
return installSynApps.join_path(self.base_path, temp)
elif "$(SUPPORT)" in rel_path and self.support_path != None:
return installSynApps.join_path(self.support_path, temp)
elif "$(AREA_DETECTOR)" in rel_path and self.ad_path != None:
return installSynApps.join_path(self.ad_path, temp)
elif "$(MOTOR)" in rel_path and self.motor_path != None:
return installSynApps.join_path(self.motor_path, temp)
elif "$(EXTENSIONS)" in rel_path and self.extensions_path != None:
return installSynApps.join_path(self.extensions_path, temp)
elif "$(" in rel_path:
macro_part = rel_path.split(')')[0]
rel_to = macro_part.split('(')[1]
rel_to_module = self.get_module_by_name(rel_to)
if rel_to_module is not None:
return installSynApps.join_path(rel_to_module.abs_path, temp)
return rel_path
def print_installation_info(self, fp = None):
"""Function that prints installation info
Prints list of all modules including clone/build/package information
Parameters
----------
fp = None : file pointer
Optional pointer to an external log file
"""
if fp == None:
print(self.get_printable_string().strip())
else:
fp.write(self.get_printable_string())
def get_printable_string(self):
"""Function that gets a toString for an InstallConfigurations
Returns
-------
str
A string representing the install configuration
"""
out = "--------------------------------\n"
out = out + "Install Location = {}\n".format(self.install_location)
out = out + "This Install Config is saved at {}\n".format(self.path_to_configure)
for module in self.modules:
if module.clone == 'YES':
out = out + module.get_printable_string()
return out
def get_module_names_list(self):
"""Function that gets list of modules being built
Returns
-------
list of str
list of module names that are set to build
"""
out = []
for module in self.modules:
if module.build == 'YES':
out.append(module.name)
return out
class InjectorFile:
"""Class that represents an injector file and stores its name, contents, and target
Injector file classes are used to represent data that needs to be appended to target files
at build time. Used to add to commonPlugins, commonPlugin_settings, etc.
TODO: This class can probably be abstracted into a simpler data structure (since its used as a struct anyway)
Attributes
----------
path_to_configure : str
path to the configure dir that houses this injector file
name : str
name of the file
contents : str
The contents of the file
target : str
The target location file into which contents will be injected.
"""
def __init__(self, path_to_configure, name, contents, target):
"""Constructor of InjectorFile class
"""
self.path_to_configure = path_to_configure
self.name = name
self.contents = contents
self.target = target
def generate_default_install_config(target_install_loc='/epics', update_versions=False, with_pva=True):
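    """Generates a default install configuration.
    Builds an InstallConfiguration rooted at target_install_loc, registers EPICS base,
    synApps support modules, and a set of commonly used areaDetector drivers, and attaches
    the default commonPlugins/commonPlugin_settings injector text (with NDPva entries when
    with_pva is True). If update_versions is True, installSynApps.sync_all_module_tags is
    called to refresh module version tags before the config is returned.
    """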
config = InstallConfiguration(target_install_loc, None)
y = 'YES'
n = 'NO'
gu = 'GIT_URL'
wu = 'WGET_URL'
base_org = 'https://github.com/epics-base/'
syn_org = 'https://github.com/EPICS-synApps/'
mod_org = 'https://github.com/epics-modules/'
ad_org = 'https://github.com/areaDetector/'
seq_rel = 'http://www-csr.bessy.de/control/SoftDist/sequencer/releases/'
psi_org = 'https://github.com/paulscherrerinstitute/'
# Add core modules that will generally always be built
config.add_module(IM("EPICS_BASE", "R7.0.3", "$(INSTALL)/base", gu, base_org, "epics-base", y, y, y))
config.add_module(IM("SUPPORT", "R6-1", "$(INSTALL)/support", gu, syn_org, "support", y, y, n))
config.add_module(IM("CONFIGURE", "R6-1", "$(SUPPORT)/configure", gu, syn_org, "configure", y, y, n))
config.add_module(IM("UTILS", "R6-1", "$(SUPPORT)/utils", gu, syn_org, "utils", y, y, n))
config.add_module(IM("SNCSEQ", "2.2.8", "$(SUPPORT)/seq", wu, seq_rel, "seq-2.2.8.tar.gz", y, y, y))
config.add_module(IM("IPAC", "2.15", "$(SUPPORT)/ipac", gu, mod_org, "ipac", y, y, y))
config.add_module(IM("ASYN", "R4-37", "$(SUPPORT)/asyn", gu, mod_org, "asyn", y, y, y))
config.add_module(IM("AUTOSAVE", "R5-10", "$(SUPPORT)/autosave", gu, mod_org, "autosave", y, y, y))
config.add_module(IM("BUSY", "R1-7-2", "$(SUPPORT)/busy", gu, mod_org, "busy", y, y, y))
config.add_module(IM("CALC", "R3-7-3", "$(SUPPORT)/calc", gu, mod_org, "calc", y, y, y))
config.add_module(IM("DEVIOCSTATS", "master", "$(SUPPORT)/iocStats", gu, mod_org, "iocStats", y, y, y))
config.add_module(IM("SSCAN", "R2-11-3", "$(SUPPORT)/sscan", gu, mod_org, "sscan", y, y, y))
config.add_module(IM("IPUNIDIG", "R2-11", "$(SUPPORT)/ipUnidig", gu, mod_org, "ipUnidig", y, y, y))
# Some modules that are commonly needed
config.add_module(IM("XSPRESS3", "master", "$(SUPPORT)/xspress3", gu, mod_org, "xspress3", y, y, y))
config.add_module(IM("MOTOR", "R7-1", "$(SUPPORT)/motor", gu, mod_org, "motor", y, y, y))
config.add_module(IM("QUADEM", "R9-3", "$(SUPPORT)/quadEM", gu, mod_org, "quadEM", y, y, y))
config.add_module(IM("STREAM", "2.8.10", "$(SUPPORT)/stream", gu, psi_org, "StreamDevice", y, y, y))
# AreaDetector and commonly used drivers
config.add_module(IM("AREA_DETECTOR", "R3-8", "$(SUPPORT)/areaDetector", gu, ad_org, "areaDetector", y, y, n))
config.add_module(IM("ADSUPPORT", "R1-9", "$(AREA_DETECTOR)/ADSupport", gu, ad_org, "ADSupport", y, y, y))
config.add_module(IM("ADCORE", "R3-8", "$(AREA_DETECTOR)/ADCore", gu, ad_org, "ADCore", y, y, y))
config.add_module(IM("ADPERKINELMER", "master", "$(AREA_DETECTOR)/ADPerkinElmer", gu, ad_org, "ADPerkinElmer", n, n, n))
config.add_module(IM("ADGENICAM", "master", "$(AREA_DETECTOR)/ADGenICam", gu, ad_org, "ADGenICam", n, n, n))
config.add_module(IM("ADANDOR3", "master", "$(AREA_DETECTOR)/ADAndor3", gu, ad_org, "ADAndor3", n, n, n))
config.add_module(IM("ADPROSILICA", "R2-5", "$(AREA_DETECTOR)/ADProsilica", gu, ad_org, "ADProsilica", n, n, n))
config.add_module(IM("ADSIMDETECTOR", "master", "$(AREA_DETECTOR)/ADSimDetector", gu, ad_org, "ADSimDetector", n, n, n))
config.add_module(IM("ADPILATUS", "R2-8", "$(AREA_DETECTOR)/ADPilatus", gu, ad_org, "ADPilatus", n, n, n))
config.add_module(IM("ADMERLIN", "master", "$(AREA_DETECTOR)/ADMerlin", gu, ad_org, "ADMerlin", n, n, n))
config.add_module(IM("ADARAVIS", "master", "$(AREA_DETECTOR)/ADAravis", gu, ad_org, "ADAravis", n, n, n))
config.add_module(IM("ADEIGER", "R2-6", "$(AREA_DETECTOR)/ADEiger", gu, ad_org, "ADEiger", n, n, n))
config.add_module(IM("ADVIMBA", "master", "$(AREA_DETECTOR)/ADVimba", gu, ad_org, "ADVimba", n, n, n))
config.add_module(IM("ADPOINTGREY", "master", "$(AREA_DETECTOR)/ADPointGrey", gu, ad_org, "ADPointGrey", n, n, n))
config.add_module(IM("ADANDOR", "R2-8", "$(AREA_DETECTOR)/ADAndor", gu, ad_org, "ADAndor", n, n, n))
config.add_module(IM("ADDEXELA", "R2-3", "$(AREA_DETECTOR)/ADDexela", gu, ad_org, "ADDexela", n, n, n))
config.add_module(IM("ADMYTHEN", "master", "$(AREA_DETECTOR)/ADMythen", gu, ad_org, "ADMythen", n, n, n))
config.add_module(IM("ADURL", "master", "$(AREA_DETECTOR)/ADURL", gu, ad_org, "ADURL", n, n, n))
common_plugins_str = 'dbLoadRecords("$(DEVIOCSTATS)/db/iocAdminSoft.db", "IOC=$(PREFIX)")\n'
autosave_str = 'file "sseqRecord_settings.req", P=$(P), S=AcquireSequence\n'
if with_pva:
autosave_str += 'file "NDPva_settings.req", P=$(P), R=Pva1:\n'
common_plugins_str += 'NDPvaConfigure("PVA1", $(QSIZE), 0, "$(PORT)", 0, $(PREFIX)Pva1:Image, 0, 0, 0)\n' \
'dbLoadRecords("NDPva.template", "P=$(PREFIX),R=Pva1:, PORT=PVA1,ADDR=0,TIMEOUT=1,NDARRAY_PORT=$(PORT)")\n' \
'# Must start PVA server if this is enabled\n' \
                                'startPVAServer\n'
config.add_injector_file('PLUGIN_CONFIG', common_plugins_str, '$(AREA_DETECTOR)/ADCore/iocBoot/EXAMPLE_commonPlugins.cmd')
config.add_injector_file('AUTOSAVE_CONFIG', autosave_str, '$(AREA_DETECTOR)/ADCore/iocBoot/EXAMPLE_commonPlugin_settings.req')
if update_versions:
installSynApps.sync_all_module_tags(config)
return config
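# Minimal usage sketch (illustrative only; assumes installSynApps is importable
# and the target location is writable):
# config = generate_default_install_config(target_install_loc='/epics')
# valid, message = config.is_install_valid()
# config.print_installation_info()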
| 2.859375 | 3 |
apps/accounts/views.py | tarvitz/icu | 1 | 1640 | <gh_stars>1-10
# Create your views here.
# -*- coding: utf-8 -*-
from apps.core.helpers import render_to, ajax_response, get_object_or_None
from apps.core.decorators import lock, login_required_json
from apps.accounts.models import Invite
from apps.accounts.decorators import check_invite
from apps.accounts.forms import (
LoginForm, AccountRegisterForm, SendInviteForm, InviteRegisterForm
)
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
@render_to('accounts/login.html')
def login(request):
form = LoginForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
user = form.cleaned_data['user']
auth.login(request, user)
return {'redirect': 'core:index'}
return {
'form': form
}
@render_to('index.html')
def logout(request):
auth.logout(request)
return {}
@render_to('accounts/profile.html')
def profile(request):
return {}
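# Regenerates the logged-in user's API key on POST and returns it as JSON; the
# decorators below handle the login check and wrapping the returned dict into
# an ajax response.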
@login_required_json
@ajax_response
def generate_new_api_key(request):
if request.method == 'POST':
request.user.api_key.key = request.user.api_key.generate_key()
request.user.api_key.save()
key = request.user.api_key.key
return {'success': True, 'key': key}
return {'success': False}
@lock("REGISTER_ALLOWED")
@render_to('accounts/register.html')
def register(request):
form = AccountRegisterForm(request.POST or None)
if request.method == "POST":
if form.is_valid():
user = form.save(commit=False)
user.set_password(form.cleaned_data['password'])
user.save()
return {'redirect': 'core:index'}
return {
'form': form
}
@login_required
@render_to('accounts/invite.html')
def invite(request):
form = SendInviteForm(request.POST or None, request=request)
if request.method == 'POST':
if form.is_valid():
form.save(commit=False)
invite = form.instance
email = form.cleaned_data['email']
msg = settings.INVITE_MESSAGE % {
'user': request.user.username,
'link': "http://b3ban.blacklibrary.ru%s" % reverse('accounts:invite-register', args=(invite.sid, ))
}
#no mail send, no money :)
send_mail(
subject=unicode(_('You have been invited to b3ban service')),
message=unicode(msg),
from_email=settings.EMAIL_FROM,
recipient_list=[email]
)
invite.save()
return {'redirect': 'accounts:invite-success'}
return {
'form': form
}
#@check for possibility to register
@transaction.commit_on_success
@check_invite(sid='sid')
@render_to('accounts/invite_register.html')
def invite_register(request, sid):
invite = get_object_or_None(Invite, sid=sid)
if not invite:
return {'redirect': 'core:ufo'}
form = InviteRegisterForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
invite.is_verified = True
invite.save()
user = form.save(commit=False)
user.email = invite.email
user.set_password(form.cleaned_data['password'])
user.save()
return {'redirect': 'accounts:invite-register-success'}
return {'form': form, 'sid': sid}
| 1.890625 | 2 |
plugins/modules/oci_blockstorage_volume_backup_policy_facts.py | LaudateCorpus1/oci-ansible-collection | 0 | 1641 | <reponame>LaudateCorpus1/oci-ansible-collection
#!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_blockstorage_volume_backup_policy_facts
short_description: Fetches details about one or multiple VolumeBackupPolicy resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple VolumeBackupPolicy resources in Oracle Cloud Infrastructure
- Lists all the volume backup policies available in the specified compartment.
- For more information about Oracle defined backup policies and user defined backup policies,
see L(Policy-Based Backups,https://docs.cloud.oracle.com/iaas/Content/Block/Tasks/schedulingvolumebackups.htm).
- If I(policy_id) is specified, the details of a single VolumeBackupPolicy will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
policy_id:
description:
- The OCID of the volume backup policy.
- Required to get a specific volume_backup_policy.
type: str
aliases: ["id"]
compartment_id:
description:
- The OCID of the compartment.
If no compartment is specified, the Oracle defined backup policies are listed.
type: str
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_display_name_option ]
"""
EXAMPLES = """
- name: Get a specific volume_backup_policy
oci_blockstorage_volume_backup_policy_facts:
# required
policy_id: "ocid1.policy.oc1..xxxxxxEXAMPLExxxxxx"
- name: List volume_backup_policies
oci_blockstorage_volume_backup_policy_facts:
# optional
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
volume_backup_policies:
description:
- List of VolumeBackupPolicy resources
returned: on success
type: complex
contains:
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
returned: on success
type: str
sample: display_name_example
id:
description:
- The OCID of the volume backup policy.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
schedules:
description:
- The collection of schedules that this policy will apply.
returned: on success
type: complex
contains:
backup_type:
description:
- The type of volume backup to create.
returned: on success
type: str
sample: FULL
offset_seconds:
description:
- The number of seconds that the volume backup start
time should be shifted from the default interval boundaries specified by
the period. The volume backup start time is the frequency start time plus the offset.
returned: on success
type: int
sample: 56
period:
description:
- The volume backup frequency.
returned: on success
type: str
sample: ONE_HOUR
offset_type:
description:
- Indicates how the offset is defined. If value is `STRUCTURED`,
then `hourOfDay`, `dayOfWeek`, `dayOfMonth`, and `month` fields are used
and `offsetSeconds` will be ignored in requests and users should ignore its
value from the responses.
- "`hourOfDay` is applicable for periods `ONE_DAY`,
`ONE_WEEK`, `ONE_MONTH` and `ONE_YEAR`."
- "`dayOfWeek` is applicable for period
`ONE_WEEK`."
- "`dayOfMonth` is applicable for periods `ONE_MONTH` and `ONE_YEAR`."
- "'month' is applicable for period 'ONE_YEAR'."
- They will be ignored in the requests for inapplicable periods.
- If value is `NUMERIC_SECONDS`, then `offsetSeconds`
will be used for both requests and responses and the structured fields will be
ignored in the requests and users should ignore their values from the responses.
- For clients using older versions of Apis and not sending `offsetType` in their
requests, the behaviour is just like `NUMERIC_SECONDS`.
returned: on success
type: str
sample: STRUCTURED
hour_of_day:
description:
- The hour of the day to schedule the volume backup.
returned: on success
type: int
sample: 56
day_of_week:
description:
- The day of the week to schedule the volume backup.
returned: on success
type: str
sample: MONDAY
day_of_month:
description:
- The day of the month to schedule the volume backup.
returned: on success
type: int
sample: 56
month:
description:
- The month of the year to schedule the volume backup.
returned: on success
type: str
sample: JANUARY
retention_seconds:
description:
- How long, in seconds, to keep the volume backups created by this schedule.
returned: on success
type: int
sample: 56
time_zone:
description:
- Specifies what time zone is the schedule in
returned: on success
type: str
sample: UTC
destination_region:
description:
- The paired destination region for copying scheduled backups to. Example `us-ashburn-1`.
See L(Region Pairs,https://docs.cloud.oracle.com/iaas/Content/Block/Tasks/schedulingvolumebackups.htm#RegionPairs) for details about paired
regions.
returned: on success
type: str
sample: us-phoenix-1
time_created:
description:
- The date and time the volume backup policy was created. Format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
compartment_id:
description:
- The OCID of the compartment that contains the volume backup.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
sample: [{
"display_name": "display_name_example",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"schedules": [{
"backup_type": "FULL",
"offset_seconds": 56,
"period": "ONE_HOUR",
"offset_type": "STRUCTURED",
"hour_of_day": 56,
"day_of_week": "MONDAY",
"day_of_month": 56,
"month": "JANUARY",
"retention_seconds": 56,
"time_zone": "UTC"
}],
"destination_region": "us-phoenix-1",
"time_created": "2013-10-20T19:20:30+01:00",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"freeform_tags": {'Department': 'Finance'}
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.core import BlockstorageClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class VolumeBackupPolicyFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"policy_id",
]
def get_required_params_for_list(self):
return []
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_volume_backup_policy,
policy_id=self.module.params.get("policy_id"),
)
def list_resources(self):
optional_list_method_params = [
"compartment_id",
"display_name",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_volume_backup_policies, **optional_kwargs
)
VolumeBackupPolicyFactsHelperCustom = get_custom_class(
"VolumeBackupPolicyFactsHelperCustom"
)
class ResourceFactsHelper(
VolumeBackupPolicyFactsHelperCustom, VolumeBackupPolicyFactsHelperGen
):
pass
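# main() wires the argument spec into Ansible: if policy_id is supplied the
# helper performs a get of that single backup policy, otherwise it lists
# policies (optionally filtered by compartment_id / display_name).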
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
policy_id=dict(aliases=["id"], type="str"),
compartment_id=dict(type="str"),
display_name=dict(type="str"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="volume_backup_policy",
service_client_class=BlockstorageClient,
namespace="core",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(volume_backup_policies=result)
if __name__ == "__main__":
main()
| 1.625 | 2 |
pi_control/server_stats/apps.py | mhozza/pi-control | 0 | 1642 | <reponame>mhozza/pi-control<gh_stars>0
from django.apps import AppConfig
class ServerStatsConfig(AppConfig):
name = "server_stats"
| 1.296875 | 1 |
testproject/testproject/settings.py | jackvz/mezzanine-cartridge-api | 1 | 1643 |
from __future__ import absolute_import, unicode_literals
import os
from django import VERSION as DJANGO_VERSION
from django.utils.translation import ugettext_lazy as _
SECRET_KEY = '<KEY>'
######################
# CARTRIDGE SETTINGS #
######################
# The following settings are already defined in cartridge.shop.defaults
# with default values, but are common enough to be put here, commented
# out, for conveniently overriding. Please consult the settings
# documentation for a full list of settings Cartridge implements:
# http://cartridge.jupo.org/configuration.html#default-settings
# Sequence of available credit card types for payment.
# SHOP_CARD_TYPES = ("Mastercard", "Visa", "Diners", "Amex")
# Setting to turn on featured images for shop categories. Defaults to False.
# SHOP_CATEGORY_USE_FEATURED_IMAGE = True
# If True, the checkout process is split into separate
# billing/shipping and payment steps.
# SHOP_CHECKOUT_STEPS_SPLIT = True
# If True, the checkout process has a final confirmation step before
# completion.
# SHOP_CHECKOUT_STEPS_CONFIRMATION = True
# Controls the formatting of monetary values accord to the locale
# module in the python standard library. If an empty string is
# used, will fall back to the system's locale.
SHOP_CURRENCY_LOCALE = "en_GB.UTF-8"
# Dotted package path and name of the function that
# is called on submit of the billing/shipping checkout step. This
# is where shipping calculation can be performed and set using the
# function ``cartridge.shop.utils.set_shipping``.
# SHOP_HANDLER_BILLING_SHIPPING = \
# "cartridge.shop.checkout.default_billship_handler"
# Dotted package path and name of the function that
# is called once an order is successful and all of the order
# object's data has been created. This is where any custom order
# processing should be implemented.
# SHOP_HANDLER_ORDER = "cartridge.shop.checkout.default_order_handler"
# Dotted package path and name of the function that
# is called on submit of the payment checkout step. This is where
# integration with a payment gateway should be implemented.
# SHOP_HANDLER_PAYMENT = "cartridge.shop.checkout.default_payment_handler"
# Sequence of value/name pairs for order statuses.
# SHOP_ORDER_STATUS_CHOICES = (
# (1, "Unprocessed"),
# (2, "Processed"),
# )
# Sequence of value/name pairs for types of product options,
# eg Size, Colour. NOTE: Increasing the number of these will
# require database migrations!
# SHOP_OPTION_TYPE_CHOICES = (
# (1, "Size"),
# (2, "Colour"),
# )
# Sequence of indexes from the SHOP_OPTION_TYPE_CHOICES setting that
# control how the options should be ordered in the admin,
# eg for "Colour" then "Size" given the above:
# SHOP_OPTION_ADMIN_ORDER = (2, 1)
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for conveniently
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", (_("Media Library"), "media-library"),)),
# (_("Shop"), ("shop.Product", "shop.ProductOption", "shop.DiscountCode",
# "shop.Sale", "shop.Order")),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, _("Top navigation bar"), "pages/menus/dropdown.html"),
# (2, _("Left-hand tree"), "pages/menus/tree.html"),
# (3, _("Footer"), "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# (_("Image"),),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# (_("Another name"),),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the django-modeltranslation will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False
########################
# MAIN DJANGO SETTINGS #
########################
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = True
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.dev',
}
}
#########
# PATHS #
#########
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(PROJECT_ROOT, "templates")
],
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.static",
"django.template.context_processors.media",
"django.template.context_processors.request",
"django.template.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
],
"builtins": [
"mezzanine.template.loader_tags",
],
"loaders": [
"mezzanine.template.loaders.host_themes.Loader",
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
},
},
]
if DJANGO_VERSION < (1, 9):
del TEMPLATES[0]["OPTIONS"]["builtins"]
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.pages",
"cartridge.shop",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.galleries",
"mezzanine.twitter",
# "mezzanine.accounts",
'corsheaders',
'rest_framework',
'rest_framework_api_key',
'drf_yasg',
# 'oauth2_provider',
# 'rest_framework.authtoken',
'mezzanine_cartridge_api',
)
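# Django REST Framework reads an optional REST_FRAMEWORK settings dict for
# project-wide defaults. A minimal sketch (assumption -- adjust the
# authentication/permission classes to match how the API is actually consumed):
# REST_FRAMEWORK = {
#     'DEFAULT_PERMISSION_CLASSES': ['rest_framework.permissions.IsAuthenticated'],
# }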
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
# Uncomment if using internationalisation or localisation
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"cartridge.shop.middleware.ShopMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
'corsheaders.middleware.CorsMiddleware',
)
if DJANGO_VERSION < (1, 10):
MIDDLEWARE_CLASSES = MIDDLEWARE
del MIDDLEWARE
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
# Instead of doing "from .local_settings import *", we use exec so that
# local_settings has full access to everything defined in this module.
# Also force into sys.modules so it's visible to Django's autoreload.
f = os.path.join(PROJECT_APP_PATH, "local_settings.py")
if os.path.exists(f):
import sys
import imp
module_name = "%s.local_settings" % PROJECT_APP
module = imp.new_module(module_name)
module.__file__ = f
sys.modules[module_name] = module
exec(open(f, "rb").read())
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
| 1.984375 | 2 |
wordgen/data_gen.py | ishaanbakhle/wordgen.us | 0 | 1644 | <gh_stars>0
from wordgen import consts
import numpy as np
from sklearn import preprocessing
def fill_matrix(dataset):
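	# Build a square character-transition count matrix over consts.rang:
	# matrix[next_char][current_char] counts how often next_char follows current_char,
	# then each row is L1-normalised so it can be read as transition probabilities.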
assert type(dataset) == str
	assert len(dataset) > 0, "Dataset must be > 0"
matrix = []
for i in consts.rang:
matrix.append([])
for o in consts.rang:
matrix[i].append(0)
dataset = dataset.lower()
	accepted = list("abcdefghijklmnopqrstuvwxyz") + ['\n']
for i in range(len(dataset)-1):
# if (dataset[i+1] in accepted and dataset[i] in accepted):
if dataset[i] in accepted:
val2 = i+1
while (val2 < len(dataset) and not (dataset[val2] in accepted)):
val2 += 1
			if val2 == len(dataset):
				# No accepted character follows; nothing more to count
				break
			ind1 = consts.get_ord(dataset[i])
ind2 = consts.get_ord(dataset[val2])
matrix[ind2][ind1] += 1
matrix = preprocessing.normalize(matrix, norm='l1')
return matrix
if __name__ == '__main__':
print(fill_matrix("james as"))
| 3.03125 | 3 |
arcade/gl/context.py | Cleptomania/arcade | 0 | 1645 | <filename>arcade/gl/context.py
from ctypes import c_int, c_char_p, cast, c_float
from collections import deque
import logging
import weakref
from typing import Any, Dict, List, Tuple, Union, Sequence, Set
import pyglet
from pyglet.window import Window
from pyglet import gl
from .buffer import Buffer
from .program import Program
from .vertex_array import Geometry, VertexArray
from .framebuffer import Framebuffer, DefaultFrameBuffer
from typing import Optional
from .texture import Texture
from .query import Query
from .glsl import ShaderSource
from .types import BufferDescription
LOG = logging.getLogger(__name__)
class Context:
"""
Represents an OpenGL context. This context belongs to a ``pyglet.Window``
normally accessed through ``window.ctx``.
The Context class contains methods for creating resources,
global states and commonly used enums. All enums also exist
in the ``gl`` module. (``ctx.BLEND`` or ``arcade.gl.BLEND``).
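    Example (a sketch; assumes the context is reached as ``self.ctx``
    inside an ``arcade.Window`` subclass)::
        # Enable blending and create a small buffer
        self.ctx.enable(self.ctx.BLEND)
        buffer = self.ctx.buffer(reserve=1024)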
"""
#: The active context
active: Optional["Context"] = None
# --- Store the most commonly used OpenGL constants
# Texture
#: Texture interpolation: Nearest pixel
NEAREST = 0x2600
#: Texture interpolation: Linear interpolate
LINEAR = 0x2601
#: Texture interpolation: Minification filter for mipmaps
NEAREST_MIPMAP_NEAREST = 0x2700
#: Texture interpolation: Minification filter for mipmaps
LINEAR_MIPMAP_NEAREST = 0x2701
#: Texture interpolation: Minification filter for mipmaps
NEAREST_MIPMAP_LINEAR = 0x2702
#: Texture interpolation: Minification filter for mipmaps
LINEAR_MIPMAP_LINEAR = 0x2703
#: Texture wrap mode: Repeat
REPEAT = gl.GL_REPEAT
    #: Texture wrap mode: Clamp to edge pixel
    CLAMP_TO_EDGE = gl.GL_CLAMP_TO_EDGE
    #: Texture wrap mode: Clamp to border color
    CLAMP_TO_BORDER = gl.GL_CLAMP_TO_BORDER
    #: Texture wrap mode: Repeat mirrored
    MIRRORED_REPEAT = gl.GL_MIRRORED_REPEAT
# Flags
#: Context flag: Blending
BLEND = gl.GL_BLEND
#: Context flag: Depth testing
DEPTH_TEST = gl.GL_DEPTH_TEST
#: Context flag: Face culling
CULL_FACE = gl.GL_CULL_FACE
#: Context flag: Enable ``gl_PointSize`` in shaders.
PROGRAM_POINT_SIZE = gl.GL_PROGRAM_POINT_SIZE
# Blend functions
#: Blend function
ZERO = 0x0000
#: Blend function
ONE = 0x0001
#: Blend function
SRC_COLOR = 0x0300
#: Blend function
ONE_MINUS_SRC_COLOR = 0x0301
#: Blend function
SRC_ALPHA = 0x0302
#: Blend function
ONE_MINUS_SRC_ALPHA = 0x0303
#: Blend function
DST_ALPHA = 0x0304
#: Blend function
ONE_MINUS_DST_ALPHA = 0x0305
#: Blend function
DST_COLOR = 0x0306
#: Blend function
ONE_MINUS_DST_COLOR = 0x0307
# Blend equations
    #: Blend equations: source + destination
FUNC_ADD = 0x8006
#: Blend equations: source - destination
FUNC_SUBTRACT = 0x800A
#: Blend equations: destination - source
FUNC_REVERSE_SUBTRACT = 0x800B
#: Blend equations: Minimum of source and destination
MIN = 0x8007
#: Blend equations: Maximum of source and destination
MAX = 0x8008
# Blend mode shortcuts
#: Blend mode shortcut for default blend mode: ``SRC_ALPHA, ONE_MINUS_SRC_ALPHA``
BLEND_DEFAULT = 0x0302, 0x0303
#: Blend mode shortcut for additive blending: ``ONE, ONE``
BLEND_ADDITIVE = 0x0001, 0x0001
    #: Blend mode shortcut for premultiplied alpha: ``SRC_ALPHA, ONE``
BLEND_PREMULTIPLIED_ALPHA = 0x0302, 0x0001
# VertexArray: Primitives
#: Primitive mode
POINTS = gl.GL_POINTS # 0
#: Primitive mode
LINES = gl.GL_LINES # 1
#: Primitive mode
LINE_STRIP = gl.GL_LINE_STRIP # 3
#: Primitive mode
TRIANGLES = gl.GL_TRIANGLES # 4
#: Primitive mode
TRIANGLE_STRIP = gl.GL_TRIANGLE_STRIP # 5
#: Primitive mode
TRIANGLE_FAN = gl.GL_TRIANGLE_FAN # 6
#: Primitive mode
LINES_ADJACENCY = gl.GL_LINES_ADJACENCY # 10
#: Primitive mode
LINE_STRIP_ADJACENCY = gl.GL_LINE_STRIP_ADJACENCY # 11
#: Primitive mode
TRIANGLES_ADJACENCY = gl.GL_TRIANGLES_ADJACENCY # 12
#: Primitive mode
TRIANGLE_STRIP_ADJACENCY = gl.GL_TRIANGLE_STRIP_ADJACENCY # 13
#: Patch mode (tessellation)
PATCHES = gl.GL_PATCHES
# The most common error enums
_errors = {
gl.GL_INVALID_ENUM: "GL_INVALID_ENUM",
gl.GL_INVALID_VALUE: "GL_INVALID_VALUE",
gl.GL_INVALID_OPERATION: "GL_INVALID_OPERATION",
gl.GL_INVALID_FRAMEBUFFER_OPERATION: "GL_INVALID_FRAMEBUFFER_OPERATION",
gl.GL_OUT_OF_MEMORY: "GL_OUT_OF_MEMORY",
gl.GL_STACK_UNDERFLOW: "GL_STACK_UNDERFLOW",
gl.GL_STACK_OVERFLOW: "GL_STACK_OVERFLOW",
}
def __init__(self, window: pyglet.window.Window, gc_mode: str = "auto"):
self._window_ref = weakref.ref(window)
self.limits = Limits(self)
self._gl_version = (self.limits.MAJOR_VERSION, self.limits.MINOR_VERSION)
Context.activate(self)
# Texture unit we use when doing operations on textures to avoid
# affecting currently bound textures in the first units
self.default_texture_unit = self.limits.MAX_TEXTURE_IMAGE_UNITS - 1
# Detect the default framebuffer
self._screen = DefaultFrameBuffer(self)
# Tracking active program
self.active_program: Optional[Program] = None
# Tracking active framebuffer. On context creation the window is the default render target
self.active_framebuffer: Framebuffer = self._screen
self.stats: ContextStats = ContextStats(warn_threshold=1000)
# Hardcoded states
# This should always be enabled
gl.glEnable(gl.GL_TEXTURE_CUBE_MAP_SEAMLESS)
# Set primitive restart index to -1 by default
gl.glEnable(gl.GL_PRIMITIVE_RESTART)
self._primitive_restart_index = -1
self.primitive_restart_index = self._primitive_restart_index
# We enable scissor testing by default.
# This is always set to the same value as the viewport
# to avoid background color affecting areas outside the viewport
gl.glEnable(gl.GL_SCISSOR_TEST)
# States
self._blend_func = self.BLEND_DEFAULT
self._point_size = 1.0
self._flags: Set[int] = set()
# Normal garbage collection as default (what we expect in python)
self._gc_mode = "auto"
self.gc_mode = gc_mode
#: Collected objects to gc when gc_mode is "context_gc"
self.objects = deque()
@property
def window(self) -> Window:
"""
The window this context belongs to.
:type: ``pyglet.Window``
"""
return self._window_ref()
@property
def screen(self) -> Framebuffer:
"""
The framebuffer for the window.
:type: :py:class:`~arcade.Framebuffer`
"""
return self._screen
@property
def fbo(self) -> Framebuffer:
"""
Get the currently active framebuffer.
This property is read-only
:type: :py:class:`arcade.gl.Framebuffer`
"""
return self.active_framebuffer
@property
def gl_version(self) -> Tuple[int, int]:
"""
The OpenGL version as a 2 component tuple
:type: tuple (major, minor) version
"""
return self._gl_version
def gc(self):
"""
Run garbage collection of OpenGL objects for this context.
This is only needed when ``gc_mode`` is ``context_gc``.
"""
        # Loop the deque until all objects are gone.
        # Deleting one object might add new ones, so keep looping until it is empty.
while len(self.objects):
obj = self.objects.pop()
obj.delete()
@property
def gc_mode(self) -> str:
"""
        Get or set the garbage collection mode for OpenGL resources.
        Supported modes are::
            # Default: rely on Python's normal garbage collection ("auto")
            ctx.gc_mode = "auto"
            # Queue finished objects and release them when ctx.gc() is called
            ctx.gc_mode = "context_gc"
"""
return self._gc_mode
@gc_mode.setter
def gc_mode(self, value: str):
modes = ["auto", "context_gc"]
if value not in modes:
raise ValueError("Unsupported gc_mode. Supported modes are:", modes)
self._gc_mode = value
@property
def error(self) -> Union[str, None]:
"""Check OpenGL error
Returns a string representation of the occurring error
        or ``None`` if no error has occurred.
Example::
err = ctx.error
if err:
                raise RuntimeError(f"OpenGL error: {err}")
:type: str
"""
err = gl.glGetError()
if err == gl.GL_NO_ERROR:
return None
return self._errors.get(err, "GL_UNKNOWN_ERROR")
@classmethod
def activate(cls, ctx: "Context"):
"""Mark a context as the currently active one"""
cls.active = ctx
def enable(self, *args):
"""
Enables one or more context flags::
# Single flag
ctx.enable(ctx.BLEND)
# Multiple flags
ctx.enable(ctx.DEPTH_TEST, ctx.CULL_FACE)
"""
self._flags.update(args)
for flag in args:
gl.glEnable(flag)
def enable_only(self, *args):
"""
Enable only some flags. This will disable all other flags.
This is a simple way to ensure that context flag states
are not lingering from other sections of your code base::
# Ensure all flags are disabled (enable no flags)
ctx.enable_only()
# Make sure only blending is enabled
ctx.enable_only(ctx.BLEND)
# Make sure only depth test and culling is enabled
ctx.enable_only(ctx.DEPTH_TEST, ctx.CULL_FACE)
"""
self._flags = set(args)
if self.BLEND in self._flags:
gl.glEnable(self.BLEND)
else:
gl.glDisable(self.BLEND)
if self.DEPTH_TEST in self._flags:
gl.glEnable(self.DEPTH_TEST)
else:
gl.glDisable(self.DEPTH_TEST)
if self.CULL_FACE in self._flags:
gl.glEnable(self.CULL_FACE)
else:
gl.glDisable(self.CULL_FACE)
if self.PROGRAM_POINT_SIZE in self._flags:
gl.glEnable(self.PROGRAM_POINT_SIZE)
else:
gl.glDisable(self.PROGRAM_POINT_SIZE)
def disable(self, *args):
"""
Disable one or more context flags::
# Single flag
ctx.disable(ctx.BLEND)
# Multiple flags
ctx.disable(ctx.DEPTH_TEST, ctx.CULL_FACE)
"""
self._flags -= set(args)
for flag in args:
gl.glDisable(flag)
def is_enabled(self, flag) -> bool:
"""
Check if a context flag is enabled
:type: bool
"""
return flag in self._flags
@property
def viewport(self) -> Tuple[int, int, int, int]:
"""
Get or set the viewport for the currently active framebuffer.
The viewport simply describes what pixels of the screen
OpenGL should render to. Normally it would be the size of
the window's framebuffer::
# 4:3 screen
ctx.viewport = 0, 0, 800, 600
# 1080p
ctx.viewport = 0, 0, 1920, 1080
# Using the current framebuffer size
ctx.viewport = 0, 0, *ctx.screen.size
:type: tuple (x, y, width, height)
"""
return self.active_framebuffer.viewport
@viewport.setter
def viewport(self, value: Tuple[int, int, int, int]):
self.active_framebuffer.viewport = value
@property
def blend_func(self) -> Tuple[int, int]:
"""
        Get or set the blend function::
ctx.blend_func = ctx.ONE, ctx.ONE
:type: tuple (src, dst)
"""
return self._blend_func
@blend_func.setter
def blend_func(self, value: Tuple[int, int]):
self._blend_func = value
gl.glBlendFunc(value[0], value[1])
# def blend_equation(self)
# def front_face(self)
# def cull_face(self)
@property
def patch_vertices(self) -> int:
"""
Get or set number of vertices that will be used to make up a single patch primitive.
Patch primitives are consumed by the tessellation control shader (if present) and subsequently used for tessellation.
:type: int
"""
value = c_int()
gl.glGetIntegerv(gl.GL_PATCH_VERTICES, value)
return value.value
@patch_vertices.setter
def patch_vertices(self, value: int):
if not isinstance(value, int):
raise TypeError("patch_vertices must be an integer")
gl.glPatchParameteri(gl.GL_PATCH_VERTICES, value)
@property
def point_size(self) -> float:
"""float: Get or set the point size."""
return self._point_size
@point_size.setter
def point_size(self, value: float):
        self._point_size = value
        gl.glPointSize(self._point_size)
@property
def primitive_restart_index(self) -> int:
"""Get or set the primitive restart index. Default is -1"""
return self._primitive_restart_index
@primitive_restart_index.setter
def primitive_restart_index(self, value: int):
self._primitive_restart_index = value
gl.glPrimitiveRestartIndex(value)
def finish(self) -> None:
"""Wait until all OpenGL rendering commands are completed"""
gl.glFinish()
# --- Resource methods ---
def buffer(
self, *, data: Optional[Any] = None, reserve: int = 0, usage: str = "static"
) -> Buffer:
"""Create a new OpenGL Buffer object.
        :param Any data: The buffer data. This can be ``bytes`` or an object supporting the buffer protocol.
        :param int reserve: The number of bytes to reserve
:param str usage: Buffer usage. 'static', 'dynamic' or 'stream'
:rtype: :py:class:`~arcade.gl.Buffer`
"""
# create_with_size
return Buffer(self, data, reserve=reserve, usage=usage)
def framebuffer(
self,
*,
color_attachments: Union[Texture, List[Texture]] = None,
depth_attachment: Texture = None
) -> Framebuffer:
"""Create a Framebuffer.
:param List[arcade.gl.Texture] color_attachments: List of textures we want to render into
:param arcade.gl.Texture depth_attachment: Depth texture
:rtype: :py:class:`~arcade.gl.Framebuffer`
"""
return Framebuffer(
self, color_attachments=color_attachments, depth_attachment=depth_attachment
)
def texture(
self,
size: Tuple[int, int],
*,
components: int = 4,
dtype: str = "f1",
data: Any = None,
wrap_x: gl.GLenum = None,
wrap_y: gl.GLenum = None,
filter: Tuple[gl.GLenum, gl.GLenum] = None
) -> Texture:
"""Create a 2D Texture.
Wrap modes: ``GL_REPEAT``, ``GL_MIRRORED_REPEAT``, ``GL_CLAMP_TO_EDGE``, ``GL_CLAMP_TO_BORDER``
Minifying filters: ``GL_NEAREST``, ``GL_LINEAR``, ``GL_NEAREST_MIPMAP_NEAREST``, ``GL_LINEAR_MIPMAP_NEAREST``
``GL_NEAREST_MIPMAP_LINEAR``, ``GL_LINEAR_MIPMAP_LINEAR``
Magnifying filters: ``GL_NEAREST``, ``GL_LINEAR``
:param Tuple[int, int] size: The size of the texture
:param int components: Number of components (1: R, 2: RG, 3: RGB, 4: RGBA)
:param str dtype: The data type of each component: f1, f2, f4 / i1, i2, i4 / u1, u2, u4
:param Any data: The texture data (optional). Can be bytes or an object supporting the buffer protocol.
:param GLenum wrap_x: How the texture wraps in x direction
:param GLenum wrap_y: How the texture wraps in y direction
:param Tuple[GLenum,GLenum] filter: Minification and magnification filter
"""
return Texture(
self,
size,
components=components,
data=data,
dtype=dtype,
wrap_x=wrap_x,
wrap_y=wrap_y,
filter=filter,
)
def depth_texture(self, size: Tuple[int, int], *, data=None) -> Texture:
"""Create a 2D depth texture
:param Tuple[int, int] size: The size of the texture
:param Any data: The texture data (optional). Can be bytes or an object supporting the buffer protocol.
"""
return Texture(self, size, data=data, depth=True)
def geometry(
self,
content: Optional[Sequence[BufferDescription]] = None,
index_buffer: Buffer = None,
mode: int = None,
index_element_size: int = 4,
):
"""
        Create a Geometry instance.
:param list content: List of :py:class:`~arcade.gl.BufferDescription` (optional)
:param Buffer index_buffer: Index/element buffer (optional)
        :param int mode: The default draw mode (optional)
:param int index_element_size: Byte size of the index buffer type. Can be 1, 2 or 4 (8, 16 or 32 bit unsigned integer)
"""
return Geometry(self, content, index_buffer=index_buffer, mode=mode, index_element_size=index_element_size)
def program(
self,
*,
vertex_shader: str,
fragment_shader: str = None,
geometry_shader: str = None,
tess_control_shader: str = None,
tess_evaluation_shader: str = None,
defines: Dict[str, str] = None
) -> Program:
"""Create a :py:class:`~arcade.gl.Program` given the vertex, fragment and geometry shader.
:param str vertex_shader: vertex shader source
:param str fragment_shader: fragment shader source (optional)
:param str geometry_shader: geometry shader source (optional)
:param str tess_control_shader: tessellation control shader source (optional)
:param str tess_evaluation_shader: tessellation evaluation shader source (optional)
:param dict defines: Substitute #defines values in the source (optional)
:rtype: :py:class:`~arcade.gl.Program`
"""
source_vs = ShaderSource(vertex_shader, gl.GL_VERTEX_SHADER)
source_fs = (
ShaderSource(fragment_shader, gl.GL_FRAGMENT_SHADER)
if fragment_shader
else None
)
source_geo = (
ShaderSource(geometry_shader, gl.GL_GEOMETRY_SHADER)
if geometry_shader
else None
)
source_tc = (
ShaderSource(tess_control_shader, gl.GL_TESS_CONTROL_SHADER)
if tess_control_shader
else None
)
source_te = (
ShaderSource(tess_evaluation_shader, gl.GL_TESS_EVALUATION_SHADER)
if tess_evaluation_shader
else None
)
# If we don't have a fragment shader we are doing transform feedback.
# When a geometry shader is present the out attributes will be located there
out_attributes = [] # type: List[str]
if not source_fs:
if source_geo:
out_attributes = source_geo.out_attributes
else:
out_attributes = source_vs.out_attributes
return Program(
self,
vertex_shader=source_vs.get_source(defines=defines),
fragment_shader=source_fs.get_source(defines=defines)
if source_fs
else None,
geometry_shader=source_geo.get_source(defines=defines)
if source_geo
else None,
tess_control_shader=source_tc.get_source(defines=defines)
if source_tc
else None,
tess_evaluation_shader=source_te.get_source(defines=defines)
if source_te
else None,
out_attributes=out_attributes,
)
def query(self):
"""
Create a query object for measuring rendering calls in opengl.
:rtype: :py:class:`~arcade.gl.Query`
"""
return Query(self)
class ContextStats:
def __init__(self, warn_threshold=100):
self.warn_threshold = warn_threshold
# (created, freed)
self.texture = (0, 0)
self.framebuffer = (0, 0)
self.buffer = (0, 0)
self.program = (0, 0)
self.vertex_array = (0, 0)
self.geometry = (0, 0)
def incr(self, key):
created, freed = getattr(self, key)
setattr(self, key, (created + 1, freed))
if created % self.warn_threshold == 0 and created > 0:
LOG.debug(
"%s allocations passed threshold (%s) [created = %s] [freed = %s] [active = %s]",
key,
self.warn_threshold,
created,
freed,
created - freed,
)
def decr(self, key):
created, freed = getattr(self, key)
setattr(self, key, (created, freed + 1))
class Limits:
"""OpenGL Limitations"""
def __init__(self, ctx):
self._ctx = ctx
#: Minor version number of the OpenGL API supported by the current context
self.MINOR_VERSION = self.get(gl.GL_MINOR_VERSION)
#: Major version number of the OpenGL API supported by the current context.
self.MAJOR_VERSION = self.get(gl.GL_MAJOR_VERSION)
self.VENDOR = self.get_str(gl.GL_VENDOR)
self.RENDERER = self.get_str(gl.GL_RENDERER)
#: Value indicating the number of sample buffers associated with the framebuffer
self.SAMPLE_BUFFERS = self.get(gl.GL_SAMPLE_BUFFERS)
#: An estimate of the number of bits of subpixel resolution
#: that are used to position rasterized geometry in window coordinates
self.SUBPIXEL_BITS = self.get(gl.GL_SUBPIXEL_BITS)
#: A mask value indicating what context profile is used (core, compat etc.)
self.CONTEXT_PROFILE_MASK = self.get(gl.GL_CONTEXT_PROFILE_MASK)
#: Minimum required alignment for uniform buffer sizes and offset
self.UNIFORM_BUFFER_OFFSET_ALIGNMENT = self.get(
gl.GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT
)
#: Value indicates the maximum number of layers allowed in an array texture, and must be at least 256
self.MAX_ARRAY_TEXTURE_LAYERS = self.get(gl.GL_MAX_ARRAY_TEXTURE_LAYERS)
#: A rough estimate of the largest 3D texture that the GL can handle. The value must be at least 64
self.MAX_3D_TEXTURE_SIZE = self.get(gl.GL_MAX_3D_TEXTURE_SIZE)
#: Maximum number of color attachments in a framebuffer
self.MAX_COLOR_ATTACHMENTS = self.get(gl.GL_MAX_COLOR_ATTACHMENTS)
#: Maximum number of samples in a color multisample texture
self.MAX_COLOR_TEXTURE_SAMPLES = self.get(gl.GL_MAX_COLOR_TEXTURE_SAMPLES)
#: the number of words for fragment shader uniform variables in all uniform blocks
self.MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS
)
#: Number of words for geometry shader uniform variables in all uniform blocks
self.MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS
)
#: Maximum supported texture image units that can be used to access texture maps from the vertex shader
self.MAX_COMBINED_TEXTURE_IMAGE_UNITS = self.get(
gl.GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS
)
#: Maximum number of uniform blocks per program
self.MAX_COMBINED_UNIFORM_BLOCKS = self.get(gl.GL_MAX_COMBINED_UNIFORM_BLOCKS)
#: Number of words for vertex shader uniform variables in all uniform blocks
self.MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS
)
#: A rough estimate of the largest cube-map texture that the GL can handle
self.MAX_CUBE_MAP_TEXTURE_SIZE = self.get(gl.GL_MAX_CUBE_MAP_TEXTURE_SIZE)
#: Maximum number of samples in a multisample depth or depth-stencil texture
self.MAX_DEPTH_TEXTURE_SAMPLES = self.get(gl.GL_MAX_DEPTH_TEXTURE_SAMPLES)
#: Maximum number of simultaneous outputs that may be written in a fragment shader
self.MAX_DRAW_BUFFERS = self.get(gl.GL_MAX_DRAW_BUFFERS)
#: Maximum number of active draw buffers when using dual-source blending
self.MAX_DUAL_SOURCE_DRAW_BUFFERS = self.get(gl.GL_MAX_DUAL_SOURCE_DRAW_BUFFERS)
#: Recommended maximum number of vertex array indices
self.MAX_ELEMENTS_INDICES = self.get(gl.GL_MAX_ELEMENTS_INDICES)
#: Recommended maximum number of vertex array vertices
self.MAX_ELEMENTS_VERTICES = self.get(gl.GL_MAX_ELEMENTS_VERTICES)
#: Maximum number of components of the inputs read by the fragment shader
self.MAX_FRAGMENT_INPUT_COMPONENTS = self.get(
gl.GL_MAX_FRAGMENT_INPUT_COMPONENTS
)
#: Maximum number of individual floating-point, integer, or boolean values that can be
#: held in uniform variable storage for a fragment shader
self.MAX_FRAGMENT_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_FRAGMENT_UNIFORM_COMPONENTS
)
#: maximum number of individual 4-vectors of floating-point, integer,
#: or boolean values that can be held in uniform variable storage for a fragment shader
self.MAX_FRAGMENT_UNIFORM_VECTORS = self.get(gl.GL_MAX_FRAGMENT_UNIFORM_VECTORS)
#: Maximum number of uniform blocks per fragment shader.
self.MAX_FRAGMENT_UNIFORM_BLOCKS = self.get(gl.GL_MAX_FRAGMENT_UNIFORM_BLOCKS)
#: Maximum number of components of inputs read by a geometry shader
self.MAX_GEOMETRY_INPUT_COMPONENTS = self.get(
gl.GL_MAX_GEOMETRY_INPUT_COMPONENTS
)
#: Maximum number of components of outputs written by a geometry shader
self.MAX_GEOMETRY_OUTPUT_COMPONENTS = self.get(
gl.GL_MAX_GEOMETRY_OUTPUT_COMPONENTS
)
#: Maximum supported texture image units that can be used to access texture maps from the geometry shader
self.MAX_GEOMETRY_TEXTURE_IMAGE_UNITS = self.get(
gl.GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS
)
#: Maximum number of uniform blocks per geometry shader
self.MAX_GEOMETRY_UNIFORM_BLOCKS = self.get(gl.GL_MAX_GEOMETRY_UNIFORM_BLOCKS)
#: Maximum number of individual floating-point, integer, or boolean values that can
#: be held in uniform variable storage for a geometry shader
self.MAX_GEOMETRY_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_GEOMETRY_UNIFORM_COMPONENTS
)
#: Maximum number of samples supported in integer format multisample buffers
self.MAX_INTEGER_SAMPLES = self.get(gl.GL_MAX_INTEGER_SAMPLES)
#: Maximum samples for a framebuffer
self.MAX_SAMPLES = self.get(gl.GL_MAX_SAMPLES)
#: A rough estimate of the largest rectangular texture that the GL can handle
self.MAX_RECTANGLE_TEXTURE_SIZE = self.get(gl.GL_MAX_RECTANGLE_TEXTURE_SIZE)
#: Maximum supported size for renderbuffers
self.MAX_RENDERBUFFER_SIZE = self.get(gl.GL_MAX_RENDERBUFFER_SIZE)
#: Maximum number of sample mask words
self.MAX_SAMPLE_MASK_WORDS = self.get(gl.GL_MAX_SAMPLE_MASK_WORDS)
#: Maximum number of texels allowed in the texel array of a texture buffer object
self.MAX_TEXTURE_BUFFER_SIZE = self.get(gl.GL_MAX_TEXTURE_BUFFER_SIZE)
#: Maximum number of uniform buffer binding points on the context
self.MAX_UNIFORM_BUFFER_BINDINGS = self.get(gl.GL_MAX_UNIFORM_BUFFER_BINDINGS)
#: The value gives a rough estimate of the largest texture that the GL can handle
self.MAX_TEXTURE_SIZE = self.get(gl.GL_MAX_TEXTURE_SIZE)
#: Maximum size in basic machine units of a uniform block
self.MAX_UNIFORM_BLOCK_SIZE = self.get(gl.GL_MAX_UNIFORM_BLOCK_SIZE)
        #: The number of 4-vectors for varying variables
self.MAX_VARYING_VECTORS = self.get(gl.GL_MAX_VARYING_VECTORS)
#: Maximum number of 4-component generic vertex attributes accessible to a vertex shader.
self.MAX_VERTEX_ATTRIBS = self.get(gl.GL_MAX_VERTEX_ATTRIBS)
#: Maximum supported texture image units that can be used to access texture maps from the vertex shader.
self.MAX_VERTEX_TEXTURE_IMAGE_UNITS = self.get(
gl.GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS
)
#: Maximum number of individual floating-point, integer, or boolean values that
#: can be held in uniform variable storage for a vertex shader
self.MAX_VERTEX_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_VERTEX_UNIFORM_COMPONENTS
)
#: Maximum number of 4-vectors that may be held in uniform variable storage for the vertex shader
self.MAX_VERTEX_UNIFORM_VECTORS = self.get(gl.GL_MAX_VERTEX_UNIFORM_VECTORS)
#: Maximum number of components of output written by a vertex shader
self.MAX_VERTEX_OUTPUT_COMPONENTS = self.get(gl.GL_MAX_VERTEX_OUTPUT_COMPONENTS)
#: Maximum number of uniform blocks per vertex shader.
self.MAX_VERTEX_UNIFORM_BLOCKS = self.get(gl.GL_MAX_VERTEX_UNIFORM_BLOCKS)
# self.MAX_VERTEX_ATTRIB_RELATIVE_OFFSET = self.get(gl.GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET)
# self.MAX_VERTEX_ATTRIB_BINDINGS = self.get(gl.GL_MAX_VERTEX_ATTRIB_BINDINGS)
self.MAX_TEXTURE_IMAGE_UNITS = self.get(gl.GL_MAX_TEXTURE_IMAGE_UNITS)
# TODO: Missing in pyglet
# self.MAX_TEXTURE_MAX_ANISOTROPY = self.get_float(gl.GL_MAX_TEXTURE_MAX_ANISOTROPY)
err = self._ctx.error
if err:
from warnings import warn
warn("Error happened while querying of limits. Moving on ..")
def get(self, enum: gl.GLenum) -> int:
"""Get an integer limit"""
value = c_int()
gl.glGetIntegerv(enum, value)
return value.value
def get_float(self, enum) -> float:
"""Get a float limit"""
value = c_float()
gl.glGetFloatv(enum, value)
return value.value
def get_str(self, enum: gl.GLenum) -> str:
"""Get a string limit"""
return cast(gl.glGetString(enum), c_char_p).value.decode() # type: ignore
| 2.078125 | 2 |
api/app/models/bookings/exam.py | pixelater/queue-management | 0 | 1646 | <reponame>pixelater/queue-management
'''Copyright 2018 Province of British Columbia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from app.models.bookings import Base
from qsystem import db
class Exam(Base):
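    """An exam record for an office and exam type, optionally linked to a booking."""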
exam_id = db.Column(db.Integer, primary_key=True, autoincrement=True, nullable=False)
booking_id = db.Column(db.Integer, db.ForeignKey("booking.booking_id", ondelete="set null"), nullable=True)
exam_type_id = db.Column(db.Integer, db.ForeignKey("examtype.exam_type_id"), nullable=False)
office_id = db.Column(db.Integer, db.ForeignKey("office.office_id"), nullable=False)
event_id = db.Column(db.String(25), nullable=False)
exam_name = db.Column(db.String(50), nullable=False)
examinee_name = db.Column(db.String(50), nullable=True)
expiry_date = db.Column(db.DateTime, nullable=True)
notes = db.Column(db.String(400), nullable=True)
exam_received_date = db.Column(db.DateTime, nullable=True)
session_number = db.Column(db.Integer, nullable=True)
number_of_students = db.Column(db.Integer, nullable=True)
exam_method = db.Column(db.String(15), nullable=False)
deleted_date = db.Column(db.String(50), nullable=True)
exam_returned_ind = db.Column(db.Integer, nullable=False, default=0)
exam_returned_tracking_number = db.Column(db.String(50), nullable=True)
offsite_location = db.Column(db.String(50), nullable=True)
booking = db.relationship("Booking")
exam_type = db.relationship("ExamType")
office = db.relationship("Office")
def __repr__(self):
return '<Exam Name: (name={self.exam_name!r})>'.format(self=self)
def __init__(self, **kwargs):
super(Exam, self).__init__(**kwargs)
| 1.773438 | 2 |
leetcode/1672 Richest Customer Wealth.py | jaredliw/python-question-bank | 1 | 1647 | <filename>leetcode/1672 Richest Customer Wealth.py
class Solution(object):
def maximumWealth(self, accounts):
"""
:type accounts: List[List[int]]
:rtype: int
"""
# Runtime: 36 ms
# Memory: 13.5 MB
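        # Each customer's wealth is the sum of their account balances; the answer is the maximum of those sums.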
return max(map(sum, accounts))
| 3.109375 | 3 |
datatableview/tests/test_helpers.py | gregneagle/sal | 2 | 1648 | <filename>datatableview/tests/test_helpers.py
# -*- encoding: utf-8 -*-
from datetime import datetime
from functools import partial
from django import get_version
from datatableview import helpers
import six
from .testcase import DatatableViewTestCase
from .test_app.models import ExampleModel, RelatedM2MModel
if get_version().split('.') < ['1', '7']:
test_data_fixture = 'test_data_legacy.json'
else:
test_data_fixture = 'test_data.json'
class HelpersTests(DatatableViewTestCase):
fixtures = [test_data_fixture]
def test_link_to_model(self):
""" Verifies that link_to_model works. """
helper = helpers.link_to_model
# Verify that a model without get_absolute_url() raises a complaint
related = RelatedM2MModel.objects.get(pk=1)
with self.assertRaises(AttributeError) as cm:
helper(related)
self.assertEqual(str(cm.exception), "'RelatedM2MModel' object has no attribute 'get_absolute_url'")
# Verify simple use
instance = ExampleModel.objects.get(pk=1)
output = helper(instance)
self.assertEqual(output, '<a href="#1">ExampleModel 1</a>')
# Verify text override
output = helper(instance, text="Special text")
self.assertEqual(output, '<a href="#1">Special text</a>')
# Verify ``key`` access to transition an instance to a related field
instance = ExampleModel.objects.get(pk=2)
secondary_helper = helper(key=lambda o: o.related)
output = secondary_helper(instance)
self.assertEqual(output, '<a href="#1">RelatedModel object</a>')
# Verify ``key`` access version of custom text
output = secondary_helper(instance, text="Special text")
self.assertEqual(output, '<a href="#1">Special text</a>')
def test_make_boolean_checkmark(self):
""" Verifies that make_boolean_checkmark works. """
helper = helpers.make_boolean_checkmark
# Verify simple use
output = helper("True-ish value")
self.assertEqual(output, '✔')
output = helper("")
self.assertEqual(output, '✘')
# Verify custom values
output = helper("True-ish value", true_value="Yes", false_value="No")
self.assertEqual(output, 'Yes')
output = helper("", true_value="Yes", false_value="No")
self.assertEqual(output, 'No')
def test_format_date(self):
""" Verifies that format_date works. """
helper = helpers.format_date
# Verify simple use
data = datetime.now()
secondary_helper = helper("%m/%d/%Y")
output = secondary_helper(data)
self.assertEqual(output, data.strftime("%m/%d/%Y"))
# Verify that None objects get swallowed without complaint.
# This helps promise that the helper won't blow up for models.DateTimeField that are allowed
# to be null.
output = secondary_helper(None)
self.assertEqual(output, "")
def test_format(self):
""" Verifies that format works. """
helper = helpers.format
# Verify simple use
data = 1234567890
secondary_helper = helper("{0:,}")
output = secondary_helper(data)
self.assertEqual(output, "{0:,}".format(data))
# Verify ``cast`` argument
data = "1234.56789"
secondary_helper = helper("{0:.2f}", cast=float)
output = secondary_helper(data)
self.assertEqual(output, "{0:.2f}".format(float(data)))
def test_through_filter(self):
""" Verifies that through_filter works. """
helper = helpers.through_filter
target_function = lambda data, arg=None: (data, arg)
# Verify simple use
data = "Data string"
secondary_helper = helper(target_function)
output = secondary_helper(data)
self.assertEqual(output, (data, None))
# Verify ``arg`` argument
secondary_helper = helper(target_function, arg="Arg data")
output = secondary_helper(data)
self.assertEqual(output, (data, "Arg data"))
def test_itemgetter(self):
""" Verifies that itemgetter works. """
helper = helpers.itemgetter
# Verify simple index access
data = list(range(5))
secondary_helper = helper(-1)
output = secondary_helper(data)
self.assertEqual(output, data[-1])
# Verify slicing access
secondary_helper = helper(slice(1, 3))
output = secondary_helper(data)
self.assertEqual(output, data[1:3])
# Verify ellipsis works for strings
data = str(range(10))
secondary_helper = helper(slice(0, 5), ellipsis=True)
output = secondary_helper(data)
self.assertEqual(output, data[:5] + "...")
# Verify ellipsis can be customized
secondary_helper = helper(slice(0, 5), ellipsis="custom")
output = secondary_helper(data)
self.assertEqual(output, data[:5] + "custom")
# Verify ellipsis does nothing for non-string data types
data = range(10)
output = secondary_helper(data)
self.assertEqual(output, data[:5])
def test_attrgetter(self):
""" Verifies that attrgetter works. """
helper = helpers.attrgetter
# Verify simple attr lookup
data = ExampleModel.objects.get(pk=1)
secondary_helper = helper('pk')
output = secondary_helper(data)
self.assertEqual(output, data.pk)
        # Verify bad attribute lookup
data = ExampleModel.objects.get(pk=1)
secondary_helper = helper('bad field name')
with self.assertRaises(AttributeError) as cm:
output = secondary_helper(data)
self.assertEqual(str(cm.exception), "'ExampleModel' object has no attribute 'bad field name'")
def test_make_xeditable(self):
""" Verifies that make_xeditable works. """
helper = helpers.make_xeditable
# Items that the helper normally expects in a callback context
internals = {'field_name': 'name'}
# Verify chain calls don't trigger rendering
secondary_helper = helper()
tertiary_helper = secondary_helper()
self.assertEqual(type(secondary_helper), partial)
self.assertEqual(type(tertiary_helper), partial)
# Verify chain ends with provision of a value
data = ExampleModel.objects.get(pk=1)
# This needs a "url" arg because we want to test successful use
output = tertiary_helper(data, url="/", **internals)
self.assertTrue(isinstance(output, six.string_types))
# Verify that no "view" kwarg means the url is required from the call
with self.assertRaises(ValueError) as cm:
tertiary_helper(data, **internals)
self.assertEqual(str(cm.exception), "'make_xeditable' cannot determine a value for 'url'.")
# Verify kwargs accumulate
kwargs1 = { 'type': 'textarea' }
kwargs2 = { 'other_arg': True }
secondary_helper = helper(**kwargs1)
expected_kwargs = dict(kwargs1, extra_attrs=[])
self.assertEqual(secondary_helper.keywords, expected_kwargs)
tertiary_helper = secondary_helper(**kwargs2)
expected_kwargs = dict(kwargs1, **dict(kwargs2, extra_attrs=[]))
self.assertEqual(tertiary_helper.keywords, expected_kwargs)
# Verify default kwarg names end up as attributes
data = ExampleModel.objects.get(pk=1)
kwargs = {
'pk': "PK DATA",
'type': "TYPE DATA",
'url': "URL DATA",
'source': "SOURCE DATA",
'title': "TITLE DATA",
'placeholder': "PLACEHOLDER DATA",
# Extra stuff not in anticipated to appear in rendered string
'special': "SPECIAL DATA",
'data_custom': "DATA-CUSTOM DATA",
}
secondary_helper = helper(**kwargs)
output = secondary_helper(data, **internals)
expected_output = """
<a href="#" data-name="name"
data-pk="PK DATA"
data-placeholder="PLACEHOLDER DATA"
data-source="SOURCE DATA"
data-title="TITLE DATA"
data-type="TYPE DATA"
data-url="URL DATA"
data-value="1"
data-xeditable="xeditable">
ExampleModel 1
</a>
"""
self.assertHTMLEqual(output, expected_output)
# Verify that explicit additions via ``extra_attrs`` allows kwargs to appear in HTML as
# "data-*" attributes.
secondary_helper = helper(extra_attrs=['special', 'data_custom', 'fake'], **kwargs)
output = secondary_helper(data, **internals)
expected_output = """
<a href="#" data-name="name"
data-pk="PK DATA"
data-placeholder="PLACEHOLDER DATA"
data-source="SOURCE DATA"
data-title="TITLE DATA"
data-type="TYPE DATA"
data-url="URL DATA"
data-value="1"
data-special="SPECIAL DATA"
data-custom="DATA-CUSTOM DATA"
data-xeditable="xeditable">
ExampleModel 1
</a>
"""
self.assertHTMLEqual(output, expected_output)
| 2.390625 | 2 |
discordbot.py | naari3/seibaribot | 0 | 1649 | <reponame>naari3/seibaribot
import traceback
from os import getenv
import discord
from discord import Message
from discord.ext import commands
from discord.ext.commands import Context
from asyncio import sleep
import asyncio
client = discord.Client()
# Set the bot's command prefix to "!"
bot = commands.Bot(command_prefix='!')
# ID of the Giratina channel
GIRATINA_CHANNEL_ID = 940610524415144036
WIP_CHANNEL_ID = 940966825087361025
@bot.event
async def on_command_error(ctx, error):
orig_error = getattr(error, 'original', error)
error_msg = ''.join(
traceback.TracebackException.from_exception(orig_error).format())
await ctx.send(error_msg)
# Function that sends a message on startup
async def ready_greet():
channel = bot.get_channel(GIRATINA_CHANNEL_ID)
await channel.send('ギラティナ、オォン!')
# Function executed when the bot starts up
@bot.event
async def on_ready():
await ready_greet()
# Ping-pong
@bot.command()
async def ping(ctx):
await ctx.send('pong')
@bot.event
async def on_message(message):
    # Ignore messages sent by bots
if message.author.bot:
return
    # Word-hunting for "Donald" (ドナルド) - https://qiita.com/sizumita/items/9d44ae7d1ce007391699
    # If the message body contains 'ドナルド'
if 'ドナルド' in str(message.content):
        # Decide the message to send
        # and post it to the channel the message was sent in
await message.channel.send('https://tenor.com/view/ronald-mcdonald-insanity-ronald-mcdonald-gif-21974293')
    # If the message has attachments and was posted in the WIP channel
if message.attachments and message.channel.id == WIP_CHANNEL_ID:
for attachment in message.attachments:
            # If the attachment is an audio file (e.g. mp3 or wav)
# https://discordpy.readthedocs.io/ja/latest/api.html#attachment
if attachment.content_type and "audio" in attachment.content_type:
await attachment.save("input.mp3")
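                # Combine the static image input.jpg with the saved audio into an MP4,
                # running ffmpeg as an async subprocess so the bot stays responsive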
command = "ffmpeg -y -loop 1 -i input.jpg -i input.mp3 -vcodec libx264 -vb 50k -acodec aac -strict experimental -ab 128k -ac 2 -ar 48000 -pix_fmt yuv420p -shortest output.mp4"
proc = await asyncio.create_subprocess_exec(
*command.split(" "),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
await message.channel.send(file=discord.File("output.mp4"))
await bot.process_commands(message)
# Chiiba-kun's Nanohana exercise video
@bot.command()
async def chiibakun(ctx):
await ctx.send('https://www.youtube.com/watch?v=dC0eie-WQss')
# Send a picture of Kaosu-chan
@bot.command()
async def kaosu(ctx):
await ctx.send('https://pbs.twimg.com/media/E512yaSVIAQxfNn?format=jpg&name=large')
# "Ikisu" (inm meme response)
@bot.command()
async def inm(ctx):
await ctx.send('聖バリ「イキスギィイクイク!!!ンアッー!!!マクラがデカすぎる!!!」\n\n'
f'{ctx.author.name}「聖なるバリア -ミラーフォース-、淫夢はもうやめてよ!淫夢ごっこは恥ずかしいよ!」\n\n聖バリ「{ctx.author.name}'
'、おっ大丈夫か大丈夫か〜???バッチェ冷えてるぞ〜淫夢が大好きだってはっきりわかんだね」')
# Send a picture of Giratina
@bot.command()
async def giratina(ctx):
await ctx.send('https://img.gamewith.jp/article/thumbnail/rectangle/36417.png')
# Play bokuseku.mp3 and leave - https://qiita.com/sizumita/items/cafd00fe3e114d834ce3
@bot.command()
async def bokuseku(ctx):
if ctx.author.voice is None:
await ctx.channel.send('望月くん・・・ボイスチャンネルに来なさい')
return
    # Connect to the voice channel
    await ctx.author.voice.channel.connect()
    # Play the audio
    ctx.guild.voice_client.play(discord.FFmpegPCMAudio('bokuseku.mp3'))
    # Wait while the audio is still playing
    while ctx.guild.voice_client.is_playing():
        await sleep(1)
    # Disconnect
await ctx.guild.voice_client.disconnect()
token = getenv('DISCORD_BOT_TOKEN')
bot.run(token)
| 2.375 | 2 |
test/cpp/naming/utils/dns_server.py | arghyadip01/grpc | 9 | 1650 | #!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts a local DNS server for use in tests"""
import argparse
import sys
import yaml
import signal
import os
import threading
import time
import twisted
import twisted.internet
import twisted.internet.reactor
import twisted.internet.threads
import twisted.internet.defer
import twisted.internet.protocol
import twisted.names
import twisted.names.client
import twisted.names.dns
import twisted.names.server
from twisted.names import client, server, common, authority, dns
import argparse
import platform
_SERVER_HEALTH_CHECK_RECORD_NAME = 'health-check-local-dns-server-is-alive.resolver-tests.grpctestingexp' # missing end '.' for twisted syntax
_SERVER_HEALTH_CHECK_RECORD_DATA = '172.16.58.3'
class NoFileAuthority(authority.FileAuthority):
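    """A FileAuthority built from in-memory records instead of a zone file."""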
def __init__(self, soa, records):
# skip FileAuthority
common.ResolverBase.__init__(self)
self.soa = soa
self.records = records
def start_local_dns_server(args):
all_records = {}
def _push_record(name, r):
print('pushing record: |%s|' % name)
if all_records.get(name) is not None:
all_records[name].append(r)
return
all_records[name] = [r]
def _maybe_split_up_txt_data(name, txt_data, r_ttl):
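        # DNS TXT record character-strings are limited to 255 bytes each,
        # so longer data is split into multiple 255-byte chunks.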
start = 0
txt_data_list = []
while len(txt_data[start:]) > 0:
next_read = len(txt_data[start:])
if next_read > 255:
next_read = 255
txt_data_list.append(txt_data[start:start + next_read])
start += next_read
_push_record(name, dns.Record_TXT(*txt_data_list, ttl=r_ttl))
with open(args.records_config_path) as config:
test_records_config = yaml.load(config)
common_zone_name = test_records_config['resolver_tests_common_zone_name']
for group in test_records_config['resolver_component_tests']:
for name in group['records'].keys():
for record in group['records'][name]:
r_type = record['type']
r_data = record['data']
r_ttl = int(record['TTL'])
record_full_name = '%s.%s' % (name, common_zone_name)
assert record_full_name[-1] == '.'
record_full_name = record_full_name[:-1]
if r_type == 'A':
_push_record(record_full_name,
dns.Record_A(r_data, ttl=r_ttl))
if r_type == 'AAAA':
_push_record(record_full_name,
dns.Record_AAAA(r_data, ttl=r_ttl))
if r_type == 'SRV':
p, w, port, target = r_data.split(' ')
p = int(p)
w = int(w)
port = int(port)
target_full_name = '%s.%s' % (target, common_zone_name)
r_data = '%s %s %s %s' % (p, w, port, target_full_name)
_push_record(
record_full_name,
dns.Record_SRV(p, w, port, target_full_name, ttl=r_ttl))
if r_type == 'TXT':
_maybe_split_up_txt_data(record_full_name, r_data, r_ttl)
# Add an optional IPv4 record is specified
if args.add_a_record:
extra_host, extra_host_ipv4 = args.add_a_record.split(':')
_push_record(extra_host, dns.Record_A(extra_host_ipv4, ttl=0))
# Server health check record
_push_record(_SERVER_HEALTH_CHECK_RECORD_NAME,
dns.Record_A(_SERVER_HEALTH_CHECK_RECORD_DATA, ttl=0))
soa_record = dns.Record_SOA(mname=common_zone_name)
test_domain_com = NoFileAuthority(
soa=(common_zone_name, soa_record),
records=all_records,
)
server = twisted.names.server.DNSServerFactory(
authorities=[test_domain_com], verbose=2)
server.noisy = 2
twisted.internet.reactor.listenTCP(args.port, server)
dns_proto = twisted.names.dns.DNSDatagramProtocol(server)
dns_proto.noisy = 2
twisted.internet.reactor.listenUDP(args.port, dns_proto)
print('starting local dns server on 127.0.0.1:%s' % args.port)
print('starting twisted.internet.reactor')
twisted.internet.reactor.suggestThreadPoolSize(1)
twisted.internet.reactor.run()
def _quit_on_signal(signum, _frame):
print('Received SIGNAL %d. Quitting with exit code 0' % signum)
twisted.internet.reactor.stop()
sys.stdout.flush()
sys.exit(0)
def flush_stdout_loop():
num_timeouts_so_far = 0
sleep_time = 1
# Prevent zombies. Tests that use this server are short-lived.
max_timeouts = 60 * 10
while num_timeouts_so_far < max_timeouts:
sys.stdout.flush()
time.sleep(sleep_time)
num_timeouts_so_far += 1
    print('Process timeout reached, or cancelled. Exiting 0.')
os.kill(os.getpid(), signal.SIGTERM)
def main():
argp = argparse.ArgumentParser(
description='Local DNS Server for resolver tests')
argp.add_argument('-p',
'--port',
default=None,
type=int,
help='Port for DNS server to listen on for TCP and UDP.')
argp.add_argument(
'-r',
'--records_config_path',
default=None,
type=str,
        help=('Path to the resolver_test_record_groups.yaml file. '
'Defaults to path needed when the test is invoked as part '
'of run_tests.py.'))
argp.add_argument(
'--add_a_record',
default=None,
type=str,
help=('Add an A record via the command line. Useful for when we '
'need to serve a one-off A record that is under a '
              'different domain than the rest of the records configured in '
'--records_config_path (which all need to be under the '
'same domain). Format: <name>:<ipv4 address>'))
args = argp.parse_args()
signal.signal(signal.SIGTERM, _quit_on_signal)
signal.signal(signal.SIGINT, _quit_on_signal)
output_flush_thread = threading.Thread(target=flush_stdout_loop)
output_flush_thread.setDaemon(True)
output_flush_thread.start()
start_local_dns_server(args)
if __name__ == '__main__':
main()
| 2.171875 | 2 |
colour/examples/models/examples_ictcp.py | BPearlstine/colour | 2 | 1651 | # -*- coding: utf-8 -*-
"""
Showcases *ICTCP* *colour encoding* computations.
"""
import numpy as np
import colour
from colour.utilities import message_box
message_box('"ICTCP" Colour Encoding Computations')
RGB = np.array([0.45620519, 0.03081071, 0.04091952])
message_box(('Converting from "ITU-R BT.2020" colourspace to "ICTCP" colour '
'encoding given "RGB" values:\n'
'\n\t{0}'.format(RGB)))
print(colour.RGB_to_ICTCP(RGB))
print('\n')
ICTCP = np.array([0.07351364, 0.00475253, 0.09351596])
message_box(('Converting from "ICTCP" colour encoding to "ITU-R BT.2020" '
'colourspace given "ICTCP" values:\n'
'\n\t{0}'.format(ICTCP)))
print(colour.ICTCP_to_RGB(ICTCP))
| 3.09375 | 3 |
app/core/model/routine.py | MauricioAntonioMartinez/django-workout-tracker-api | 0 | 1652 | <reponame>MauricioAntonioMartinez/django-workout-tracker-api
import os
import uuid
from django.conf import settings  # this is how we can retrieve variables
# for the settings file
from django.contrib.auth.models import (AbstractBaseUser, BaseUserManager,
PermissionsMixin)
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
from multiselectfield import MultiSelectField
# Maneger User class is the class that provides the creation
# of user or admin and all methods out of the box
from rest_framework import exceptions
from rest_framework.authentication import TokenAuthentication
from user.custom_token import ExpiringToken
from .exercise import BaseSerie
class RoutineDay(models.Model):
name = models.CharField(max_length=255, blank=True)
routine = models.ForeignKey(
'Routine', related_name='routines', on_delete=models.CASCADE)
def sets(self):
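        """Return the SetRoutine objects that belong to this routine day."""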
return SetRoutine.objects.filter(routine=self)
class Routine(models.Model):
name = models.CharField(max_length=255)
user = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
class SerieRoutine(BaseSerie):
father_set = models.ForeignKey(
'SetRoutine', on_delete=models.CASCADE, related_name='series')
class SetRoutine(models.Model):
exercise = models.ForeignKey(
'Exercise', on_delete=models.CASCADE)
routine = models.ForeignKey(
'RoutineDay', on_delete=models.CASCADE) | 2.28125 | 2 |
example_scripts/transect_tutorial.py | British-Oceanographic-Data-Centre/COAsT | 8 | 1653 | """
This is a demonstration script for using the Transect class in the COAsT
package. This object has strict data formatting requirements, which are
outlined in transect.py.
Transect subsetting (a vertical slice of data between two coordinates): Creating them and performing some custom diagnostics with them.
---
In this tutorial we take a look at subsetting the model data along a transect (a custom straight line) and creating some bespoke diagnostics along it. We look at:
1. Creating a TRANSECT object, defined between two points.
2. Plotting data along a transect.
3. Calculating flow normal to the transect
"""
## Create a transect subset of the example dataset
# Load packages and define some file paths
import coast
import xarray as xr
import matplotlib.pyplot as plt
fn_nemo_dat_t = "./example_files/nemo_data_T_grid.nc"
fn_nemo_dat_u = "./example_files/nemo_data_U_grid.nc"
fn_nemo_dat_v = "./example_files/nemo_data_V_grid.nc"
fn_nemo_dom = "./example_files/COAsT_example_NEMO_domain.nc"
# Configuration files describing the data files
fn_config_t_grid = "./config/example_nemo_grid_t.json"
fn_config_f_grid = "./config/example_nemo_grid_f.json"
fn_config_u_grid = "./config/example_nemo_grid_u.json"
fn_config_v_grid = "./config/example_nemo_grid_v.json"
# %% Load data variables that are on the NEMO t-grid
nemo_t = coast.Gridded(fn_data=fn_nemo_dat_t, fn_domain=fn_nemo_dom, config=fn_config_t_grid)
# Now create a transect between the points (54 N 15 W) and (56 N, 12 W) using the `coast.TransectT` object. This needs to be passed the corresponding NEMO object and transect end points. The model points closest to these coordinates will be selected as the transect end points.
tran_t = coast.TransectT(nemo_t, (54, -15), (56, -12))
# Inspect the data
tran_t.data
# where `r_dim` is the dimension along the transect.
# %% Plot the data
# It is simple to plot a scalar such as temperature along the transect:
temp_mean = tran_t.data.temperature.mean(dim="t_dim")
plt.figure()
temp_mean.plot.pcolormesh(y="depth_0", yincrease=False)
plt.show()
# %% Flow across the transect
# With NEMO’s staggered grid, the first step is to define the transect on the f-grid so that the velocity components are between f-points. We do not need any model data on the f-grid, just the grid information, so create a nemo f-grid object
nemo_f = coast.Gridded(fn_domain=fn_nemo_dom, config=fn_config_f_grid)
# and a transect on the f-grid
tran_f = coast.TransectF(nemo_f, (54, -15), (56, -12))
tran_f.data
# We also need the i- and j-components of velocity so (lazy) load the model data on the u- and v-grid grids
nemo_u = coast.Gridded(fn_data=fn_nemo_dat_u, fn_domain=fn_nemo_dom, config=fn_config_u_grid)
nemo_v = coast.Gridded(fn_data=fn_nemo_dat_v, fn_domain=fn_nemo_dom, config=fn_config_v_grid)
# Now we can calculate the flow across the transect with the method
tran_f.calc_flow_across_transect(nemo_u, nemo_v)
# The flow across the transect is stored in a new dataset where the variables are all defined at the points between f-points.
tran_f.data_cross_tran_flow
# For example, to plot the time averaged velocity across the transect, we can plot the ‘normal_velocities’ variable
cross_velocity_mean = tran_f.data_cross_tran_flow.normal_velocities.mean(dim="t_dim")
plt.figure()
cross_velocity_mean.rolling(r_dim=2).mean().plot.pcolormesh(yincrease=False, y="depth_0", cbar_kwargs={"label": "m/s"})
plt.show()
# or the volume transport across the transect, we can plot the ‘normal_transports’ variable
plt.figure()
cross_transport_mean = tran_f.data_cross_tran_flow.normal_transports.mean(dim="t_dim")
cross_transport_mean.rolling(r_dim=2).mean().plot()
plt.ylabel("Sv")
plt.show()
| 3.265625 | 3 |
diofant/logic/boolalg.py | skirpichev/diofant | 0 | 1654 | <gh_stars>0
"""
Boolean algebra module for Diofant.
"""
from collections import defaultdict
from itertools import combinations, product
from ..core import Atom, cacheit
from ..core.expr import Expr
from ..core.function import Application
from ..core.numbers import Number
from ..core.operations import LatticeOp
from ..core.singleton import S
from ..core.singleton import SingletonWithManagedProperties as Singleton
from ..core.sympify import converter, sympify
from ..utilities import ordered
class Boolean(Expr):
"""A boolean object is an object for which logic operations make sense."""
def __and__(self, other):
"""Overloading for & operator."""
return And(self, other)
__rand__ = __and__
def __or__(self, other):
"""Overloading for | operator."""
return Or(self, other)
__ror__ = __or__
def __invert__(self):
"""Overloading for ~ operator."""
return Not(self)
def __rshift__(self, other):
"""Overloading for >> operator."""
return Implies(self, other)
def __lshift__(self, other):
"""Overloading for << operator."""
return Implies(other, self)
__rrshift__ = __lshift__
__rlshift__ = __rshift__
def __xor__(self, other):
return Xor(self, other)
__rxor__ = __xor__
def equals(self, other, failing_expression=False):
"""
Returns True if the given formulas have the same truth table.
For two formulas to be equal they must have the same literals.
Examples
========
>>> (a >> b).equals(~b >> ~a)
True
>>> Not(And(a, b, c)).equals(And(Not(a), Not(b), Not(c)))
False
>>> Not(And(a, Not(a))).equals(Or(b, Not(b)))
False
"""
from ..core.relational import Relational
from .inference import satisfiable
other = sympify(other)
if self.has(Relational) or other.has(Relational):
raise NotImplementedError('handling of relationals')
return self.atoms() == other.atoms() and \
not satisfiable(Not(Equivalent(self, other)))
class BooleanAtom(Atom, Boolean):
"""Base class of BooleanTrue and BooleanFalse."""
is_Boolean = True
@property
def canonical(self):
return self
def __int__(self):
return int(bool(self))
class BooleanTrue(BooleanAtom, metaclass=Singleton):
"""Diofant version of True, a singleton that can be accessed via ``true``.
This is the Diofant version of True, for use in the logic module. The
primary advantage of using true instead of True is that shorthand boolean
operations like ~ and >> will work as expected on this class, whereas with
True they act bitwise on 1. Functions in the logic module will return this
class when they evaluate to true.
Notes
=====
There is liable to be some confusion as to when ``True`` should
be used and when ``true`` should be used in various contexts
throughout Diofant. An important thing to remember is that
``sympify(True)`` returns ``true``. This means that for the most
part, you can just use ``True`` and it will automatically be converted
to ``true`` when necessary, similar to how you can generally use 1
instead of ``Integer(1)``.
The rule of thumb is:
"If the boolean in question can be replaced by an arbitrary symbolic
``Boolean``, like ``Or(x, y)`` or ``x > 1``, use ``true``.
Otherwise, use ``True``".
In other words, use ``true`` only on those contexts where the
boolean is being used as a symbolic representation of truth.
For example, if the object ends up in the ``.args`` of any expression,
then it must necessarily be ``true`` instead of ``True``, as
elements of ``.args`` must be ``Basic``. On the other hand,
``==`` is not a symbolic operation in Diofant, since it always returns
``True`` or ``False``, and does so in terms of structural equality
rather than mathematical, so it should return ``True``. The assumptions
system should use ``True`` and ``False``. Aside from not satisfying
the above rule of thumb, the
assumptions system uses a three-valued logic (``True``, ``False``, ``None``),
whereas ``true`` and ``false`` represent a two-valued logic. When in
doubt, use ``True``.
"``true == True is True``."
While "``true is True``" is ``False``, "``true == True``"
is ``True``, so if there is any doubt over whether a function or
expression will return ``true`` or ``True``, just use ``==``
instead of ``is`` to do the comparison, and it will work in either
case. Finally, for boolean flags, it's better to just use ``if x``
instead of ``if x is True``. To quote PEP 8:
Don't compare boolean values to ``True`` or ``False``
using ``==``.
* Yes: ``if greeting:``
* No: ``if greeting == True:``
* Worse: ``if greeting is True:``
Examples
========
>>> sympify(True)
true
>>> ~true
false
>>> ~True
-2
>>> Or(True, False)
true
See Also
========
BooleanFalse
"""
def __bool__(self):
return True
def __hash__(self):
return hash(True)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> true.as_set()
UniversalSet()
"""
return S.UniversalSet
class BooleanFalse(BooleanAtom, metaclass=Singleton):
"""Diofant version of False, a singleton that can be accessed via ``false``.
This is the Diofant version of False, for use in the logic module. The
primary advantage of using false instead of False is that shorthand boolean
operations like ~ and >> will work as expected on this class, whereas with
False they act bitwise on 0. Functions in the logic module will return this
class when they evaluate to false.
Notes
=====
See note in :py:class:`~diofant.logic.boolalg.BooleanTrue`.
Examples
========
>>> sympify(False)
false
>>> false >> false
true
>>> False >> False
0
>>> Or(True, False)
true
See Also
========
BooleanTrue
"""
def __bool__(self):
return False
def __hash__(self):
return hash(False)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> false.as_set()
EmptySet()
"""
from ..sets import EmptySet
return EmptySet()
true = BooleanTrue()
false: BooleanFalse = BooleanFalse()
# We want S.true and S.false to work, rather than S.BooleanTrue and
# S.BooleanFalse, but making the class and instance names the same causes some
# major issues (like the inability to import the class directly from this
# file).
S.true = true
S.false = false
converter[bool] = lambda x: true if x else false
class BooleanFunction(Application, Boolean):
"""Boolean function is a function that lives in a boolean space.
This is used as base class for And, Or, Not, etc.
"""
is_Boolean = True
def _eval_simplify(self, ratio, measure):
return simplify_logic(self)
def to_nnf(self, simplify=True):
return self._to_nnf(*self.args, simplify=simplify)
@classmethod
def _to_nnf(cls, *args, **kwargs):
simplify = kwargs.get('simplify', True)
argset = set()
for arg in args:
if not is_literal(arg):
arg = arg.to_nnf(simplify)
if simplify:
if isinstance(arg, cls):
arg = arg.args
else:
arg = arg,
for a in arg:
if Not(a) in argset:
return cls.zero
argset.add(a)
else:
argset.add(arg)
return cls(*argset)
class And(LatticeOp, BooleanFunction):
"""
Logical AND function.
It evaluates its arguments in order, giving False immediately
if any of them are False, and True if they are all True.
Examples
========
>>> x & y
x & y
Notes
=====
The ``&`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
and. Hence, ``And(a, b)`` and ``a & b`` will return different things if
``a`` and ``b`` are integers.
>>> And(x, y).subs({x: 1})
y
"""
zero = false
identity = true
nargs = None
@classmethod
def _new_args_filter(cls, args):
newargs = []
rel = []
for x in reversed(list(args)):
if isinstance(x, Number) or x in (0, 1):
newargs.append(True if x else False)
continue
if x.is_Relational:
c = x.canonical
if c in rel:
continue
nc = (~c).canonical
if any(r == nc for r in rel):
return [false]
rel.append(c)
newargs.append(x)
return LatticeOp._new_args_filter(newargs, And)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> And(x < 2, x > -2).as_set()
(-2, 2)
"""
from ..sets import Intersection
if len(self.free_symbols) == 1:
return Intersection(*[arg.as_set() for arg in self.args])
else:
raise NotImplementedError('Sorry, And.as_set has not yet been'
' implemented for multivariate'
' expressions')
class Or(LatticeOp, BooleanFunction):
"""
Logical OR function
It evaluates its arguments in order, giving True immediately
if any of them are True, and False if they are all False.
Examples
========
>>> x | y
x | y
Notes
=====
The ``|`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
or. Hence, ``Or(a, b)`` and ``a | b`` will return different things if
``a`` and ``b`` are integers.
>>> Or(x, y).subs({x: 0})
y
"""
zero = true
identity = false
@classmethod
def _new_args_filter(cls, args):
newargs = []
rel = []
for x in args:
if isinstance(x, Number) or x in (0, 1):
newargs.append(True if x else False)
continue
if x.is_Relational:
c = x.canonical
if c in rel:
continue
nc = (~c).canonical
if any(r == nc for r in rel):
return [true]
rel.append(c)
newargs.append(x)
return LatticeOp._new_args_filter(newargs, Or)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> Or(x > 2, x < -2).as_set()
[-oo, -2) U (2, oo]
"""
from ..sets import Union
if len(self.free_symbols) == 1:
return Union(*[arg.as_set() for arg in self.args])
else:
raise NotImplementedError('Sorry, Or.as_set has not yet been'
' implemented for multivariate'
' expressions')
class Not(BooleanFunction):
"""
Logical Not function (negation).
Returns True if the statement is False.
Returns False if the statement is True.
Examples
========
>>> Not(True)
false
>>> Not(False)
true
>>> Not(And(True, False))
true
>>> Not(Or(True, False))
false
>>> Not(And(And(True, x), Or(x, False)))
~x
>>> ~x
~x
>>> Not(And(Or(x, y), Or(~x, ~y)))
~((x | y) & (~x | ~y))
Notes
=====
The ``~`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
not. In particular, ``~a`` and ``Not(a)`` will be different if ``a`` is
an integer. Furthermore, since bools in Python subclass from ``int``,
``~True`` is the same as ``~1`` which is ``-2``, which has a boolean
value of True. To avoid this issue, use the Diofant boolean types
``true`` and ``false``.
>>> ~True
-2
>>> ~true
false
"""
is_Not = True
@classmethod
def eval(cls, arg):
from ..core import (Equality, GreaterThan, LessThan, StrictGreaterThan,
StrictLessThan, Unequality)
if isinstance(arg, Number) or arg in (True, False):
return false if arg else true
if arg.is_Not:
return arg.args[0]
# Simplify Relational objects.
if isinstance(arg, Equality):
return Unequality(*arg.args)
if isinstance(arg, Unequality):
return Equality(*arg.args)
if isinstance(arg, StrictLessThan):
return GreaterThan(*arg.args)
if isinstance(arg, StrictGreaterThan):
return LessThan(*arg.args)
if isinstance(arg, LessThan):
return StrictGreaterThan(*arg.args)
if isinstance(arg, GreaterThan):
return StrictLessThan(*arg.args)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> Not(x > 0, evaluate=False).as_set()
(-oo, 0]
"""
if len(self.free_symbols) == 1:
return self.args[0].as_set().complement(S.Reals)
else:
raise NotImplementedError('Sorry, Not.as_set has not yet been'
                                      ' implemented for multivariate'
' expressions')
def to_nnf(self, simplify=True):
if is_literal(self):
return self
expr = self.args[0]
func, args = expr.func, expr.args
if func == And:
return Or._to_nnf(*[~arg for arg in args], simplify=simplify)
if func == Or:
return And._to_nnf(*[~arg for arg in args], simplify=simplify)
if func == Implies:
a, b = args
return And._to_nnf(a, ~b, simplify=simplify)
if func == Equivalent:
return And._to_nnf(Or(*args), Or(*[~arg for arg in args]), simplify=simplify)
if func == Xor:
result = []
for i in range(1, len(args)+1, 2):
for neg in combinations(args, i):
clause = [~s if s in neg else s for s in args]
result.append(Or(*clause))
return And._to_nnf(*result, simplify=simplify)
if func == ITE:
a, b, c = args
return And._to_nnf(Or(a, ~c), Or(~a, ~b), simplify=simplify)
raise ValueError(f'Illegal operator {func} in expression')
class Xor(BooleanFunction):
"""
Logical XOR (exclusive OR) function.
Returns True if an odd number of the arguments are True and the rest are
False.
Returns False if an even number of the arguments are True and the rest are
False.
Examples
========
>>> Xor(True, False)
true
>>> Xor(True, True)
false
>>> Xor(True, False, True, True, False)
true
>>> Xor(True, False, True, False)
false
>>> x ^ y
Xor(x, y)
Notes
=====
The ``^`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise xor. In
particular, ``a ^ b`` and ``Xor(a, b)`` will be different if ``a`` and
``b`` are integers.
>>> Xor(x, y).subs({y: 0})
x
"""
def __new__(cls, *args, **kwargs):
argset = set()
obj = super().__new__(cls, *args, **kwargs)
for arg in super(Xor, obj).args:
if isinstance(arg, Number) or arg in (True, False):
if not arg:
continue
else:
arg = true
if isinstance(arg, Xor):
for a in arg.args:
argset.remove(a) if a in argset else argset.add(a)
elif arg in argset:
argset.remove(arg)
else:
argset.add(arg)
rel = [(r, r.canonical, (~r).canonical) for r in argset if r.is_Relational]
        odd = False  # is number of complementary pairs odd? start 0 -> False
remove = []
for i, (r, c, nc) in enumerate(rel):
for j in range(i + 1, len(rel)):
rj, cj = rel[j][:2]
if cj == nc:
odd = ~odd
break
elif cj == c:
break
else:
continue
remove.append((r, rj))
if odd:
argset.remove(true) if true in argset else argset.add(true)
for a, b in remove:
argset.remove(a)
argset.remove(b)
if len(argset) == 0:
return false
elif len(argset) == 1:
return argset.pop()
elif True in argset:
argset.remove(True)
return Not(Xor(*argset))
else:
obj._args = tuple(ordered(argset))
obj._argset = frozenset(argset)
return obj
@property # type: ignore[misc]
@cacheit
def args(self):
return tuple(ordered(self._argset))
def to_nnf(self, simplify=True):
args = []
for i in range(0, len(self.args)+1, 2):
for neg in combinations(self.args, i):
clause = [~s if s in neg else s for s in self.args]
args.append(Or(*clause))
return And._to_nnf(*args, simplify=simplify)
class Nand(BooleanFunction):
"""
Logical NAND function.
It evaluates its arguments in order, giving True immediately if any
of them are False, and False if they are all True.
Returns True if any of the arguments are False.
Returns False if all arguments are True.
Examples
========
>>> Nand(False, True)
true
>>> Nand(True, True)
false
>>> Nand(x, y)
~(x & y)
"""
@classmethod
def eval(cls, *args):
return Not(And(*args))
class Nor(BooleanFunction):
"""
Logical NOR function.
It evaluates its arguments in order, giving False immediately if any
of them are True, and True if they are all False.
Returns False if any argument is True.
Returns True if all arguments are False.
Examples
========
>>> Nor(True, False)
false
>>> Nor(True, True)
false
>>> Nor(False, True)
false
>>> Nor(False, False)
true
>>> Nor(x, y)
~(x | y)
"""
@classmethod
def eval(cls, *args):
return Not(Or(*args))
class Implies(BooleanFunction):
"""
Logical implication.
A implies B is equivalent to !A v B
Accepts two Boolean arguments; A and B.
Returns False if A is True and B is False.
Returns True otherwise.
Examples
========
>>> Implies(True, False)
false
>>> Implies(False, False)
true
>>> Implies(True, True)
true
>>> Implies(False, True)
true
>>> x >> y
Implies(x, y)
>>> y << x
Implies(x, y)
Notes
=====
The ``>>`` and ``<<`` operators are provided as a convenience, but note
that their use here is different from their normal use in Python, which is
bit shifts. Hence, ``Implies(a, b)`` and ``a >> b`` will return different
things if ``a`` and ``b`` are integers. In particular, since Python
considers ``True`` and ``False`` to be integers, ``True >> True`` will be
the same as ``1 >> 1``, i.e., 0, which has a truth value of False. To
avoid this issue, use the Diofant objects ``true`` and ``false``.
>>> True >> False
1
>>> true >> false
false
"""
@classmethod
def eval(cls, *args):
try:
newargs = []
for x in args:
if isinstance(x, Number) or x in (0, 1):
newargs.append(True if x else False)
else:
newargs.append(x)
A, B = newargs
except ValueError:
raise ValueError(f'{len(args)} operand(s) used for an Implies '
f'(pairs are required): {args!s}')
if A == true or A == false or B == true or B == false:
return Or(Not(A), B)
elif A == B:
return true
elif A.is_Relational and B.is_Relational:
if A.canonical == B.canonical:
return true
elif (~A).canonical == B.canonical:
return B
else:
return Expr.__new__(cls, *args)
def to_nnf(self, simplify=True):
a, b = self.args
return Or._to_nnf(~a, b, simplify=simplify)
class Equivalent(BooleanFunction):
"""
Equivalence relation.
Equivalent(A, B) is True iff A and B are both True or both False.
Returns True if all of the arguments are logically equivalent.
Returns False otherwise.
Examples
========
>>> Equivalent(False, False, False)
true
>>> Equivalent(True, False, False)
false
>>> Equivalent(x, And(x, True))
true
"""
def __new__(cls, *args, **options):
from ..core.relational import Relational
args = [sympify(arg, strict=True) for arg in args]
argset = set(args)
for x in args:
if isinstance(x, Number) or x in [True, False]: # Includes 0, 1
argset.discard(x)
argset.add(True if x else False)
rel = []
for r in argset:
if isinstance(r, Relational):
rel.append((r, r.canonical, (~r).canonical))
remove = []
for i, (r, c, nc) in enumerate(rel):
for j in range(i + 1, len(rel)):
rj, cj = rel[j][:2]
if cj == nc:
return false
elif cj == c:
remove.append((r, rj))
break
for a, b in remove:
argset.remove(a)
argset.remove(b)
argset.add(True)
if len(argset) <= 1:
return true
if True in argset:
argset.discard(True)
return And(*argset)
if False in argset:
argset.discard(False)
return And(*[~arg for arg in argset])
_args = frozenset(argset)
obj = super().__new__(cls, _args)
obj._argset = _args
return obj
@property # type: ignore[misc]
@cacheit
def args(self):
return tuple(ordered(self._argset))
def to_nnf(self, simplify=True):
args = []
for a, b in zip(self.args, self.args[1:]):
args.append(Or(~a, b))
args.append(Or(~self.args[-1], self.args[0]))
return And._to_nnf(*args, simplify=simplify)
class ITE(BooleanFunction):
"""
If then else clause.
ITE(A, B, C) evaluates and returns the result of B if A is true
else it returns the result of C.
Examples
========
>>> ITE(True, False, True)
false
>>> ITE(Or(True, False), And(True, True), Xor(True, True))
true
>>> ITE(x, y, z)
ITE(x, y, z)
>>> ITE(True, x, y)
x
>>> ITE(False, x, y)
y
>>> ITE(x, y, y)
y
"""
@classmethod
def eval(cls, *args):
try:
a, b, c = args
except ValueError:
raise ValueError('ITE expects exactly 3 arguments')
if a == true:
return b
elif a == false:
return c
elif b == c:
return b
elif b == true and c == false:
return a
elif b == false and c == true:
return Not(a)
def to_nnf(self, simplify=True):
a, b, c = self.args
return And._to_nnf(Or(~a, b), Or(a, c), simplify=simplify)
def _eval_derivative(self, x):
return self.func(self.args[0], *[a.diff(x) for a in self.args[1:]])
# end class definitions. Some useful methods
def conjuncts(expr):
"""Return a list of the conjuncts in the expr s.
Examples
========
>>> conjuncts(a & b) == frozenset([a, b])
True
>>> conjuncts(a | b) == frozenset([Or(a, b)])
True
"""
return And.make_args(expr)
def disjuncts(expr):
"""Return a list of the disjuncts in the sentence s.
Examples
========
>>> disjuncts(a | b) == frozenset([a, b])
True
>>> disjuncts(a & b) == frozenset([And(a, b)])
True
"""
return Or.make_args(expr)
def distribute_and_over_or(expr):
"""
Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in CNF.
Examples
========
>>> distribute_and_over_or(Or(a, And(Not(b), Not(c))))
(a | ~b) & (a | ~c)
"""
return _distribute((expr, And, Or))
def distribute_or_over_and(expr):
"""
Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in DNF.
Note that the output is NOT simplified.
Examples
========
>>> distribute_or_over_and(And(Or(Not(a), b), c))
(b & c) | (c & ~a)
"""
return _distribute((expr, Or, And))
def _distribute(info):
"""Distributes info[1] over info[2] with respect to info[0]."""
if isinstance(info[0], info[2]):
for arg in info[0].args:
if isinstance(arg, info[1]):
conj = arg
break
else:
return info[0]
rest = info[2](*[a for a in info[0].args if a is not conj])
return info[1](*list(map(_distribute,
((info[2](c, rest), info[1], info[2]) for c in conj.args))))
elif isinstance(info[0], info[1]):
return info[1](*list(map(_distribute,
((x, info[1], info[2]) for x in info[0].args))))
else:
return info[0]
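# Illustrative sketch (added annotation, not in the original module): for
# CNF-style distribution the helper is called as
#   _distribute((Or(a, And(b, c)), And, Or))  ->  (a | b) & (a | c)
# while distribute_or_over_and() swaps the roles of And and Or.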
def to_nnf(expr, simplify=True):
"""
Converts expr to Negation Normal Form.
A logical expression is in Negation Normal Form (NNF) if it
contains only And, Or and Not, and Not is applied only to literals.
If simplify is True, the result contains no redundant clauses.
Examples
========
>>> to_nnf(Not((~a & ~b) | (c & d)))
(a | b) & (~c | ~d)
>>> to_nnf(Equivalent(a >> b, b >> a))
(a | ~b | (a & ~b)) & (b | ~a | (b & ~a))
"""
expr = sympify(expr)
if is_nnf(expr, simplify):
return expr
return expr.to_nnf(simplify)
def to_cnf(expr, simplify=False):
"""
Convert a propositional logical sentence s to conjunctive normal form.
That is, of the form ((A | ~B | ...) & (B | C | ...) & ...).
If simplify is True, the expr is evaluated to its simplest CNF form.
Examples
========
>>> to_cnf(~(a | b) | c)
(c | ~a) & (c | ~b)
>>> to_cnf((a | b) & (a | ~a), True)
a | b
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
return simplify_logic(expr, 'cnf', True)
# Don't convert unless we have to
if is_cnf(expr):
return expr
expr = eliminate_implications(expr)
return distribute_and_over_or(expr)
def to_dnf(expr, simplify=False):
"""
Convert a propositional logical sentence s to disjunctive normal form.
That is, of the form ((A & ~B & ...) | (B & C & ...) | ...).
If simplify is True, the expr is evaluated to its simplest DNF form.
Examples
========
>>> to_dnf(b & (a | c))
(a & b) | (b & c)
>>> to_dnf((a & b) | (a & ~b) | (b & c) | (~b & c), True)
a | c
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
return simplify_logic(expr, 'dnf', True)
# Don't convert unless we have to
if is_dnf(expr):
return expr
expr = eliminate_implications(expr)
return distribute_or_over_and(expr)
def is_nnf(expr, simplified=True):
"""
Checks if expr is in Negation Normal Form.
A logical expression is in Negation Normal Form (NNF) if it
contains only And, Or and Not, and Not is applied only to literals.
If simplified is True, checks if result contains no redundant clauses.
Examples
========
>>> is_nnf(a & b | ~c)
True
>>> is_nnf((a | ~a) & (b | c))
False
>>> is_nnf((a | ~a) & (b | c), False)
True
>>> is_nnf(Not(a & b) | c)
False
>>> is_nnf((a >> b) & (b >> a))
False
"""
expr = sympify(expr)
if is_literal(expr):
return True
stack = [expr]
while stack:
expr = stack.pop()
if expr.func in (And, Or):
if simplified:
args = expr.args
for arg in args:
if Not(arg) in args:
return False
stack.extend(expr.args)
elif not is_literal(expr):
return False
return True
def is_cnf(expr):
"""
Test whether or not an expression is in conjunctive normal form.
Examples
========
>>> is_cnf(a | b | c)
True
>>> is_cnf(a & b & c)
True
>>> is_cnf((a & b) | c)
False
"""
return _is_form(expr, And, Or)
def is_dnf(expr):
"""
Test whether or not an expression is in disjunctive normal form.
Examples
========
>>> is_dnf(a | b | c)
True
>>> is_dnf(a & b & c)
True
>>> is_dnf((a & b) | c)
True
>>> is_dnf(a & (b | c))
False
"""
return _is_form(expr, Or, And)
def _is_form(expr, function1, function2):
"""Test whether or not an expression is of the required form."""
expr = sympify(expr)
# Special case of an Atom
if expr.is_Atom:
return True
# Special case of a single expression of function2
if isinstance(expr, function2):
for lit in expr.args:
if isinstance(lit, Not):
if not lit.args[0].is_Atom:
return False
else:
if not lit.is_Atom:
return False
return True
# Special case of a single negation
if isinstance(expr, Not):
if not expr.args[0].is_Atom:
return False
if not isinstance(expr, function1):
return False
for cls in expr.args:
if cls.is_Atom:
continue
if isinstance(cls, Not):
if not cls.args[0].is_Atom:
return False
elif not isinstance(cls, function2):
return False
for lit in cls.args:
if isinstance(lit, Not):
if not lit.args[0].is_Atom:
return False
else:
if not lit.is_Atom:
return False
return True
def eliminate_implications(expr):
"""
Change >>, <<, and Equivalent into &, |, and ~. That is, return an
expression that is equivalent to s, but has only &, |, and ~ as logical
operators.
Examples
========
>>> eliminate_implications(Implies(a, b))
b | ~a
>>> eliminate_implications(Equivalent(a, b))
(a | ~b) & (b | ~a)
>>> eliminate_implications(Equivalent(a, b, c))
(a | ~c) & (b | ~a) & (c | ~b)
"""
return to_nnf(expr)
def is_literal(expr):
"""
Returns True if expr is a literal, else False.
Examples
========
>>> is_literal(a)
True
>>> is_literal(~a)
True
>>> is_literal(a + b)
True
>>> is_literal(Or(a, b))
False
"""
if isinstance(expr, Not):
return not isinstance(expr.args[0], BooleanFunction)
else:
return not isinstance(expr, BooleanFunction)
def to_int_repr(clauses, symbols):
"""
Takes clauses in CNF format and puts them into an integer representation.
Examples
========
>>> to_int_repr([x | y, y], [x, y])
[{1, 2}, {2}]
"""
symbols = dict(zip(symbols, range(1, len(symbols) + 1)))
def append_symbol(arg, symbols):
if isinstance(arg, Not):
return -symbols[arg.args[0]]
else:
return symbols[arg]
return [{append_symbol(arg, symbols) for arg in Or.make_args(c)}
for c in clauses]
def _check_pair(minterm1, minterm2):
"""
Checks if a pair of minterms differs by only one bit. If yes, returns
index, else returns -1.
"""
index = -1
for x, (i, j) in enumerate(zip(minterm1, minterm2)):
if i != j:
if index == -1:
index = x
else:
return -1
return index
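# Illustrative sketch (added annotation, not in the original module):
#   _check_pair([1, 0, 1], [1, 1, 1])  ->  1    (terms differ only at index 1)
#   _check_pair([1, 0, 1], [0, 1, 1])  ->  -1   (terms differ at more than one index)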
def _convert_to_varsSOP(minterm, variables):
"""
    Converts a term in the expansion of a function from binary to its
variable form (for SOP).
"""
temp = []
for i, m in enumerate(minterm):
if m == 0:
temp.append(Not(variables[i]))
elif m == 1:
temp.append(variables[i])
return And(*temp)
def _convert_to_varsPOS(maxterm, variables):
"""
    Converts a term in the expansion of a function from binary to its
variable form (for POS).
"""
temp = []
for i, m in enumerate(maxterm):
if m == 1:
temp.append(Not(variables[i]))
elif m == 0:
temp.append(variables[i])
return Or(*temp)
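# Illustrative sketch (added annotation, not in the original module): terms are
# encoded with 0/1 for negated/plain literals and 3 for "don't care", e.g. with
# variables [x, y, z]:
#   _convert_to_varsSOP([1, 0, 3], [x, y, z])  ->  x & ~y
#   _convert_to_varsPOS([1, 0, 3], [x, y, z])  ->  y | ~x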
def _simplified_pairs(terms):
"""
Reduces a set of minterms, if possible, to a simplified set of minterms
with one less variable in the terms using QM method.
"""
simplified_terms = []
todo = list(range(len(terms)))
for i, ti in enumerate(terms[:-1]):
for j_i, tj in enumerate(terms[(i + 1):]):
index = _check_pair(ti, tj)
if index != -1:
todo[i] = todo[j_i + i + 1] = None
newterm = ti[:]
newterm[index] = 3
if newterm not in simplified_terms:
simplified_terms.append(newterm)
simplified_terms.extend(
[terms[i] for i in [_ for _ in todo if _ is not None]])
return simplified_terms
def _compare_term(minterm, term):
"""
Return True if a binary term is satisfied by the given term. Used
for recognizing prime implicants.
"""
for i, x in enumerate(term):
if x not in (3, minterm[i]):
return False
return True
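# Illustrative sketch (added annotation, not in the original module):
#   _compare_term([1, 0, 1], [1, 3, 1])  ->  True   (3 matches either bit)
#   _compare_term([1, 0, 1], [0, 3, 1])  ->  False  (first bit conflicts)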
def _rem_redundancy(l1, terms):
"""
After the truth table has been sufficiently simplified, use the prime
implicant table method to recognize and eliminate redundant pairs,
and return the essential arguments.
"""
essential = []
for x in terms:
temporary = []
for y in l1:
if _compare_term(x, y):
temporary.append(y)
if len(temporary) == 1:
if temporary[0] not in essential:
essential.append(temporary[0])
for x in terms:
for y in essential:
if _compare_term(x, y):
break
else:
for z in l1: # pragma: no branch
if _compare_term(x, z):
assert z not in essential
essential.append(z)
break
return essential
def SOPform(variables, minterms, dontcares=None):
"""
The SOPform function uses simplified_pairs and a redundant group-
eliminating algorithm to convert the list of all input combos that
generate '1' (the minterms) into the smallest Sum of Products form.
The variables must be given as the first argument.
Return a logical Or function (i.e., the "sum of products" or "SOP"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1],
... [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> SOPform([t, x, y, z], minterms, dontcares)
(y & z) | (z & ~t)
References
==========
* https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
variables = [sympify(v) for v in variables]
if minterms == []:
return false
minterms = [list(i) for i in minterms]
dontcares = [list(i) for i in (dontcares or [])]
for d in dontcares:
if d in minterms:
raise ValueError(f'{d} in minterms is also in dontcares')
old = None
new = minterms + dontcares
while new != old:
old = new
new = _simplified_pairs(old)
essential = _rem_redundancy(new, minterms)
return Or(*[_convert_to_varsSOP(x, variables) for x in essential])
def POSform(variables, minterms, dontcares=None):
"""
The POSform function uses simplified_pairs and a redundant-group
eliminating algorithm to convert the list of all input combinations
that generate '1' (the minterms) into the smallest Product of Sums form.
The variables must be given as the first argument.
Return a logical And function (i.e., the "product of sums" or "POS"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1],
... [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> POSform([t, x, y, z], minterms, dontcares)
z & (y | ~t)
References
==========
* https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
variables = [sympify(v) for v in variables]
if minterms == []:
return false
minterms = [list(i) for i in minterms]
dontcares = [list(i) for i in (dontcares or [])]
for d in dontcares:
if d in minterms:
raise ValueError(f'{d} in minterms is also in dontcares')
maxterms = []
for t in product([0, 1], repeat=len(variables)):
t = list(t)
if (t not in minterms) and (t not in dontcares):
maxterms.append(t)
old = None
new = maxterms + dontcares
while new != old:
old = new
new = _simplified_pairs(old)
essential = _rem_redundancy(new, maxterms)
return And(*[_convert_to_varsPOS(x, variables) for x in essential])
def _find_predicates(expr):
"""Helper to find logical predicates in BooleanFunctions.
A logical predicate is defined here as anything within a BooleanFunction
that is not a BooleanFunction itself.
"""
if not isinstance(expr, BooleanFunction):
return {expr}
return set().union(*(_find_predicates(i) for i in expr.args))
def simplify_logic(expr, form=None, deep=True):
"""
This function simplifies a boolean function to its simplified version
in SOP or POS form. The return type is an Or or And object in Diofant.
Parameters
==========
expr : string or boolean expression
form : string ('cnf' or 'dnf') or None (default).
If 'cnf' or 'dnf', the simplest expression in the corresponding
normal form is returned; if None, the answer is returned
according to the form with fewest args (in CNF by default).
deep : boolean (default True)
indicates whether to recursively simplify any
non-boolean functions contained within the input.
Examples
========
>>> b = (~x & ~y & ~z) | (~x & ~y & z)
>>> simplify_logic(b)
~x & ~y
>>> sympify(b)
(z & ~x & ~y) | (~x & ~y & ~z)
>>> simplify_logic(_)
~x & ~y
"""
if form == 'cnf' or form == 'dnf' or form is None:
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
variables = _find_predicates(expr)
truthtable = []
for t in product([0, 1], repeat=len(variables)):
t = list(t)
if expr.xreplace(dict(zip(variables, t))):
truthtable.append(t)
if deep:
from ..simplify import simplify
variables = [simplify(v) for v in variables]
if form == 'dnf' or \
(form is None and len(truthtable) >= (2 ** (len(variables) - 1))):
return SOPform(variables, truthtable)
elif form == 'cnf' or form is None: # pragma: no branch
return POSform(variables, truthtable)
else:
raise ValueError('form can be cnf or dnf only')
def _finger(eq):
"""
Assign a 5-item fingerprint to each symbol in the equation:
[
# of times it appeared as a Symbol,
# of times it appeared as a Not(symbol),
# of times it appeared as a Symbol in an And or Or,
# of times it appeared as a Not(Symbol) in an And or Or,
sum of the number of arguments with which it appeared,
counting Symbol as 1 and Not(Symbol) as 2
]
>>> eq = Or(And(Not(y), a), And(Not(y), b), And(x, y))
>>> dict(_finger(eq))
{(0, 0, 1, 0, 2): [x],
(0, 0, 1, 0, 3): [a, b],
(0, 0, 1, 2, 8): [y]}
So y and x have unique fingerprints, but a and b do not.
"""
f = eq.free_symbols
d = {fi: [0] * 5 for fi in f}
for a in eq.args:
if a.is_Symbol:
d[a][0] += 1
elif a.is_Not:
d[a.args[0]][1] += 1
else:
o = len(a.args) + sum(isinstance(ai, Not) for ai in a.args)
for ai in a.args:
if ai.is_Symbol:
d[ai][2] += 1
d[ai][-1] += o
else:
d[ai.args[0]][3] += 1
d[ai.args[0]][-1] += o
inv = defaultdict(list)
for k, v in ordered(d.items()):
inv[tuple(v)].append(k)
return inv
def bool_map(bool1, bool2):
"""
Return the simplified version of bool1, and the mapping of variables
that makes the two expressions bool1 and bool2 represent the same
logical behaviour for some correspondence between the variables
of each.
    If more than one mapping of this sort exists, one of them
is returned.
For example, And(x, y) is logically equivalent to And(a, b) for
the mapping {x: a, y:b} or {x: b, y:a}.
If no such mapping exists, return False.
Examples
========
>>> function1 = SOPform([x, z, y], [[1, 0, 1], [0, 0, 1]])
>>> function2 = SOPform([a, b, c], [[1, 0, 1], [1, 0, 0]])
>>> bool_map(function1, function2)
(y & ~z, {y: a, z: b})
The results are not necessarily unique, but they are canonical. Here,
``(t, z)`` could be ``(a, d)`` or ``(d, a)``:
>>> eq1 = Or(And(Not(y), t), And(Not(y), z), And(x, y))
>>> eq2 = Or(And(Not(c), a), And(Not(c), d), And(b, c))
>>> bool_map(eq1, eq2)
((x & y) | (t & ~y) | (z & ~y), {t: a, x: b, y: c, z: d})
>>> eq = And(Xor(a, b), c, And(c, d))
>>> bool_map(eq, eq.subs({c: x}))
(c & d & (a | b) & (~a | ~b), {a: a, b: b, c: d, d: x})
"""
def match(function1, function2):
"""Return the mapping that equates variables between two
simplified boolean expressions if possible.
By "simplified" we mean that a function has been denested
and is either an And (or an Or) whose arguments are either
symbols (x), negated symbols (Not(x)), or Or (or an And) whose
arguments are only symbols or negated symbols. For example,
And(x, Not(y), Or(w, Not(z))).
Basic.match is not robust enough (see issue sympy/sympy#4835) so this is
a workaround that is valid for simplified boolean expressions.
"""
# do some quick checks
if function1.__class__ != function2.__class__:
return
if len(function1.args) != len(function2.args):
return
if function1.is_Symbol:
return {function1: function2}
# get the fingerprint dictionaries
f1 = _finger(function1)
f2 = _finger(function2)
# more quick checks
if len(f1) != len(f2):
return
# assemble the match dictionary if possible
matchdict = {}
for k in f1:
if k not in f2 or len(f1[k]) != len(f2[k]):
return
for i, x in enumerate(f1[k]):
matchdict[x] = f2[k][i]
return matchdict if matchdict else None
a = simplify_logic(bool1)
b = simplify_logic(bool2)
m = match(a, b)
if m:
return a, m
return m is not None
| 2.953125 | 3 |
amazon/goods_review_thread.py | JoanLee0826/amazon | 5 | 1655 | <gh_stars>1-10
import pandas as pd
import requests
from lxml import etree
import re, time, random, datetime
from queue import Queue
import threading
class Review:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/69.0.3497.81 Safari/537.36"
}
proxies = {
"http": "http://172.16.17.32:9999",
}
def __init__(self, domain):
self.view_list = []
self.page_list = []
self.url_queue = Queue()
if domain.strip().lower() == 'jp':
self.row_url = "https://www.amazon.co.jp"
        elif domain.strip().lower() == 'com':
self.row_url = "https://www.amazon.com"
self.s = requests.Session()
self.s.get(url=self.row_url, headers=self.headers, proxies=self.proxies)
def get_review(self, url):
res = self.s.get(url, headers=self.headers, proxies=self.proxies)
if res.status_code != 200:
print("请求出错,状态码为:%s" % res.status_code)
print(res.text)
return
res_html = etree.HTML(res.text)
        # Product name shown in the review listing
view_goods = res_html.xpath('//span[@class="a-list-item"]/a/text()')[0]
        # Container elements holding each individual review
view_con = res_html.xpath('//div[@class="a-section review aok-relative"]')
for each_view in view_con:
            # Reviewer name
view_name = each_view.xpath('.//span[@class="a-profile-name"]/text()')[0]
view_star_raw = each_view.xpath('.//div[@class="a-row"]/a[@class="a-link-normal"]/@title')[0]
            # Star rating
view_star = view_star_raw.split(' ')[0]
            # Review title
view_title = each_view.xpath('.//a[@data-hook="review-title"]/span/text()')[0]
            # Review date
view_date = each_view.xpath('.//span[@data-hook="review-date"]/text()')[0]
view_format = each_view.xpath('.//a[@data-hook="format-strip"]/text()')
view_colour = None
view_size = None
try:
for each in view_format:
if re.search("color|colour|色", each, re.I):
view_colour = each.split(':')[1].strip()
if re.search("size|style|サイズ", each, re.I):
view_size = each.split(":")[1].strip()
except:
pass
            # Review body text
view_body = each_view.xpath('string(.//span[@data-hook="review-body"]/span)')
            # Number of "helpful" votes
try:
view_useful_raw = each_view.xpath('.//span[@data-hook="helpful-vote-statement"]/text()')[0]
view_useful = view_useful_raw.split(' ')[0]
if view_useful == 'one':
view_useful = 1
try:
view_useful = int(view_useful)
except:
pass
except:
view_useful = 0
            # Assemble the review record for this product
each_view_list = [view_goods, view_name, view_star, view_title, view_date, view_colour, view_size,
view_body, view_useful]
self.view_list.append(each_view_list)
# print(self.view_list[-1])
def run(self, data):
goods_data = pd.read_excel(data, encoding='utf-8')
base_url = self.row_url + "/product-reviews/"
# goods_data.drop_duplicates(subset=['r','评价数量'],inplace=True)
for each_asin, each_count in zip(goods_data['ASIN'][5:50], goods_data['goods_review_count'][5:50]):
if each_asin and int(each_count) > 0:
if int(each_count) % 10 == 0:
end_page = int(each_count) // 10 + 1
else:
end_page = int(each_count) // 10 + 2
for page in range(1, end_page):
if page == 1:
url = base_url + each_asin
else:
url = base_url + each_asin + '?pageNumber=' + str(page)
self.url_queue.put(url)
print("review_page_%d" % page, url)
time.sleep(1.5)
while True:
try:
review_threads = [threading.Thread(target=self.get_review, args=(self.url_queue.get(),))
for m in range(30) if not self.url_queue.empty()]
for each in review_threads:
each.start()
print("队列剩余数量", self.url_queue.qsize())
for each in review_threads:
each.join()
except:
print("请求链接出错,重试中...")
pass
time.sleep(random.uniform(0.5,2.1))
if self.url_queue.empty():
break
view_goods_pd = pd.DataFrame(self.view_list,
columns=['review_goods', 'review_name', 'review_star', 'review_title',
'review_date', 'review_colour', 'review_size', 'review_body',
'review_useful'])
view_goods_pd.drop_duplicates(subset=['review_name', 'review_date','review_body'], inplace=True)
aft = datetime.datetime.now().strftime('%m%d%H%M')
file_name = r'../data/goods_review/' + "reviews_" + aft + ".xlsx"
view_goods_pd.to_excel(file_name, encoding='utf-8', engine='xlsxwriter')
print("共获取评论数量:", len(self.view_list))
if __name__ == '__main__':
data = r"../data/category/Kid's Weighted Blankets_08_28_13_22.xlsx"
review = Review(domain='com')
review.run(data=data)
| 2.796875 | 3 |
lumicks/pylake/population/tests/conftest.py | lumicks/pylake | 8 | 1656 | import pytest
import numpy as np
from pathlib import Path
def extract_param(data, n_states):
keys = ("initial_state_prob", "transition_prob", "means", "st_devs")
param = {"n_states": n_states}
for key in keys:
param[key] = data[f"{key}_{n_states}"]
return param
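# Illustrative note (added annotation, not in the original file): for
# n_states=2, extract_param() returns a dict with 'n_states' plus the
# 'initial_state_prob_2', 'transition_prob_2', 'means_2' and 'st_devs_2'
# arrays from the .npz archive, keyed without the numeric suffix.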
@pytest.fixture(scope="session", params=[2, 3, 4])
def trace_lownoise(request):
"""Trace data can be generated by running ./data/generate_trace_data.py """
data = np.load(Path(__file__).parent / "data/trace_data.npz")
n_states = request.param
param = extract_param(data, n_states)
y = data[f"y_{n_states}"]
sp = data[f"sp_{n_states}"]
return y, sp, param
@pytest.fixture(scope="session")
def trace_simple(request):
"""Trace data can be generated by running ./data/generate_trace_data.py """
data = np.load(Path(__file__).parent / "data/trace_data.npz")
n_states = 2
param = extract_param(data, n_states)
y = data[f"y_{n_states}"]
sp = data[f"sp_{n_states}"]
return y, sp, param
| 2.125 | 2 |
Concurrent/PipelineDecomposingTask.py | rafagarciac/ParallelProgrammingPython | 0 | 1657 | <reponame>rafagarciac/ParallelProgrammingPython
#!/usr/bin/env python
"""
Artisanal pipeline example, built without using a Pipe class.
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright (c) 2018 <NAME>"
__license__ = "MIT"
from concurrent.futures import ProcessPoolExecutor
import time
import random
def worker(arg):
time.sleep(random.random())
return arg
def pipeline(future):
pools[1].submit(worker, future.result()).add_done_callback(printer)
def printer(future):
pools[2].submit(worker, future.result()).add_done_callback(spout)
def spout(future):
print(future.result())
def instanceProcessPool():
pools = []
for i in range(3):
pool = ProcessPoolExecutor(2)
pools.append(pool)
return pools
def shutdownPools(pools):
for pool in pools:
pool.shutdown()
def runThreadsInPipeline(pools):
for pool in pools:
pool.submit(worker, random.random()).add_done_callback(pipeline)
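# Illustrative note (added annotation, not in the original script): each initial
# submission chains through the pools via add_done_callback, so a value flows
# worker -> pipeline -> printer -> spout before being printed.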
if __name__ == "__main__":
__spec__ = None # Fix multiprocessing in Spyder's IPython
pools = instanceProcessPool() # pool = ProcessPoolExecutor([max_workers])
runThreadsInPipeline(pools) # pools[0].submit(worker, random.random()).add_done_callback(pipeline)
    shutdownPools(pools)                # pool.shutdown()
| 3.015625 | 3 |
src/digibujogens/__main__.py | roaet/digibujogens | 0 | 1658 | """ Main application entry point.
python -m digibujogens ...
"""
def main():
""" Execute the application.
"""
raise NotImplementedError
# Make the script executable.
if __name__ == "__main__":
raise SystemExit(main())
| 2.171875 | 2 |
lisa/target.py | mrkajetanp/lisa | 0 | 1659 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2018, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
import os
import os.path
import contextlib
import shlex
from collections.abc import Mapping
import copy
import sys
import argparse
import textwrap
import functools
import inspect
import pickle
import tempfile
from types import ModuleType, FunctionType
from operator import itemgetter
import devlib
from devlib.exception import TargetStableError
from devlib.utils.misc import which
from devlib.platform.gem5 import Gem5SimulationPlatform
from lisa.utils import Loggable, HideExekallID, resolve_dotted_name, get_subclasses, import_all_submodules, LISA_HOME, RESULT_DIR, LATEST_LINK, setup_logging, ArtifactPath, nullcontext, ExekallTaggable, memoized
from lisa.assets import ASSETS_PATH
from lisa.conf import SimpleMultiSrcConf, KeyDesc, LevelKeyDesc, TopLevelKeyDesc, Configurable
from lisa.generic import TypedList
from lisa.platforms.platinfo import PlatformInfo
class PasswordKeyDesc(KeyDesc):
def pretty_format(self, v):
return '<password>'
# Make sure all submodules of devlib.module are imported so the classes
# are all created before we list them
import_all_submodules(devlib.module)
_DEVLIB_AVAILABLE_MODULES = {
cls.name
for cls in get_subclasses(devlib.module.Module)
if (
getattr(cls, 'name', None)
# early modules try to connect to UART and do very
# platform-specific things we are not interested in
and getattr(cls, 'stage') != 'early'
)
}
class TargetConf(SimpleMultiSrcConf, HideExekallID):
"""
Target connection settings.
Only keys defined below are allowed, with the given meaning and type:
{generated_help}
An instance can be created by calling :class:`~TargetConf` with a
dictionary. The top-level `target-conf` key is not needed here:
.. code-block:: python
TargetConf({{
'name': 'myboard',
            'host': '192.0.2.1',
'kind': 'linux',
'username': 'foo',
'password': '<PASSWORD>',
}})
Or alternatively, from a YAML configuration file:
Content of target_conf.yml:
.. literalinclude:: ../target_conf.yml
:language: YAML
::
TargetConf.from_yaml_map('target_conf.yml')
The following special YAML tags can be used in the configuration file:
.. code-block:: YAML
target-conf:
# "!env:<type> ENV_VAR_NAME" can be used to reference an
# environment variable.
name: !env:str BOARD_NAME
port: !env:int PORT
    .. note:: Only load trusted YAML files as it can lead to arbitrary code
execution.
.. note:: That structure in a YAML file is allowed and will work:
* file foo.yml::
target-conf:
name: myboard
* file bar.yml::
target-conf:
!include foo.yml
This will result in that structure which would normally be invalid, but
is handled as a special case::
target-conf:
target-conf:
name: myboard
"""
STRUCTURE = TopLevelKeyDesc('target-conf', 'target connection settings', (
        KeyDesc('name', 'Board name, free-form value only used to embellish logs', [str]),
KeyDesc('kind', 'Target kind. Can be "linux" (ssh) or "android" (adb)', [str]),
KeyDesc('host', 'Hostname or IP address of the host', [str, None]),
KeyDesc('username', 'SSH username. On ADB connections, "root" username will root adb upon target connection', [str, None]),
PasswordKeyDesc('password', 'SSH password', [str, None]),
KeyDesc('port', 'SSH or ADB server port', [int, None]),
KeyDesc('device', 'ADB device. Takes precedence over "host"', [str, None]),
KeyDesc('keyfile', 'SSH private key file', [str, None]),
KeyDesc('strict-host-check', 'Equivalent to StrictHostKeyChecking option of OpenSSH', [bool, None]),
KeyDesc('workdir', 'Remote target workdir', [str]),
KeyDesc('tools', 'List of tools to install on the target', [TypedList[str]]),
        KeyDesc('lazy-platinfo', 'Lazily autodetect the platform information to speed up the connection', [bool]),
LevelKeyDesc('wait-boot', 'Wait for the target to finish booting', (
KeyDesc('enable', 'Enable the boot check', [bool]),
KeyDesc('timeout', 'Timeout of the boot check', [int]),
)),
LevelKeyDesc('devlib', 'devlib configuration', (
# Using textual name of the Platform allows this YAML configuration
# to not use any python-specific YAML tags, so TargetConf files can
# be parsed and produced by any other third-party code
LevelKeyDesc('platform', 'devlib.platform.Platform subclass specification', (
KeyDesc('class', 'Name of the class to use', [str]),
KeyDesc('args', 'Keyword arguments to build the Platform object', [Mapping]),
)),
KeyDesc('excluded-modules', 'List of devlib modules to *not* load', [TypedList[str]]),
KeyDesc('file-xfer', 'File transfer method. Can be "sftp" (default) or "scp". (Only valid for linux targets)', [TypedList[str]]),
))
))
DEFAULT_SRC = {
'devlib': {
'platform': {
'class': 'devlib.platform.Platform'
}
}
}
class Target(Loggable, HideExekallID, ExekallTaggable, Configurable):
"""
Wrap :class:`devlib.target.Target` to provide additional features on top of
it.
{configurable_params}
:param devlib_platform: Instance of :class:`devlib.platform.Platform` to
use to build the :class:`devlib.target.Target`
:type devlib_platform: devlib.platform.Platform
:param plat_info: Platform information attached to this target, for the
benefits of user code.
:type plat_info: lisa.platforms.platinfo.PlatformInfo
You need to provide the information needed to connect to the
target. For SSH targets that means "host", "username" and
either "password" or "keyfile". All other fields are optional if
the relevant features aren't needed.
.. note:: The wrapping of :class:`devlib.target.Target` is done using
composition, as opposed to inheritance. This allows swapping the exact
    class used under the hood, and avoids messing with ``devlib``
internal members.
"""
ADB_PORT_DEFAULT = 5555
SSH_PORT_DEFAULT = 22
CRITICAL_TASKS = {
'linux': [
'init',
            # We want to freeze everything except PID 1; we don't want to leave
            # systemd-journald or systemd-timesyncd running.
'systemd[^-]',
'dbus',
'sh',
'ssh',
'rsyslogd',
'jbd2'
],
'android': [
'sh', 'adbd',
'usb', 'transport',
# We don't actually need this task but on Google Pixel it apparently
# cannot be frozen, so the cgroup state gets stuck in FREEZING if we
# try to freeze it.
'thermal-engine',
# Similar issue with HiKey960, the board will crash if this is frozen
# for too long.
'watchdogd',
]
}
"""
Dictionary mapping OS name to list of task names that we can't afford to
freeze when using :meth:`freeze_userspace`.
"""
CONF_CLASS = TargetConf
INIT_KWARGS_KEY_MAP = {
'devlib_excluded_modules': ['devlib', 'excluded-modules'],
'devlib_file_xfer': ['devlib', 'file-xfer'],
'wait_boot': ['wait-boot', 'enable'],
'wait_boot_timeout': ['wait-boot', 'timeout'],
}
def __init__(self, kind, name='<noname>', tools=[], res_dir=None,
plat_info=None, lazy_platinfo=False, workdir=None, device=None, host=None, port=None,
username=None, password=<PASSWORD>, keyfile=None, strict_host_check=None,
devlib_platform=None, devlib_excluded_modules=[], devlib_file_xfer=None,
wait_boot=True, wait_boot_timeout=10,
):
# pylint: disable=dangerous-default-value
super().__init__()
logger = self.get_logger()
self.name = name
res_dir = res_dir if res_dir else self._get_res_dir(
root=os.path.join(LISA_HOME, RESULT_DIR),
relative='',
name=f'{self.__class__.__qualname__}-{self.name}',
append_time=True,
symlink=True
)
self._res_dir = res_dir
os.makedirs(self._res_dir, exist_ok=True)
if os.listdir(self._res_dir):
raise ValueError(f'res_dir must be empty: {self._res_dir}')
if plat_info is None:
plat_info = PlatformInfo()
else:
# Make a copy of the PlatformInfo so we don't modify the original
# one we were passed when adding the target source to it
plat_info = copy.copy(plat_info)
logger.info(f'User-defined platform information:\n{plat_info}')
self.plat_info = plat_info
# Take the board name from the target configuration so it becomes
# available for later inspection. That board name is mostly free form
# and no specific value should be expected for a given kind of board
# (i.e. a Juno board might be named "foo-bar-juno-on-my-desk")
if name:
self.plat_info.add_src('target-conf', dict(name=name))
        # Determine file transfer method. Currently available options
# are 'sftp' and 'scp', defaults to sftp.
if devlib_file_xfer and devlib_file_xfer not in ('scp', 'sftp'):
raise ValueError(f'Invalid file transfer method: {devlib_file_xfer}')
use_scp = devlib_file_xfer == 'scp'
self._installed_tools = set()
self.target = self._init_target(
kind=kind,
name=name,
workdir=workdir,
device=device,
host=host,
port=port,
username=username,
password=password,
keyfile=keyfile,
strict_host_check=strict_host_check,
use_scp=use_scp,
devlib_platform=devlib_platform,
wait_boot=wait_boot,
wait_boot_timeout=wait_boot_timeout,
)
devlib_excluded_modules = set(devlib_excluded_modules)
# Sorry, can't let you do that. Messing with cgroups in a systemd
        # system is a pretty bad idea.
if self._uses_systemd:
logger.warning('Will not load cgroups devlib module: target is using systemd, which already uses cgroups')
devlib_excluded_modules.add('cgroups')
self._devlib_loadable_modules = _DEVLIB_AVAILABLE_MODULES - devlib_excluded_modules
# Initialize binary tools to deploy
if tools:
logger.info(f'Tools to install: {tools}')
self.install_tools(tools)
# Autodetect information from the target, after the Target is
# initialized. Expensive computations are deferred so they will only be
# computed when actually needed.
rta_calib_res_dir = ArtifactPath.join(self._res_dir, 'rta_calib')
os.makedirs(rta_calib_res_dir)
self.plat_info.add_target_src(self, rta_calib_res_dir, deferred=lazy_platinfo, fallback=True)
logger.info(f'Effective platform information:\n{self.plat_info}')
@property
@memoized
def _uses_systemd(self):
try:
# Check if systemd is being used, according to:
# https://www.freedesktop.org/software/systemd/man/sd_booted.html
self.execute('test -d /run/systemd/system/', check_exit_code=True)
except TargetStableError:
return False
else:
return True
def is_module_available(self, module):
"""
Check if the given devlib module is available.
:returns: ``True`` if module is available, ``False`` otherwise.
:param module: Devlib module to check.
:type module: str
.. note:: This will attempt to load the module if it's not loaded
already, and bail out if it fails to load.
"""
if module not in _DEVLIB_AVAILABLE_MODULES:
raise ValueError(f'"{module}" is not a devlib module')
try:
getattr(self, module)
except Exception: # pylint: disable=broad-except
return False
else:
return True
def __getattr__(self, attr):
"""
Forward all non-overriden attributes/method accesses to the underlying
:class:`devlib.target.Target`.
.. note:: That will not forward special methods like __str__, since the
interpreter bypasses __getattr__ when looking them up.
.. note:: Devlib modules are loaded on demand when accessed.
"""
def get():
return getattr(self.target, attr)
try:
return get()
except AttributeError:
# Load the module on demand
if attr in self._devlib_loadable_modules:
self.get_logger().info(f'Loading target devlib module {attr}')
self.target.install_module(attr)
return get()
# If it was not in the loadable list, it
# has been excluded explicitly
elif attr in _DEVLIB_AVAILABLE_MODULES:
# pylint: disable=raise-missing-from
raise AttributeError(f'Devlib target module {attr} was explicitly excluded, not loading it')
# Something else that does not exist ...
else:
raise
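    # Illustrative note (added annotation, not in the original file): e.g.
    # accessing ``target.cpufreq`` or ``target.hotplug`` triggers on-demand
    # loading of the corresponding devlib module, unless it was excluded via
    # ``devlib_excluded_modules``.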
def __dir__(self):
"""
List our attributes plus the ones from the underlying target, and the
devlib modules that could be loaded on-demand.
"""
attrs = set(super().__dir__()) | set(dir(self.target)) | self._devlib_loadable_modules
return sorted(attrs)
@classmethod
def from_conf(cls, conf: TargetConf, res_dir: ArtifactPath = None, plat_info: PlatformInfo = None) -> 'Target':
cls.get_logger().info(f'Target configuration:\n{conf}')
kwargs = cls.conf_to_init_kwargs(conf)
kwargs['res_dir'] = res_dir
kwargs['plat_info'] = plat_info
# Create a devlib Platform instance out of the configuration file
devlib_platform_conf = conf['devlib']['platform']
devlib_platform_cls = resolve_dotted_name(devlib_platform_conf['class'])
devlib_platform_kwargs = copy.copy(devlib_platform_conf.get('args', {}))
# Hack for Gem5 devlib Platform, that requires a "host_output_dir"
# argument computed at runtime.
# Note: lisa.target.Gem5SimulationPlatformWrapper should be used instead
# of the original one to benefit from mapping configuration
if issubclass(devlib_platform_cls, Gem5SimulationPlatform):
devlib_platform_kwargs.setdefault('host_output_dir', res_dir)
# Actually build the devlib Platform object
devlib_platform = devlib_platform_cls(**devlib_platform_kwargs)
kwargs['devlib_platform'] = devlib_platform
cls.check_init_param(**kwargs)
return cls(**kwargs)
@classmethod
def from_default_conf(cls):
"""
Create a :class:`Target` from the YAML configuration file pointed by
``LISA_CONF`` environment variable.
        .. note:: Only load trusted YAML files as it can lead to arbitrary code
execution.
"""
path = os.environ['LISA_CONF']
return cls.from_one_conf(path)
@classmethod
def from_one_conf(cls, path):
"""
Create a :class:`Target` from a single YAML configuration file.
This file will be used to provide a :class:`TargetConf` and
:class:`lisa.platforms.platinfo.PlatformInfo` instances.
        .. note:: Only load trusted YAML files as it can lead to arbitrary code
execution.
"""
conf = TargetConf.from_yaml_map(path)
try:
plat_info = PlatformInfo.from_yaml_map(path)
except Exception as e: # pylint: disable=broad-except
cls.get_logger().warning(f'No platform information could be found: {e}')
plat_info = None
return cls.from_conf(conf=conf, plat_info=plat_info)
@classmethod
# Keep the signature without *args and **kwargs so that it's usable by exekall
def from_cli(cls, argv=None, params=None) -> 'Target':
"""
Same as :meth:`from_custom_cli` without the custom parameters
capabilities.
:return: A connected :class:`Target`
"""
_, target = cls.from_custom_cli(argv=argv, params=params)
return target
@classmethod
def from_custom_cli(cls, argv=None, params=None):
"""
Create a Target from command line arguments.
:param argv: The list of arguments. ``sys.argv[1:]`` will be used if
this is ``None``.
:type argv: list(str)
:param params: Dictionary of custom parameters to add to the parser. It
is in the form of
``{param_name: {dict of ArgumentParser.add_argument() options}}``.
:type params: dict(str, dict)
:return: A tuple ``(args, target)``
.. note:: This method should not be relied upon to implement long-term
scripts, it's more designed for quick scripting.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(
"""
Connect to a target using the provided configuration in order
to run a test.
EXAMPLES
--conf can point to a YAML target configuration file
with all the necessary connection information:
$ {script} --conf my_target.yml
Alternatively, --kind must be set along the relevant credentials:
$ {script} --kind linux --host 192.0.2.1 --username root --password <PASSWORD>
In both cases, --conf can also contain a PlatformInfo YAML description.
            Note: only load trusted YAML files as it can lead to arbitrary
code execution.
""".format(
script=os.path.basename(sys.argv[0])
)))
parser.add_argument("--conf", '-c',
help="Path to a TargetConf and PlatformInfo yaml file. Other options will override what is specified in the file."
)
parser.add_argument("--kind", "-k",
choices=["android", "linux", "host"],
help="The kind of target to connect to.")
device_group = parser.add_mutually_exclusive_group()
device_group.add_argument("--device", "-d",
help="The ADB ID of the target. Superseeds --host. Only applies to Android kind.")
device_group.add_argument("--host", "-n",
help="The hostname/IP of the target.")
parser.add_argument("--username", "-u",
help="Login username. Only applies to Linux kind.")
parser.add_argument("--password", <PASSWORD>",
help="Login password. Only applies to Linux kind.")
parser.add_argument("--log-level",
default='info',
choices=('warning', 'info', 'debug'),
help="Verbosity level of the logs.")
parser.add_argument("--res-dir", "-o",
help="Result directory of the created Target. If no directory is specified, a default location under $LISA_HOME will be used.")
params = params or {}
for param, settings in params.items():
parser.add_argument(f'--{param}', **settings)
custom_params = {k.replace('-', '_') for k in params.keys()}
# Options that are not a key in TargetConf must be listed here
not_target_conf_opt = {
'platform_info', 'log_level', 'res_dir', 'conf',
}
not_target_conf_opt.update(custom_params)
args = parser.parse_args(argv)
setup_logging(level=args.log_level.upper())
target_conf = TargetConf()
platform_info = None
if args.conf:
# Tentatively load a PlatformInfo from the conf file
with contextlib.suppress(KeyError, ValueError):
platform_info = PlatformInfo.from_yaml_map(args.conf)
# Load the TargetConf from the file, and update it with command
# line arguments
try:
conf = TargetConf.from_yaml_map(args.conf)
except (KeyError, ValueError):
pass
else:
target_conf.add_src(args.conf, conf)
target_conf.add_src('command-line', {
k: v for k, v in vars(args).items()
if v is not None and k not in not_target_conf_opt
})
# Some sanity check to get better error messages
if 'kind' not in target_conf:
parser.error('--conf with target configuration or any of the connection options is required')
if args.kind == 'android':
if ('host' not in target_conf) and ('device' not in target_conf):
parser.error('--host or --device must be specified')
if args.kind == 'linux':
for required in ['host', 'username', 'password']:
if required not in target_conf:
parser.error(f'--{required} must be specified')
custom_args = {
param: value
for param, value in vars(args).items()
if param in custom_params
}
custom_args = argparse.Namespace(**custom_args)
return custom_args, cls.from_conf(conf=target_conf, plat_info=platform_info, res_dir=args.res_dir)
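# Hedged usage sketch (not part of the original file): a quick script built
# on from_custom_cli(); the extra 'iterations' parameter and the printed
# attributes are illustrative assumptions only.
#
#   args, target = Target.from_custom_cli(params={
#       'iterations': dict(type=int, default=10, help='Number of runs'),
#   })
#   print(args.iterations, target.plat_info)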
def _init_target(self, kind, name, workdir, device, host,
port, username, password, keyfile, strict_host_check, use_scp,
devlib_platform,
wait_boot, wait_boot_timeout,
):
"""
Initialize the Target
"""
logger = self.get_logger()
conn_settings = {}
resolved_username = username or 'root'
logger.debug(f'Setting up {kind} target...')
# If the target is Android, we need just (eventually) the device
if kind == 'android':
devlib_target_cls = devlib.AndroidTarget
# Workaround for ARM-software/devlib#225
workdir = workdir or '/data/local/tmp/devlib-target'
if device:
pass
elif host:
port = port or self.ADB_PORT_DEFAULT
device = f'{host}:{port}'
else:
device = 'DEFAULT'
conn_settings['device'] = device
# If the username was explicitly set to "root", root the target as
# early as possible
conn_settings['adb_as_root'] = (username == 'root')
elif kind == 'linux':
devlib_target_cls = devlib.LinuxTarget
conn_settings.update(
username=resolved_username,
port=port or self.SSH_PORT_DEFAULT,
host=host,
strict_host_check=True if strict_host_check is None else strict_host_check,
use_scp=False if use_scp is None else use_scp,
)
# Configure password or SSH keyfile
if keyfile:
conn_settings['keyfile'] = keyfile
else:
conn_settings['password'] = password
elif kind == 'host':
devlib_target_cls = devlib.LocalLinuxTarget
# If we are given a password, assume we can use it as a sudo
# password.
conn_settings.update(
unrooted=password is None,
password=password,
)
else:
raise ValueError(f'Unsupported platform type {kind}')
settings = '\n '.join(
f' {key}: {val}'
for key, val in conn_settings.items()
if key != 'password'
)
logger.debug(f'{kind} {name} target connection settings:\n {settings}')
########################################################################
# Devlib Platform configuration
########################################################################
if not devlib_platform:
devlib_platform = devlib.platform.Platform()
########################################################################
# Create devlib Target object
########################################################################
target = devlib_target_cls(
platform=devlib_platform,
load_default_modules=False,
connection_settings=conn_settings,
working_directory=workdir,
connect=False,
)
target.connect(check_boot_completed=wait_boot, timeout=wait_boot_timeout)
# None as username means adb root will be attempted, but failure will
# not prevent from connecting to the target.
if kind == 'android' and username is None:
try:
target.adb_root(enable=True)
except Exception as e: # pylint: disable=broad-except
logger.warning(f'"adb root" failed: {e}')
logger.debug(f'Target info: {dict(abi=target.abi, cpuinfo=target.cpuinfo, workdir=target.working_directory)}')
target.setup()
logger.info(f"Connected to target {(name or '')}")
return target
def get_res_dir(self, name=None, append_time=True, symlink=True):
"""
Returns a directory managed by LISA to store results.
Usage of this function is reserved for interactive use or simple scripts.
Tests should not rely on that as the created folder will not be tracked
by any external entity, which means the results will be lost in some
automated environment.
:param name: Name of the results directory
:type name: str
:param append_time: If True, the current datetime will be appended to
the given ``name``. If ``name`` is None, the directory name will be
the current datetime.
:type append_time: bool
:param symlink: Create a symlink named ``results_latest`` to the newly
created results directory
:type symlink: bool
"""
if isinstance(self._res_dir, ArtifactPath):
root = self._res_dir.root
relative = self._res_dir.relative
else:
root = self._res_dir
relative = ''
return self._get_res_dir(
root=root,
relative=relative,
name=name,
append_time=append_time,
symlink=symlink,
)
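# Illustrative call (an assumption, not from the original source): create a
# managed result directory during an interactive session.
#
#   res_dir = target.get_res_dir(name='my-experiment')
#   # e.g. <LISA_HOME>/results/my-experiment-20240101_120000.000000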
def _get_res_dir(self, root, relative, name, append_time, symlink):
logger = self.get_logger()
while True:
time_str = datetime.now().strftime('%Y%m%d_%H%M%S.%f')
if not name:
name = time_str
elif append_time:
name = f"{name}-{time_str}"
# If we were given an ArtifactPath with an existing root, we
# preserve that root so it can be relocated as the caller wants it
res_dir = ArtifactPath(root, os.path.join(relative, name))
# Compute base installation path
logger.info(f'Creating result directory: {res_dir}')
# It will fail if the folder already exists. In that case,
# append_time should be used to ensure we get a unique name.
try:
os.makedirs(res_dir)
break
except FileExistsError:
# If the time is used in the name, there is some hope that the
# next time it will succeed
if append_time:
logger.info('Directory already exists, retrying ...')
continue
else:
raise
if symlink:
res_lnk = os.path.join(LISA_HOME, LATEST_LINK)
with contextlib.suppress(FileNotFoundError):
os.remove(res_lnk)
# There may be a race condition with another tool trying to create
# the link
with contextlib.suppress(FileExistsError):
os.symlink(res_dir, res_lnk)
return res_dir
def install_tools(self, tools):
"""
Install tools additional to those specified in the test config 'tools'
field
:param tools: The list of names of tools to install
:type tools: list(str)
"""
def bin_path(tool):
binary = os.path.join(ASSETS_PATH, 'binaries', self.abi, tool)
if not os.path.isfile(binary):
binary = os.path.join(ASSETS_PATH, 'binaries', 'scripts', tool)
return binary
tools = set(tools) - self._installed_tools
# TODO: compute the checksum of the tool + install location and keep
# that in _installed_tools, so we are sure to be correct
for tool in map(bin_path, tools):
self.target.install(tool)
self._installed_tools.add(tool)
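# Hedged example (the tool names are placeholders and would need to exist
# under the assets 'binaries' or 'scripts' directories):
#
#   target.install_tools(['rt-app', 'trace-cmd'])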
@contextlib.contextmanager
def freeze_userspace(self):
"""
Context manager that lets you freeze the userspace.
.. note:: A number of situations can prevent freezing anything. When
that happens, a warning is logged but no exception is raised, so
it's a best-effort approach.
"""
logger = self.get_logger()
if not self.is_rooted:
logger.warning('Could not freeze userspace: target is not rooted')
cm = nullcontext
elif not self.is_module_available('cgroups'):
logger.warning('Could not freeze userspace: "cgroups" devlib module is necessary')
cm = nullcontext
else:
controllers = [s.name for s in self.cgroups.list_subsystems()]
if 'freezer' not in controllers:
logger.warning('Could not freeze userspace: freezer cgroup controller not available on the target')
cm = nullcontext
else:
exclude = copy.copy(self.CRITICAL_TASKS[self.target.os])
# Do not freeze the process in charge of de-freezing, otherwise we
# will freeze to death and a machine hard reboot will be required
if isinstance(self.target, devlib.LocalLinuxTarget):
exclude.append(str(os.getpid()))
@contextlib.contextmanager
def cm():
logger.info(f"Freezing all tasks except: {','.join(exclude)}")
try:
yield self.cgroups.freeze(exclude)
finally:
logger.info('Un-freezing userspace tasks')
self.cgroups.freeze(thaw=True)
with cm() as x:
yield x
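# Minimal sketch of wrapping a workload with the freezer (run_workload() is
# a hypothetical helper, not defined in this module):
#
#   with target.freeze_userspace():
#       run_workload()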
@contextlib.contextmanager
def disable_idle_states(self):
"""
Context manager that lets you disable all idle states
"""
logger = self.get_logger()
logger.info('Disabling idle states for all domains')
try:
cpuidle = self.cpuidle
except AttributeError:
logger.warning('Could not disable idle states, cpuidle devlib module is not loaded')
cm = nullcontext
else:
@contextlib.contextmanager
def cm():
try:
for cpu in range(self.plat_info['cpus-count']):
cpuidle.disable_all(cpu)
yield
finally:
logger.info('Re-enabling idle states for all domains')
for cpu in range(self.plat_info['cpus-count']):
cpuidle.enable_all(cpu)
with cm() as x:
yield x
def get_tags(self):
return {'board': self.name}
@classmethod
def _make_remote_snippet(cls, name, code_str, module, kwargs, global_vars, out_tempfiles):
# Inject the parameters inside the wrapper's globals so that it can
# access them. It's harmless as they would shadow any global name
# anyway, and it's restricted to the wrapper using eval()
global_vars = {
**global_vars,
**kwargs,
}
# Treat the modules separately as they cannot be pickled
modules = {
name: mod
for name, mod in global_vars.items()
if isinstance(mod, ModuleType)
}
def can_include(f):
return (
isinstance(f, FunctionType) and
# Only allow inlining of functions defined in the same module so that:
# 1. there is no name clash risk
# 2. we don't inline the whole world, which could lead to
#    problems appearing after another module is updated. We
#    only inline local things that are under our direct
#    control
f.__module__ == module
)
def add_func(f, name):
# Disallow decorated functions since their definition depends on
# external callable we cannot control
if hasattr(f, '__wrapped__'):
raise TypeError('Decorated functions cannot be called from remote functions')
closure_vars = {
name: val
for var_dct in inspect.getclosurevars(f)
if isinstance(var_dct, Mapping)
for name, val in var_dct.items()
}
funcs[name] = (f, cls._get_code(f)[1])
for _name, _f in closure_vars.items():
if _f is not f and can_include(_f):
add_func(_f, _name)
modules.update(
(name, mod)
for name, mod in closure_vars.items()
if isinstance(mod, ModuleType)
)
funcs = {}
for f_name, f in global_vars.items():
if can_include(f):
add_func(f, f_name)
code_str += '\n' + '\n'.join(map(itemgetter(1), funcs.values()))
non_pickled = set(modules.keys()) | set(funcs.keys())
global_vars = {
name: val
for name, val in global_vars.items()
if name not in non_pickled
}
if modules:
modules = f"import {', '.join(sorted(modules))}"
else:
modules = ''
script = textwrap.dedent('''
import pickle
import sys
def wrapper():
{modules}
{code}
return {f}({kwargs})
try:
out = eval(wrapper.__code__, pickle.loads({globals}))
except BaseException as e:
out = e
out_is_excep = True
else:
out_is_excep = False
out = pickle.dumps(out)
out_tempfile = {out_tempfiles}[1] if out_is_excep else {out_tempfiles}[0]
with open(out_tempfile, 'wb') as f:
f.write(out)
''').format(
f=name,
code=textwrap.dedent(code_str).replace('\n', '\n' + ' ' * 4),
modules=modules,
out_tempfiles=repr(out_tempfiles),
globals=repr(pickle.dumps(global_vars)),
kwargs=', '.join(
f'{name}={name}'
for name in kwargs.keys()
)
)
return script
@staticmethod
def _get_code(f):
lines, _ = inspect.getsourcelines(f)
# Remove decorators, as they are either undefined or just were used to
# feed the function to us
lines = [
line
for line in lines
if not line.strip().startswith('@')
]
code_str = textwrap.dedent(''.join(lines))
name = f.__name__
return (name, code_str)
def execute_python(self, f, args, kwargs, **execute_kwargs):
"""
Executes the given Python function ``f`` with the provided positional
and keyword arguments.
The return value or any exception is pickled back and is
returned/raised in the host caller.
:Variable keyword arguments: Forwarded to :meth:`execute` that
will spawn the Python interpreter on the target
.. note:: Closure variables are supported, but mutating them will not
be reflected in the caller's context. Also, functions that are
referred to will be:
* bundled in the script if it is defined in the same module
* referred to by name, assuming it comes from a module that is
installed on the target and that this module is in scope. If
that is not the case, a :exc:`NameError` will be raised.
.. attention:: Decorators are ignored and not applied.
"""
sig = inspect.signature(f)
kwargs = sig.bind(*args, **kwargs).arguments
closure_vars = inspect.getclosurevars(f)
name, code_str = self._get_code(f)
def mktemp():
return self.execute(
f'mktemp -p {shlex.quote(self.working_directory)}'
).strip()
def read_output(path):
with tempfile.TemporaryDirectory() as d:
name = os.path.join(d, 'out')
self.pull(path, name)
with open(name, 'rb') as f:
return pickle.loads(f.read())
def parse_output(paths, err):
val, excep = paths
try:
return read_output(val)
# If the file is empty, we probably got an exception
except EOFError:
# pylint: disable=raise-missing-from
try:
excep = read_output(excep)
# If we can't even read the exception, raise the initial one
# from devlib
except EOFError:
raise err if err is not None else ValueError('No exception was raised or value returned by the function')
else:
raise excep
out_tempfiles = tuple()
try:
out_tempfiles = (mktemp(), mktemp())
snippet = self._make_remote_snippet(
name=name,
code_str=code_str,
module=f.__module__,
kwargs=kwargs,
global_vars={
**closure_vars.globals,
**closure_vars.nonlocals,
},
out_tempfiles=out_tempfiles
)
cmd = ['python3', '-c', snippet]
cmd = ' '.join(map(shlex.quote, cmd))
try:
self.execute(cmd, **execute_kwargs)
except Exception as e: # pylint: disable=broad-except
err = e
else:
err = None
return parse_output(out_tempfiles, err)
finally:
for path in out_tempfiles:
self.remove(path)
def remote_func(self, **kwargs):
"""
Decorates a given function to execute remotely using
:meth:`execute_python`::
target = Target(...)
@target.remote_func(timeout=42)
def foo(x, y):
return x + y
# Execute the function on the target transparently
val = foo(1, y=2)
:Variable keyword arguments: Forwarded to :meth:`execute` that
will spawn the Python interpreter on the target
"""
def wrapper_param(f):
@functools.wraps(f)
def wrapper(*f_args, **f_kwargs):
return self.execute_python(f, f_args, f_kwargs, **kwargs)
return wrapper
return wrapper_param
class Gem5SimulationPlatformWrapper(Gem5SimulationPlatform):
def __init__(self, system, simulator, **kwargs):
simulator_args = copy.copy(simulator.get('args', []))
system_platform = system['platform']
# Get gem5 binary arguments
simulator_args.append('--listener-mode=on')
simulator_args.append(system_platform['description'])
simulator_args.extend(system_platform.get('args', []))
simulator_args.extend((
f"--kernel {system['kernel']}",
f"--dtb {system['dtb']}",
f"--disk-image {system['disk']}"
))
diod_path = which('diod')
if diod_path is None:
raise RuntimeError('Failed to find "diod" on your host machine, check your installation or your PATH variable')
# Setup virtio
# Brackets are there to let the output dir be created automatically
virtio_args = [
f'--which-diod={diod_path}',
'--workload-automation-vio={}',
]
simulator_args.extend(virtio_args)
# Quote/escape arguments and build the command line
gem5_args = ' '.join(shlex.quote(a) for a in simulator_args)
super().__init__(
gem5_args=gem5_args,
gem5_bin=simulator['bin'],
**kwargs
)
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
| 1.640625 | 2 |
iota/commands/core/get_node_info.py | EasonC13/iota.py | 347 | 1660 | import filters as f
from iota import TransactionHash, Address
from iota.commands import FilterCommand, RequestFilter, ResponseFilter
from iota.filters import Trytes
__all__ = [
'GetNodeInfoCommand',
]
class GetNodeInfoCommand(FilterCommand):
"""
Executes `getNodeInfo` command.
See :py:meth:`iota.api.StrictIota.get_node_info`.
"""
command = 'getNodeInfo'
def get_request_filter(self):
return GetNodeInfoRequestFilter()
def get_response_filter(self):
return GetNodeInfoResponseFilter()
class GetNodeInfoRequestFilter(RequestFilter):
def __init__(self) -> None:
# ``getNodeInfo`` does not accept any parameters.
# Using a filter here just to enforce that the request is empty.
super(GetNodeInfoRequestFilter, self).__init__({})
class GetNodeInfoResponseFilter(ResponseFilter):
def __init__(self) -> None:
super(GetNodeInfoResponseFilter, self).__init__({
'coordinatorAddress':
f.ByteString(encoding='ascii') | Trytes(Address),
'latestMilestone':
f.ByteString(encoding='ascii') | Trytes(TransactionHash),
'latestSolidSubtangleMilestone':
f.ByteString(encoding='ascii') | Trytes(TransactionHash),
})
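# Rough usage sketch (an assumption, not part of this module): the command is
# normally reached through the high-level API object.
#
#   from iota import Iota
#   api = Iota('https://nodes.example.org:443')
#   node_info = api.get_node_info()
#   print(node_info['latestMilestone'])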
| 2.34375 | 2 |
Aplicacion/Presentacion/views.py | Juandiegordp/TPI | 0 | 1661 | from Negocio import controller
import forms, functions
from flask import Flask, render_template, request, redirect, url_for, flash
def register(mysql, request):
registerForm= forms.RegisterForm(request.form)
if request.method == 'POST' and registerForm.validate():
return controller.registraUsuario(mysql, request, registerForm)
return render_template('register.html', form=registerForm)
def Index(mysql, request):
if request.method=='GET':
success= request.args.get('success')
if success==None:
if controller.usuarioIniciado():
return redirect(url_for('home'))
else:
return render_template('Index.html')
else:
return render_template('Index.html', success=success)
return render_template('Index.html')
def home(mysql, request):
if request.method== 'POST':
controller.iniciarSesion(mysql, request)
if controller.usuarioIniciado() and request.method== 'GET':
return controller.mostrarRutinas(mysql, request)
else:
return redirect(url_for('Index'))
def historial_rutina(mysql, request):
if controller.usuarioIniciado() and request.method== 'GET':
return controller.mostrar_historial_rutina(mysql, request)
else:
return redirect(url_for('Index'))
def historial_usuario(mysql, request):
if controller.usuarioIniciado() and request.method== 'GET':
return controller.mostrar_historial_usuario(mysql, request)
else:
return redirect(url_for('Index'))
def perfil(mysql, request):
if controller.usuarioIniciado() and request.method=='GET':
success= request.args.get('success')
usuario=controller.datosUsuario(mysql, request)
imc=functions.IMC(usuario[8], usuario[7])
m_basal= controller.calcular_metabolismo_basal(mysql, usuario[7], usuario[8])
return render_template('perfil.html', success=success, usuario=usuario, imc=imc, evaluacion=functions.evaluarIMC(imc), pg=functions.porcentajeGrasa(usuario[5], usuario[9], usuario[10], usuario[7], usuario[11]), m_basal=m_basal )
else:
return redirect(url_for('Index'))
def ActualizarPerfil(mysql, request):
actualize_form= forms.PerfilForm(request.form)
if request.method == 'POST' and controller.usuarioIniciado():
if actualize_form.validate():
return controller.actualizar_perfil(mysql, request)
else:
flash("Alguno de los datos es incorrecto")
return redirect(url_for('actualizar_perfil', success=False))
else:
if request.method == 'GET' and controller.usuarioIniciado():
datos=controller.formulario_perfil(mysql)
return render_template('actualizar_perfil.html', form=actualize_form, datos=datos)
return redirect(url_for('perfil'))
def administracionRutinas(mysql, request):
if controller.usuarioIniciado():
return render_template('administracion_rutinas.html')
else:
return redirect(url_for('Index'))
def crearRutina(mysql, request):
if request.method =='POST' and controller.usuarioIniciado():
return controller.agregarRutina(mysql, request)
else:
if controller.rutinaIniciada() and controller.usuarioIniciado():
return controller.rutinaEnCurso(mysql, request)
if controller.usuarioIniciado():
return redirect(url_for('adm_rutinas'))
else:
return redirect(url_for('Index'))
def registrarEjerciciosRutina(mysql, request):
if request.method == 'POST':
return controller.registrarEjerciciosRutina(mysql, request)
return redirect(url_for('adm_rutinas'))
def modificarRutina(mysql, request):
if controller.usuarioIniciado():
rutinas=controller.rutinasUsuario(mysql)
rutinaEjercicios=controller.rutinaEjercicios(mysql)
datosEjer=controller.datosEjercicios(mysql)
return render_template('modify_rutina.html', rutinas=rutinas , ejercicios=datosEjer, rutinaEjer=rutinaEjercicios)
else:
return redirect(url_for('Index'))
def registrarModiciaciones(mysql, request):
if request.method == 'POST':
return controller.registrarModificaciones(mysql, request)
return redirect(url_for('adm_rutinas'))
def eliminarRutina(mysql,request):
if controller.usuarioIniciado():
rutinas=controller.rutinasUsuario(mysql)
rutinaEjercicios=controller.rutinaEjercicios(mysql)
return render_template('delete_rutina.html', rutinas=rutinas , rutinaEjer=rutinaEjercicios)
else:
return redirect(url_for('Index'))
def registrarEliminacion(mysql, request):
if request.method=='POST' and controller.usuarioIniciado():
return controller.registrarEliminacion(mysql, request)
else:
return redirect(url_for('Index'))
def registrarEjercicios(mysql, request):
if request.method == 'POST':
return controller.registrarEjercicio(mysql, request)
return redirect(url_for('ejercicios'))
| 2.625 | 3 |
evalme/tests/test_old_format.py | heartexlabs/label-studio-evalme | 3 | 1662 | <filename>evalme/tests/test_old_format.py
from evalme.matcher import Matcher
def test_old_format_agreement_matrix():
m = Matcher(new_format=False)
m.load(r"./tests/test_data/test_old_format.json")
matrix = m.get_annotations_agreement()
assert matrix is not None
assert matrix > 0
def test_old_format_load():
m = Matcher(new_format=False)
m.load(r"./tests/test_data/test_old_format.json")
assert m._new_format is False
assert m._result_name == 'completions'
def test_new_format_load():
m = Matcher(new_format=False)
m.load(r"./tests/test_data/test_bbox.json")
assert m._new_format is True
assert m._result_name == 'annotations'
| 2.546875 | 3 |
behave/runner.py | wombat70/behave | 13 | 1663 | # -*- coding: UTF-8 -*-
"""
This module provides Runner class to run behave feature files (or model elements).
"""
from __future__ import absolute_import, print_function, with_statement
import contextlib
import os.path
import sys
import warnings
import weakref
import six
from behave._types import ExceptionUtil
from behave.capture import CaptureController
from behave.exception import ConfigError
from behave.formatter._registry import make_formatters
from behave.runner_util import \
collect_feature_locations, parse_features, \
exec_file, load_step_modules, PathManager
from behave.step_registry import registry as the_step_registry
from enum import Enum
if six.PY2:
# -- USE PYTHON3 BACKPORT: With unicode traceback support.
import traceback2 as traceback
else:
import traceback
class CleanupError(RuntimeError):
pass
class ContextMaskWarning(UserWarning):
"""Raised if a context variable is being overwritten in some situations.
If the variable was originally set by user code then this will be raised if
*behave* overwrites the value.
If the variable was originally set by *behave* then this will be raised if
user code overwrites the value.
"""
pass
class ContextMode(Enum):
"""Used to distinguish between the two usage modes while using the context:
* BEHAVE: Indicates "behave" (internal) mode
* USER: Indicates "user" mode (in steps, hooks, fixtures, ...)
"""
BEHAVE = 1
USER = 2
class Context(object):
"""Hold contextual information during the running of tests.
This object is a place to store information related to the tests you're
running. You may add arbitrary attributes to it of whatever value you need.
During the running of your tests the object will have additional layers of
namespace added and removed automatically. There is a "root" namespace and
additional namespaces for features and scenarios.
Certain names are used by *behave*; be wary of using them yourself as
*behave* may overwrite the value you set. These names are:
.. attribute:: feature
This is set when we start testing a new feature and holds a
:class:`~behave.model.Feature`. It will not be present outside of a
feature (i.e. within the scope of the environment before_all and
after_all).
.. attribute:: scenario
This is set when we start testing a new scenario (including the
individual scenarios of a scenario outline) and holds a
:class:`~behave.model.Scenario`. It will not be present outside of the
scope of a scenario.
.. attribute:: tags
The current set of active tags (as a Python set containing instances of
:class:`~behave.model.Tag` which are basically just glorified strings)
combined from the feature and scenario. This attribute will not be
present outside of a feature scope.
.. attribute:: aborted
This is set to true in the root namespace when the user aborts a test run
(:exc:`KeyboardInterrupt` exception). Initially: False.
.. attribute:: failed
This is set to true in the root namespace as soon as a step fails.
Initially: False.
.. attribute:: table
This is set at the step level and holds any :class:`~behave.model.Table`
associated with the step.
.. attribute:: text
This is set at the step level and holds any multiline text associated
with the step.
.. attribute:: config
The configuration of *behave* as determined by configuration files and
command-line options. The attributes of this object are the same as the
`configuration file section names`_.
.. attribute:: active_outline
This is set for each scenario in a scenario outline and references the
:class:`~behave.model.Row` that is active for the current scenario. It is
present mostly for debugging, but may be useful otherwise.
.. attribute:: log_capture
If logging capture is enabled then this attribute contains the captured
logging as an instance of :class:`~behave.log_capture.LoggingCapture`.
It is not present if logging is not being captured.
.. attribute:: stdout_capture
If stdout capture is enabled then this attribute contains the captured
output as a StringIO instance. It is not present if stdout is not being
captured.
.. attribute:: stderr_capture
If stderr capture is enabled then this attribute contains the captured
output as a StringIO instance. It is not present if stderr is not being
captured.
A :class:`behave.runner.ContextMaskWarning` warning will be raised if user
code attempts to overwrite one of these variables, or if *behave* itself
tries to overwrite a user-set variable.
You may use the "in" operator to test whether a certain value has been set
on the context, for example:
"feature" in context
checks whether there is a "feature" value in the context.
Values may be deleted from the context using "del" but only at the level
they are set. You can't delete a value set by a feature at a scenario level
but you can delete a value set for a scenario in that scenario.
.. _`configuration file section names`: behave.html#configuration-files
"""
# pylint: disable=too-many-instance-attributes
FAIL_ON_CLEANUP_ERRORS = True
def __init__(self, runner):
self._runner = weakref.proxy(runner)
self._config = runner.config
d = self._root = {
"aborted": False,
"failed": False,
"config": self._config,
"active_outline": None,
"cleanup_errors": 0,
"@cleanups": [], # -- REQUIRED-BY: before_all() hook
"@layer": "testrun",
}
self._stack = [d]
self._record = {}
self._origin = {}
self._mode = ContextMode.BEHAVE
# -- MODEL ENTITY REFERENCES/SUPPORT:
self.feature = None
# DISABLED: self.rule = None
# DISABLED: self.scenario = None
self.text = None
self.table = None
# -- RUNTIME SUPPORT:
self.stdout_capture = None
self.stderr_capture = None
self.log_capture = None
self.fail_on_cleanup_errors = self.FAIL_ON_CLEANUP_ERRORS
@staticmethod
def ignore_cleanup_error(context, cleanup_func, exception):
pass
@staticmethod
def print_cleanup_error(context, cleanup_func, exception):
cleanup_func_name = getattr(cleanup_func, "__name__", None)
if not cleanup_func_name:
cleanup_func_name = "%r" % cleanup_func
print(u"CLEANUP-ERROR in %s: %s: %s" %
(cleanup_func_name, exception.__class__.__name__, exception))
traceback.print_exc(file=sys.stdout)
# MAYBE: context._dump(pretty=True, prefix="Context: ")
# -- MARK: testrun as FAILED
# context._set_root_attribute("failed", True)
def _do_cleanups(self):
"""Execute optional cleanup functions when stack frame is popped.
A user can add a user-specified handler for cleanup errors.
.. code-block:: python
# -- FILE: features/environment.py
def cleanup_database(database):
pass
def handle_cleanup_error(context, cleanup_func, exception):
pass
def before_all(context):
context.on_cleanup_error = handle_cleanup_error
context.add_cleanup(cleanup_database, the_database)
"""
# -- BEST-EFFORT ALGORITHM: Tries to perform all cleanups.
assert self._stack, "REQUIRE: Non-empty stack"
current_layer = self._stack[0]
cleanup_funcs = current_layer.get("@cleanups", [])
on_cleanup_error = getattr(self, "on_cleanup_error",
self.print_cleanup_error)
context = self
cleanup_errors = []
for cleanup_func in reversed(cleanup_funcs):
try:
cleanup_func()
except Exception as e: # pylint: disable=broad-except
# pylint: disable=protected-access
context._root["cleanup_errors"] += 1
cleanup_errors.append(sys.exc_info())
on_cleanup_error(context, cleanup_func, e)
if self.fail_on_cleanup_errors and cleanup_errors:
first_cleanup_error_info = cleanup_errors[0]
del cleanup_errors # -- ENSURE: Release other exception frames.
six.reraise(*first_cleanup_error_info)
def _push(self, layer_name=None):
"""Push a new layer on the context stack.
HINT: Use layer_name values: "scenario", "feature", "testrun".
:param layer_name: Layer name to use (or None).
"""
initial_data = {"@cleanups": []}
if layer_name:
initial_data["@layer"] = layer_name
self._stack.insert(0, initial_data)
def _pop(self):
"""Pop the current layer from the context stack.
Performs any pending cleanups, registered for this layer.
"""
try:
self._do_cleanups()
finally:
# -- ENSURE: Layer is removed even if cleanup-errors occur.
self._stack.pop(0)
def _use_with_behave_mode(self):
"""Provides a context manager for using the context in BEHAVE mode."""
return use_context_with_mode(self, ContextMode.BEHAVE)
def use_with_user_mode(self):
"""Provides a context manager for using the context in USER mode."""
return use_context_with_mode(self, ContextMode.USER)
def user_mode(self):
warnings.warn("Use 'use_with_user_mode()' instead",
PendingDeprecationWarning, stacklevel=2)
return self.use_with_user_mode()
def _set_root_attribute(self, attr, value):
for frame in self.__dict__["_stack"]:
if frame is self.__dict__["_root"]:
continue
if attr in frame:
record = self.__dict__["_record"][attr]
params = {
"attr": attr,
"filename": record[0],
"line": record[1],
"function": record[3],
}
self._emit_warning(attr, params)
self.__dict__["_root"][attr] = value
if attr not in self._origin:
self._origin[attr] = self._mode
def _emit_warning(self, attr, params):
msg = ""
if self._mode is ContextMode.BEHAVE and self._origin[attr] is not ContextMode.BEHAVE:
msg = "behave runner is masking context attribute '%(attr)s' " \
"originally set in %(function)s (%(filename)s:%(line)s)"
elif self._mode is ContextMode.USER:
if self._origin[attr] is not ContextMode.USER:
msg = "user code is masking context attribute '%(attr)s' " \
"originally set by behave"
elif self._config.verbose:
msg = "user code is masking context attribute " \
"'%(attr)s'; see the tutorial for what this means"
if msg:
msg = msg % params
warnings.warn(msg, ContextMaskWarning, stacklevel=3)
def _dump(self, pretty=False, prefix=" "):
for level, frame in enumerate(self._stack):
print("%sLevel %d" % (prefix, level))
if pretty:
for name in sorted(frame.keys()):
value = frame[name]
print("%s %-15s = %r" % (prefix, name, value))
else:
print(prefix + repr(frame))
def __getattr__(self, attr):
if attr[0] == "_":
try:
return self.__dict__[attr]
except KeyError:
raise AttributeError(attr)
for frame in self._stack:
if attr in frame:
return frame[attr]
msg = "'{0}' object has no attribute '{1}'"
msg = msg.format(self.__class__.__name__, attr)
raise AttributeError(msg)
def __setattr__(self, attr, value):
if attr[0] == "_":
self.__dict__[attr] = value
return
for frame in self._stack[1:]:
if attr in frame:
record = self._record[attr]
params = {
"attr": attr,
"filename": record[0],
"line": record[1],
"function": record[3],
}
self._emit_warning(attr, params)
stack_limit = 2
if six.PY2:
stack_limit += 1 # Due to traceback2 usage.
stack_frame = traceback.extract_stack(limit=stack_limit)[0]
self._record[attr] = stack_frame
frame = self._stack[0]
frame[attr] = value
if attr not in self._origin:
self._origin[attr] = self._mode
def __delattr__(self, attr):
frame = self._stack[0]
if attr in frame:
del frame[attr]
del self._record[attr]
else:
msg = "'{0}' object has no attribute '{1}' at the current level"
msg = msg.format(self.__class__.__name__, attr)
raise AttributeError(msg)
def __contains__(self, attr):
if attr[0] == "_":
return attr in self.__dict__
for frame in self._stack:
if attr in frame:
return True
return False
def execute_steps(self, steps_text):
"""The steps identified in the "steps" text string will be parsed and
executed in turn just as though they were defined in a feature file.
If the execute_steps call fails (either through error or failure
assertion) then the step invoking it will need to catch the resulting
exceptions.
:param steps_text: Text with the Gherkin steps to execute (as string).
:returns: True, if the steps executed successfully.
:raises: AssertionError, if a step failure occurs.
:raises: ValueError, if invoked without a feature context.
"""
assert isinstance(steps_text, six.text_type), "Steps must be unicode."
if not self.feature:
raise ValueError("execute_steps() called outside of feature")
# -- PREPARE: Save original context data for current step.
# Needed if step definition that called this method uses .table/.text
original_table = getattr(self, "table", None)
original_text = getattr(self, "text", None)
self.feature.parser.variant = "steps"
steps = self.feature.parser.parse_steps(steps_text)
with self._use_with_behave_mode():
for step in steps:
passed = step.run(self._runner, quiet=True, capture=False)
if not passed:
# -- ISSUE #96: Provide more substep info to diagnose problem.
step_line = u"%s %s" % (step.keyword, step.name)
message = "%s SUB-STEP: %s" % \
(step.status.name.upper(), step_line)
if step.error_message:
message += "\nSubstep info: %s\n" % step.error_message
message += u"Traceback (of failed substep):\n"
message += u"".join(traceback.format_tb(step.exc_traceback))
# message += u"\nTraceback (of context.execute_steps()):"
assert False, message
# -- FINALLY: Restore original context data for current step.
self.table = original_table
self.text = original_text
return True
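# Hedged example of calling execute_steps() from a step implementation; the
# step texts below are invented for illustration:
#
#   @when('I log in as an administrator')
#   def step_impl(context):
#       context.execute_steps(u'''
#           Given I open the login page
#           When I submit valid admin credentials
#       ''')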
def add_cleanup(self, cleanup_func, *args, **kwargs):
"""Adds a cleanup function that is called when :meth:`Context._pop()`
is called. This is intended for user-cleanups.
:param cleanup_func: Callable function
:param args: Args for cleanup_func() call (optional).
:param kwargs: Kwargs for cleanup_func() call (optional).
"""
# MAYBE:
assert callable(cleanup_func), "REQUIRES: callable(cleanup_func)"
assert self._stack
if args or kwargs:
def internal_cleanup_func():
cleanup_func(*args, **kwargs)
else:
internal_cleanup_func = cleanup_func
current_frame = self._stack[0]
if cleanup_func not in current_frame["@cleanups"]:
# -- AVOID DUPLICATES:
current_frame["@cleanups"].append(internal_cleanup_func)
@contextlib.contextmanager
def use_context_with_mode(context, mode):
"""Switch context to ContextMode.BEHAVE or ContextMode.USER mode.
Provides a context manager for switching between the two context modes.
.. sourcecode:: python
context = Context()
with use_context_with_mode(context, ContextMode.BEHAVE):
... # Do something
# -- POSTCONDITION: Original context._mode is restored.
:param context: Context object to use.
:param mode: Mode to apply to context object.
"""
# pylint: disable=protected-access
assert mode in (ContextMode.BEHAVE, ContextMode.USER)
current_mode = context._mode
try:
context._mode = mode
yield
finally:
# -- RESTORE: Initial current_mode
# Even if an AssertionError/Exception is raised.
context._mode = current_mode
@contextlib.contextmanager
def scoped_context_layer(context, layer_name=None):
"""Provides context manager for context layer (push/do-something/pop cycle).
.. code-block::
with scoped_context_layer(context):
the_fixture = use_fixture(foo, context, name="foo_42")
"""
# pylint: disable=protected-access
try:
context._push(layer_name)
yield context
finally:
context._pop()
def path_getrootdir(path):
"""
Extract rootdir from path in a platform independent way.
POSIX-PATH EXAMPLE:
rootdir = path_getrootdir("/foo/bar/one.feature")
assert rootdir == "/"
WINDOWS-PATH EXAMPLE:
rootdir = path_getrootdir("D:\\foo\\bar\\one.feature")
assert rootdir == r"D:\"
"""
drive, _ = os.path.splitdrive(path)
if drive:
# -- WINDOWS:
return drive + os.path.sep
# -- POSIX:
return os.path.sep
class ModelRunner(object):
"""
Test runner for a behave model (features).
Provides the core functionality of a test runner and
the functional API needed by model elements.
.. attribute:: aborted
This is set to true when the user aborts a test run
(:exc:`KeyboardInterrupt` exception). Initially: False.
Stored as derived attribute in :attr:`Context.aborted`.
"""
# pylint: disable=too-many-instance-attributes
def __init__(self, config, features=None, step_registry=None):
self.config = config
self.features = features or []
self.hooks = {}
self.formatters = []
self.undefined_steps = []
self.step_registry = step_registry
self.capture_controller = CaptureController(config)
self.context = None
self.feature = None
self.hook_failures = 0
# @property
def _get_aborted(self):
value = False
if self.context:
value = self.context.aborted
return value
# @aborted.setter
def _set_aborted(self, value):
# pylint: disable=protected-access
assert self.context, "REQUIRE: context, but context=%r" % self.context
self.context._set_root_attribute("aborted", bool(value))
aborted = property(_get_aborted, _set_aborted,
doc="Indicates that test run is aborted by the user.")
def run_hook(self, name, context, *args):
if not self.config.dry_run and (name in self.hooks):
try:
with context.use_with_user_mode():
self.hooks[name](context, *args)
# except KeyboardInterrupt:
# self.aborted = True
# if name not in ("before_all", "after_all"):
# raise
except Exception as e: # pylint: disable=broad-except
# -- HANDLE HOOK ERRORS:
use_traceback = False
if self.config.verbose:
use_traceback = True
ExceptionUtil.set_traceback(e)
extra = u""
if "tag" in name:
extra = "(tag=%s)" % args[0]
error_text = ExceptionUtil.describe(e, use_traceback).rstrip()
error_message = u"HOOK-ERROR in %s%s: %s" % (name, extra, error_text)
print(error_message)
self.hook_failures += 1
if "tag" in name:
# -- SCENARIO or FEATURE
statement = getattr(context, "scenario", context.feature)
elif "all" in name:
# -- ABORT EXECUTION: For before_all/after_all
self.aborted = True
statement = None
else:
# -- CASE: feature, scenario, step
statement = args[0]
if statement:
# -- CASE: feature, scenario, step
statement.hook_failed = True
if statement.error_message:
# -- NOTE: One exception/failure is already stored.
# Append only error message.
statement.error_message += u"\n"+ error_message
else:
# -- FIRST EXCEPTION/FAILURE:
statement.store_exception_context(e)
statement.error_message = error_message
def setup_capture(self):
if not self.context:
self.context = Context(self)
self.capture_controller.setup_capture(self.context)
def start_capture(self):
self.capture_controller.start_capture()
def stop_capture(self):
self.capture_controller.stop_capture()
def teardown_capture(self):
self.capture_controller.teardown_capture()
def run_model(self, features=None):
# pylint: disable=too-many-branches
if not self.context:
self.context = Context(self)
if self.step_registry is None:
self.step_registry = the_step_registry
if features is None:
features = self.features
# -- ENSURE: context.execute_steps() works in weird cases (hooks, ...)
context = self.context
self.hook_failures = 0
self.setup_capture()
self.run_hook("before_all", context)
run_feature = not self.aborted
failed_count = 0
undefined_steps_initial_size = len(self.undefined_steps)
for feature in features:
if run_feature:
try:
self.feature = feature
for formatter in self.formatters:
formatter.uri(feature.filename)
failed = feature.run(self)
if failed:
failed_count += 1
if self.config.stop or self.aborted:
# -- FAIL-EARLY: After first failure.
run_feature = False
except KeyboardInterrupt:
self.aborted = True
failed_count += 1
run_feature = False
# -- ALWAYS: Report run/not-run feature to reporters.
# REQUIRED-FOR: Summary to keep track of untested features.
for reporter in self.config.reporters:
reporter.feature(feature)
# -- AFTER-ALL:
# pylint: disable=protected-access, broad-except
cleanups_failed = False
self.run_hook("after_all", self.context)
try:
self.context._do_cleanups() # Without dropping the last context layer.
except Exception:
cleanups_failed = True
if self.aborted:
print("\nABORTED: By user.")
for formatter in self.formatters:
formatter.close()
for reporter in self.config.reporters:
reporter.end()
failed = ((failed_count > 0) or self.aborted or (self.hook_failures > 0)
or (len(self.undefined_steps) > undefined_steps_initial_size)
or cleanups_failed)
# XXX-MAYBE: or context.failed)
return failed
def run(self):
"""
Implements the run method by running the model.
"""
self.context = Context(self)
return self.run_model()
class Runner(ModelRunner):
"""
Standard test runner for behave:
* setup paths
* loads environment hooks
* loads step definitions
* select feature files, parses them and creates model (elements)
"""
def __init__(self, config):
super(Runner, self).__init__(config)
self.path_manager = PathManager()
self.base_dir = None
def setup_paths(self):
# pylint: disable=too-many-branches, too-many-statements
if self.config.paths:
if self.config.verbose:
print("Supplied path:", \
", ".join('"%s"' % path for path in self.config.paths))
first_path = self.config.paths[0]
if hasattr(first_path, "filename"):
# -- BETTER: isinstance(first_path, FileLocation):
first_path = first_path.filename
base_dir = first_path
if base_dir.startswith("@"):
# -- USE: behave @features.txt
base_dir = base_dir[1:]
file_locations = self.feature_locations()
if file_locations:
base_dir = os.path.dirname(file_locations[0].filename)
base_dir = os.path.abspath(base_dir)
# supplied path might be to a feature file
if os.path.isfile(base_dir):
if self.config.verbose:
print("Primary path is to a file so using its directory")
base_dir = os.path.dirname(base_dir)
else:
if self.config.verbose:
print('Using default path "./features"')
base_dir = os.path.abspath("features")
# Get the root. This is not guaranteed to be "/" because Windows.
root_dir = path_getrootdir(base_dir)
new_base_dir = base_dir
steps_dir = self.config.steps_dir
environment_file = self.config.environment_file
while True:
if self.config.verbose:
print("Trying base directory:", new_base_dir)
if os.path.isdir(os.path.join(new_base_dir, steps_dir)):
break
if os.path.isfile(os.path.join(new_base_dir, environment_file)):
break
if new_base_dir == root_dir:
break
new_base_dir = os.path.dirname(new_base_dir)
if new_base_dir == root_dir:
if self.config.verbose:
if not self.config.paths:
print('ERROR: Could not find "%s" directory. '\
'Please specify where to find your features.' % \
steps_dir)
else:
print('ERROR: Could not find "%s" directory in your '\
'specified path "%s"' % (steps_dir, base_dir))
message = 'No %s directory in %r' % (steps_dir, base_dir)
raise ConfigError(message)
base_dir = new_base_dir
self.config.base_dir = base_dir
for dirpath, dirnames, filenames in os.walk(base_dir, followlinks=True):
if [fn for fn in filenames if fn.endswith(".feature")]:
break
else:
if self.config.verbose:
if not self.config.paths:
print('ERROR: Could not find any "<name>.feature" files. '\
'Please specify where to find your features.')
else:
print('ERROR: Could not find any "<name>.feature" files '\
'in your specified path "%s"' % base_dir)
raise ConfigError('No feature files in %r' % base_dir)
self.base_dir = base_dir
self.path_manager.add(base_dir)
if not self.config.paths:
self.config.paths = [base_dir]
if base_dir != os.getcwd():
self.path_manager.add(os.getcwd())
def before_all_default_hook(self, context):
"""
Default implementation for :func:`before_all()` hook.
Setup the logging subsystem based on the configuration data.
"""
# pylint: disable=no-self-use
context.config.setup_logging()
def load_hooks(self, filename=None):
filename = filename or self.config.environment_file
hooks_path = os.path.join(self.base_dir, filename)
if os.path.exists(hooks_path):
exec_file(hooks_path, self.hooks)
if "before_all" not in self.hooks:
self.hooks["before_all"] = self.before_all_default_hook
def load_step_definitions(self, extra_step_paths=None):
if extra_step_paths is None:
extra_step_paths = []
# -- Allow steps to import other stuff from the steps dir
# NOTE: Default matcher can be overridden in "environment.py" hook.
steps_dir = os.path.join(self.base_dir, self.config.steps_dir)
step_paths = [steps_dir] + list(extra_step_paths)
load_step_modules(step_paths)
def feature_locations(self):
return collect_feature_locations(self.config.paths)
def run(self):
with self.path_manager:
self.setup_paths()
return self.run_with_paths()
def run_with_paths(self):
self.context = Context(self)
self.load_hooks()
self.load_step_definitions()
# -- ENSURE: context.execute_steps() works in weird cases (hooks, ...)
# self.setup_capture()
# self.run_hook("before_all", self.context)
# -- STEP: Parse all feature files (by using their file location).
feature_locations = [filename for filename in self.feature_locations()
if not self.config.exclude(filename)]
features = parse_features(feature_locations, language=self.config.lang)
self.features.extend(features)
# -- STEP: Run all features.
stream_openers = self.config.outputs
self.formatters = make_formatters(self.config, stream_openers)
return self.run_model()
| 2.359375 | 2 |
01_P/P_2_1_1_02/main.py | genfifth/generative-design_Code-Package-Python-Mode | 1 | 1664 | add_library('pdf')
import random
from datetime import datetime
tileCount = 20
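# Descriptive summary of the interactions implemented below (comment added for
# clarity, inferred from the handlers; not in the original sketch):
#   mouse X / mouse Y -> stroke weight of the two diagonal directions
#   mouse press       -> new random seed, i.e. a new composition
#   keys 1/2/3        -> stroke cap (ROUND / SQUARE / PROJECT)
#   keys 4/5          -> toggle left/right stroke colors, 6/7 -> their alpha
#   key 0             -> reset caps, colors and alpha; 's' saves PNG, 'p' PDF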
def setup():
global savePDF, actStrokeCap, actRandomSeed, colorLeft, colorRight, alphaLeft, alphaRight
savePDF = False
actStrokeCap = ROUND
actRandomSeed = 0
colorLeft = color(197, 0, 123)
colorRight = color(87, 35, 129)
alphaLeft = 100
alphaRight = 100
def draw():
global savePDF, actStrokeCap, actRandomSeed, colorLeft, colorRight, alphaLeft, alphaRight
if savePDF:
beginRecord(PDF, datetime.now().strftime("%Y%m%d%H%M%S")+".pdf")
background(255)
smooth()
noFill()
strokeCap(actStrokeCap)
random.seed(actRandomSeed)
for gridY in range(tileCount):
for gridX in range(tileCount):
posX = int(width/tileCount*gridX)
posY = int(height/tileCount*gridY)
toggle = random.randint(0,1)
if (toggle == 0):
strokeWeight(mouseX/20)
stroke(colorLeft, alphaLeft)
line(posX, posY, posX+width/tileCount, posY+height/tileCount)
elif (toggle == 1):
strokeWeight(mouseY/20)
stroke(colorRight, alphaRight)
line(posX, posY+width/tileCount, posX+height/tileCount, posY)
if (savePDF):
savePDF = False
endRecord()
def mousePressed():
global savePDF, actStrokeCap, actRandomSeed, colorLeft, colorRight, alphaLeft, alphaRight
actRandomSeed = random.randint(0, 100000)
def keyReleased():
global savePDF, actStrokeCap, actRandomSeed, colorLeft, colorRight, alphaLeft, alphaRight
if (key=='s' or key=='S'):
saveFrame(datetime.now().strftime("%Y%m%d%H%M%S")+".png")
if (key=='p' or key=='P'):
savePDF = True
if key == "1":
actStrokeCap = ROUND
elif key == "2":
actStrokeCap = SQUARE
elif key == "3":
actStrokeCap = PROJECT
elif (key == '4'):
if (colorLeft == color(0)):
colorLeft = color(323, 100, 77)
else:
colorLeft = color(0)
elif (key == '5'):
if (colorRight == color(0)):
colorRight = color(273, 73, 51)
else:
colorRight = color(0)
elif (key == '6'):
if (alphaLeft == 100):
alphaLeft = 50
else:
alphaLeft = 100
elif (key == '7'):
if (alphaRight == 100):
alphaRight = 50
else:
alphaRight = 100
if (key == '0'):
actStrokeCap = ROUND
colorLeft = color(0)
colorRight = color(0)
alphaLeft = 100
alphaRight = 100
| 2.78125 | 3 |
core/dbt/contracts/graph/manifest.py | peiwangdb/dbt | 0 | 1665 | import enum
from dataclasses import dataclass, field
from itertools import chain, islice
from mashumaro import DataClassMessagePackMixin
from multiprocessing.synchronize import Lock
from typing import (
Dict, List, Optional, Union, Mapping, MutableMapping, Any, Set, Tuple,
TypeVar, Callable, Iterable, Generic, cast, AbstractSet, ClassVar
)
from typing_extensions import Protocol
from uuid import UUID
from dbt.contracts.graph.compiled import (
CompileResultNode, ManifestNode, NonSourceCompiledNode, GraphMemberNode
)
from dbt.contracts.graph.parsed import (
ParsedMacro, ParsedDocumentation, ParsedNodePatch, ParsedMacroPatch,
ParsedSourceDefinition, ParsedExposure, HasUniqueID,
UnpatchedSourceDefinition, ManifestNodes
)
from dbt.contracts.graph.unparsed import SourcePatch
from dbt.contracts.files import SourceFile, SchemaSourceFile, FileHash, AnySourceFile
from dbt.contracts.util import (
BaseArtifactMetadata, SourceKey, ArtifactMixin, schema_version
)
from dbt.dataclass_schema import dbtClassMixin
from dbt.exceptions import (
CompilationException,
raise_duplicate_resource_name, raise_compiler_error, warn_or_error,
raise_duplicate_patch_name,
raise_duplicate_macro_patch_name, raise_duplicate_source_patch_name,
)
from dbt.helper_types import PathSet
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.node_types import NodeType
from dbt.ui import line_wrap_message
from dbt import flags
from dbt import tracking
import dbt.utils
NodeEdgeMap = Dict[str, List[str]]
PackageName = str
DocName = str
RefName = str
UniqueID = str
def find_unique_id_for_package(storage, key, package: Optional[PackageName]):
if key not in storage:
return None
pkg_dct: Mapping[PackageName, UniqueID] = storage[key]
if package is None:
if not pkg_dct:
return None
else:
return next(iter(pkg_dct.values()))
elif package in pkg_dct:
return pkg_dct[package]
else:
return None
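# Illustrative behaviour (made-up identifiers): given
#   storage = {'my_model': {'pkg_a': 'model.pkg_a.my_model'}}
# both find_unique_id_for_package(storage, 'my_model', 'pkg_a') and
# find_unique_id_for_package(storage, 'my_model', None) return
# 'model.pkg_a.my_model', while an unknown key or package returns None.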
class DocLookup(dbtClassMixin):
def __init__(self, manifest: 'Manifest'):
self.storage: Dict[str, Dict[PackageName, UniqueID]] = {}
self.populate(manifest)
def get_unique_id(self, key, package: Optional[PackageName]):
return find_unique_id_for_package(self.storage, key, package)
def find(self, key, package: Optional[PackageName], manifest: 'Manifest'):
unique_id = self.get_unique_id(key, package)
if unique_id is not None:
return self.perform_lookup(unique_id, manifest)
return None
def add_doc(self, doc: ParsedDocumentation):
if doc.name not in self.storage:
self.storage[doc.name] = {}
self.storage[doc.name][doc.package_name] = doc.unique_id
def populate(self, manifest):
for doc in manifest.docs.values():
self.add_doc(doc)
def perform_lookup(
self, unique_id: UniqueID, manifest
) -> ParsedDocumentation:
if unique_id not in manifest.docs:
raise dbt.exceptions.InternalException(
f'Doc {unique_id} found in cache but not found in manifest'
)
return manifest.docs[unique_id]
class SourceLookup(dbtClassMixin):
def __init__(self, manifest: 'Manifest'):
self.storage: Dict[Tuple[str, str], Dict[PackageName, UniqueID]] = {}
self.populate(manifest)
def get_unique_id(self, key, package: Optional[PackageName]):
return find_unique_id_for_package(self.storage, key, package)
def find(self, key, package: Optional[PackageName], manifest: 'Manifest'):
unique_id = self.get_unique_id(key, package)
if unique_id is not None:
return self.perform_lookup(unique_id, manifest)
return None
def add_source(self, source: ParsedSourceDefinition):
key = (source.source_name, source.name)
if key not in self.storage:
self.storage[key] = {}
self.storage[key][source.package_name] = source.unique_id
def populate(self, manifest):
for source in manifest.sources.values():
if hasattr(source, 'source_name'):
self.add_source(source)
def perform_lookup(
self, unique_id: UniqueID, manifest: 'Manifest'
) -> ParsedSourceDefinition:
if unique_id not in manifest.sources:
raise dbt.exceptions.InternalException(
f'Source {unique_id} found in cache but not found in manifest'
)
return manifest.sources[unique_id]
class RefableLookup(dbtClassMixin):
# model, seed, snapshot
_lookup_types: ClassVar[set] = set(NodeType.refable())
# refables are actually unique, so the Dict[PackageName, UniqueID] will
# only ever have exactly one value, but doing 3 dict lookups instead of 1
# is not a big deal at all and retains consistency
def __init__(self, manifest: 'Manifest'):
self.storage: Dict[str, Dict[PackageName, UniqueID]] = {}
self.populate(manifest)
def get_unique_id(self, key, package: Optional[PackageName]):
return find_unique_id_for_package(self.storage, key, package)
def find(self, key, package: Optional[PackageName], manifest: 'Manifest'):
unique_id = self.get_unique_id(key, package)
if unique_id is not None:
return self.perform_lookup(unique_id, manifest)
return None
def add_node(self, node: ManifestNode):
if node.resource_type in self._lookup_types:
if node.name not in self.storage:
self.storage[node.name] = {}
self.storage[node.name][node.package_name] = node.unique_id
def populate(self, manifest):
for node in manifest.nodes.values():
self.add_node(node)
def perform_lookup(
self, unique_id: UniqueID, manifest
) -> ManifestNode:
if unique_id not in manifest.nodes:
raise dbt.exceptions.InternalException(
f'Node {unique_id} found in cache but not found in manifest'
)
return manifest.nodes[unique_id]
class AnalysisLookup(RefableLookup):
_lookup_types: ClassVar[set] = set(NodeType.Analysis)
def _search_packages(
current_project: str,
node_package: str,
target_package: Optional[str] = None,
) -> List[Optional[str]]:
if target_package is not None:
return [target_package]
elif current_project == node_package:
return [current_project, None]
else:
return [current_project, node_package, None]
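# Example (made-up project names): _search_packages('root_proj', 'dep_pkg')
# returns ['root_proj', 'dep_pkg', None], i.e. the packages tried in order
# when resolving a name without an explicit target package.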
@dataclass
class ManifestMetadata(BaseArtifactMetadata):
"""Metadata for the manifest."""
dbt_schema_version: str = field(
default_factory=lambda: str(WritableManifest.dbt_schema_version)
)
project_id: Optional[str] = field(
default=None,
metadata={
'description': 'A unique identifier for the project',
},
)
user_id: Optional[UUID] = field(
default=None,
metadata={
'description': 'A unique identifier for the user',
},
)
send_anonymous_usage_stats: Optional[bool] = field(
default=None,
metadata=dict(description=(
'Whether dbt is configured to send anonymous usage statistics'
)),
)
adapter_type: Optional[str] = field(
default=None,
metadata=dict(description='The type name of the adapter'),
)
def __post_init__(self):
if tracking.active_user is None:
return
if self.user_id is None:
self.user_id = tracking.active_user.id
if self.send_anonymous_usage_stats is None:
self.send_anonymous_usage_stats = (
not tracking.active_user.do_not_track
)
@classmethod
def default(cls):
return cls(
dbt_schema_version=str(WritableManifest.dbt_schema_version),
)
def _sort_values(dct):
"""Given a dictionary, sort each value. This makes output deterministic,
which helps for tests.
"""
return {k: sorted(v) for k, v in dct.items()}
def build_node_edges(nodes: List[ManifestNode]):
"""Build the forward and backward edges on the given list of ParsedNodes
and return them as two separate dictionaries, each mapping unique IDs to
lists of edges.
"""
backward_edges: Dict[str, List[str]] = {}
# pre-populate the forward edge dict for simplicity
forward_edges: Dict[str, List[str]] = {n.unique_id: [] for n in nodes}
for node in nodes:
backward_edges[node.unique_id] = node.depends_on_nodes[:]
for unique_id in node.depends_on_nodes:
if unique_id in forward_edges.keys():
forward_edges[unique_id].append(node.unique_id)
return _sort_values(forward_edges), _sort_values(backward_edges)
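# Worked example (hypothetical unique ids): if model.pkg.b depends on
# model.pkg.a and model.pkg.a depends on nothing, the result is
#   forward_edges  == {'model.pkg.a': ['model.pkg.b'], 'model.pkg.b': []}
#   backward_edges == {'model.pkg.a': [], 'model.pkg.b': ['model.pkg.a']}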
# Build a map of children of macros
def build_macro_edges(nodes: List[Any]):
forward_edges: Dict[str, List[str]] = {
n.unique_id: [] for n in nodes if n.unique_id.startswith('macro') or n.depends_on.macros
}
for node in nodes:
for unique_id in node.depends_on.macros:
if unique_id in forward_edges.keys():
forward_edges[unique_id].append(node.unique_id)
return _sort_values(forward_edges)
def _deepcopy(value):
return value.from_dict(value.to_dict(omit_none=True))
class Locality(enum.IntEnum):
Core = 1
Imported = 2
Root = 3
class Specificity(enum.IntEnum):
Default = 1
Adapter = 2
@dataclass
class MacroCandidate:
locality: Locality
macro: ParsedMacro
def __eq__(self, other: object) -> bool:
if not isinstance(other, MacroCandidate):
return NotImplemented
return self.locality == other.locality
def __lt__(self, other: object) -> bool:
if not isinstance(other, MacroCandidate):
return NotImplemented
if self.locality < other.locality:
return True
if self.locality > other.locality:
return False
return False
@dataclass
class MaterializationCandidate(MacroCandidate):
specificity: Specificity
@classmethod
def from_macro(
cls, candidate: MacroCandidate, specificity: Specificity
) -> 'MaterializationCandidate':
return cls(
locality=candidate.locality,
macro=candidate.macro,
specificity=specificity,
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, MaterializationCandidate):
return NotImplemented
equal = (
self.specificity == other.specificity and
self.locality == other.locality
)
if equal:
raise_compiler_error(
'Found two materializations with the name {} (packages {} and '
'{}). dbt cannot resolve this ambiguity'
.format(self.macro.name, self.macro.package_name,
other.macro.package_name)
)
return equal
def __lt__(self, other: object) -> bool:
if not isinstance(other, MaterializationCandidate):
return NotImplemented
if self.specificity < other.specificity:
return True
if self.specificity > other.specificity:
return False
if self.locality < other.locality:
return True
if self.locality > other.locality:
return False
return False
M = TypeVar('M', bound=MacroCandidate)
class CandidateList(List[M]):
def last(self) -> Optional[ParsedMacro]:
if not self:
return None
self.sort()
return self[-1].macro
def _get_locality(
macro: ParsedMacro, root_project_name: str, internal_packages: Set[str]
) -> Locality:
if macro.package_name == root_project_name:
return Locality.Root
elif macro.package_name in internal_packages:
return Locality.Core
else:
return Locality.Imported
class Searchable(Protocol):
resource_type: NodeType
package_name: str
@property
def search_name(self) -> str:
raise NotImplementedError('search_name not implemented')
N = TypeVar('N', bound=Searchable)
@dataclass
class NameSearcher(Generic[N]):
name: str
package: Optional[str]
nodetypes: List[NodeType]
def _matches(self, model: N) -> bool:
"""Return True if the model matches the given name, package, and type.
If package is None, any package is allowed.
nodetypes should be a container of NodeTypes that implements the 'in'
operator.
"""
if model.resource_type not in self.nodetypes:
return False
if self.name != model.search_name:
return False
return self.package is None or self.package == model.package_name
def search(self, haystack: Iterable[N]) -> Optional[N]:
"""Find an entry in the given iterable by name."""
for model in haystack:
if self._matches(model):
return model
return None
D = TypeVar('D')
@dataclass
class Disabled(Generic[D]):
target: D
MaybeDocumentation = Optional[ParsedDocumentation]
MaybeParsedSource = Optional[Union[
ParsedSourceDefinition,
Disabled[ParsedSourceDefinition],
]]
MaybeNonSource = Optional[Union[
ManifestNode,
Disabled[ManifestNode]
]]
T = TypeVar('T', bound=GraphMemberNode)
def _update_into(dest: MutableMapping[str, T], new_item: T):
"""Update dest to overwrite whatever is at dest[new_item.unique_id] with
    new_item. There must be an existing value to overwrite, and the two nodes
must have the same original file path.
"""
unique_id = new_item.unique_id
if unique_id not in dest:
raise dbt.exceptions.RuntimeException(
f'got an update_{new_item.resource_type} call with an '
f'unrecognized {new_item.resource_type}: {new_item.unique_id}'
)
existing = dest[unique_id]
if new_item.original_file_path != existing.original_file_path:
raise dbt.exceptions.RuntimeException(
f'cannot update a {new_item.resource_type} to have a new file '
f'path!'
)
dest[unique_id] = new_item
# This contains macro methods that are in both the Manifest
# and the MacroManifest
class MacroMethods:
# Just to make mypy happy. There must be a better way.
def __init__(self):
self.macros = []
self.metadata = {}
def find_macro_by_name(
self, name: str, root_project_name: str, package: Optional[str]
) -> Optional[ParsedMacro]:
"""Find a macro in the graph by its name and package name, or None for
any package. The root project name is used to determine priority:
- locally defined macros come first
- then imported macros
- then macros defined in the root project
"""
filter: Optional[Callable[[MacroCandidate], bool]] = None
if package is not None:
def filter(candidate: MacroCandidate) -> bool:
return package == candidate.macro.package_name
candidates: CandidateList = self._find_macros_by_name(
name=name,
root_project_name=root_project_name,
filter=filter,
)
return candidates.last()
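    # Illustrative note on the precedence encoded above: because Locality orders
    # Core < Imported < Root and CandidateList.last() returns the highest-sorted
    # candidate, a call such as
    #   manifest.find_macro_by_name('my_macro', root_project_name='proj', package=None)
    # is expected to prefer a root-project definition over an imported package's,
    # and either of those over dbt's internal one ('my_macro'/'proj' are placeholders).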
def find_generate_macro_by_name(
self, component: str, root_project_name: str
) -> Optional[ParsedMacro]:
"""
The `generate_X_name` macros are similar to regular ones, but ignore
imported packages.
- if there is a `generate_{component}_name` macro in the root
project, return it
- return the `generate_{component}_name` macro from the 'dbt'
internal project
"""
def filter(candidate: MacroCandidate) -> bool:
return candidate.locality != Locality.Imported
candidates: CandidateList = self._find_macros_by_name(
name=f'generate_{component}_name',
root_project_name=root_project_name,
# filter out imported packages
filter=filter,
)
return candidates.last()
def _find_macros_by_name(
self,
name: str,
root_project_name: str,
filter: Optional[Callable[[MacroCandidate], bool]] = None
) -> CandidateList:
"""Find macros by their name.
"""
# avoid an import cycle
from dbt.adapters.factory import get_adapter_package_names
candidates: CandidateList = CandidateList()
packages = set(get_adapter_package_names(self.metadata.adapter_type))
for unique_id, macro in self.macros.items():
if macro.name != name:
continue
candidate = MacroCandidate(
locality=_get_locality(macro, root_project_name, packages),
macro=macro,
)
if filter is None or filter(candidate):
candidates.append(candidate)
return candidates
@dataclass
class ParsingInfo:
static_analysis_parsed_path_count: int = 0
static_analysis_path_count: int = 0
@dataclass
class ManifestStateCheck(dbtClassMixin):
vars_hash: FileHash = field(default_factory=FileHash.empty)
profile_hash: FileHash = field(default_factory=FileHash.empty)
project_hashes: MutableMapping[str, FileHash] = field(default_factory=dict)
@dataclass
class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
"""The manifest for the full graph, after parsing and during compilation.
"""
# These attributes are both positional and by keyword. If an attribute
    # is added it must also be added in the __reduce_ex__ method in the
# args tuple in the right position.
nodes: MutableMapping[str, ManifestNode] = field(default_factory=dict)
sources: MutableMapping[str, ParsedSourceDefinition] = field(default_factory=dict)
macros: MutableMapping[str, ParsedMacro] = field(default_factory=dict)
docs: MutableMapping[str, ParsedDocumentation] = field(default_factory=dict)
exposures: MutableMapping[str, ParsedExposure] = field(default_factory=dict)
selectors: MutableMapping[str, Any] = field(default_factory=dict)
disabled: List[CompileResultNode] = field(default_factory=list)
files: MutableMapping[str, AnySourceFile] = field(default_factory=dict)
metadata: ManifestMetadata = field(default_factory=ManifestMetadata)
flat_graph: Dict[str, Any] = field(default_factory=dict)
state_check: ManifestStateCheck = field(default_factory=ManifestStateCheck)
# Moved from the ParseResult object
source_patches: MutableMapping[SourceKey, SourcePatch] = field(default_factory=dict)
# following is from ParseResult
_disabled: MutableMapping[str, List[CompileResultNode]] = field(default_factory=dict)
_doc_lookup: Optional[DocLookup] = field(
default=None, metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
)
_source_lookup: Optional[SourceLookup] = field(
default=None, metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
)
_ref_lookup: Optional[RefableLookup] = field(
default=None, metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
)
_analysis_lookup: Optional[AnalysisLookup] = field(
default=None, metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
)
_parsing_info: ParsingInfo = field(
default_factory=ParsingInfo,
metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
)
_lock: Lock = field(
default_factory=flags.MP_CONTEXT.Lock,
metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
)
def __pre_serialize__(self):
# serialization won't work with anything except an empty source_patches because
# tuple keys are not supported, so ensure it's empty
self.source_patches = {}
return self
@classmethod
def __post_deserialize__(cls, obj):
obj._lock = flags.MP_CONTEXT.Lock()
return obj
def sync_update_node(
self, new_node: NonSourceCompiledNode
) -> NonSourceCompiledNode:
"""update the node with a lock. The only time we should want to lock is
when compiling an ephemeral ancestor of a node at runtime, because
multiple threads could be just-in-time compiling the same ephemeral
dependency, and we want them to have a consistent view of the manifest.
If the existing node is not compiled, update it with the new node and
return that. If the existing node is compiled, do not update the
manifest and return the existing node.
"""
with self._lock:
existing = self.nodes[new_node.unique_id]
if getattr(existing, 'compiled', False):
# already compiled -> must be a NonSourceCompiledNode
return cast(NonSourceCompiledNode, existing)
_update_into(self.nodes, new_node)
return new_node
def update_exposure(self, new_exposure: ParsedExposure):
_update_into(self.exposures, new_exposure)
def update_node(self, new_node: ManifestNode):
_update_into(self.nodes, new_node)
def update_source(self, new_source: ParsedSourceDefinition):
_update_into(self.sources, new_source)
def build_flat_graph(self):
"""This attribute is used in context.common by each node, so we want to
only build it once and avoid any concurrency issues around it.
Make sure you don't call this until you're done with building your
manifest!
"""
self.flat_graph = {
'exposures': {
k: v.to_dict(omit_none=False)
for k, v in self.exposures.items()
},
'nodes': {
k: v.to_dict(omit_none=False)
for k, v in self.nodes.items()
},
'sources': {
k: v.to_dict(omit_none=False)
for k, v in self.sources.items()
}
}
def find_disabled_by_name(
self, name: str, package: Optional[str] = None
) -> Optional[ManifestNode]:
searcher: NameSearcher = NameSearcher(
name, package, NodeType.refable()
)
result = searcher.search(self.disabled)
return result
def find_disabled_source_by_name(
self, source_name: str, table_name: str, package: Optional[str] = None
) -> Optional[ParsedSourceDefinition]:
search_name = f'{source_name}.{table_name}'
searcher: NameSearcher = NameSearcher(
search_name, package, [NodeType.Source]
)
result = searcher.search(self.disabled)
if result is not None:
assert isinstance(result, ParsedSourceDefinition)
return result
def _materialization_candidates_for(
self, project_name: str,
materialization_name: str,
adapter_type: Optional[str],
) -> CandidateList:
if adapter_type is None:
specificity = Specificity.Default
else:
specificity = Specificity.Adapter
full_name = dbt.utils.get_materialization_macro_name(
materialization_name=materialization_name,
adapter_type=adapter_type,
with_prefix=False,
)
return CandidateList(
MaterializationCandidate.from_macro(m, specificity)
for m in self._find_macros_by_name(full_name, project_name)
)
def find_materialization_macro_by_name(
self, project_name: str, materialization_name: str, adapter_type: str
) -> Optional[ParsedMacro]:
candidates: CandidateList = CandidateList(chain.from_iterable(
self._materialization_candidates_for(
project_name=project_name,
materialization_name=materialization_name,
adapter_type=atype,
) for atype in (adapter_type, None)
))
return candidates.last()
def get_resource_fqns(self) -> Mapping[str, PathSet]:
resource_fqns: Dict[str, Set[Tuple[str, ...]]] = {}
all_resources = chain(self.exposures.values(), self.nodes.values(), self.sources.values())
for resource in all_resources:
resource_type_plural = resource.resource_type.pluralize()
if resource_type_plural not in resource_fqns:
resource_fqns[resource_type_plural] = set()
resource_fqns[resource_type_plural].add(tuple(resource.fqn))
return resource_fqns
# This is called by 'parse_patch' in the NodePatchParser
def add_patch(
self, source_file: SchemaSourceFile, patch: ParsedNodePatch,
) -> None:
if patch.yaml_key in ['models', 'seeds', 'snapshots']:
unique_id = self.ref_lookup.get_unique_id(patch.name, None)
elif patch.yaml_key == 'analyses':
unique_id = self.analysis_lookup.get_unique_id(patch.name, None)
else:
raise dbt.exceptions.InternalException(
f'Unexpected yaml_key {patch.yaml_key} for patch in '
f'file {source_file.path.original_file_path}'
)
if unique_id is None:
# This will usually happen when a node is disabled
return
# patches can't be overwritten
node = self.nodes.get(unique_id)
if node:
if node.patch_path:
package_name, existing_file_path = node.patch_path.split('://')
raise_duplicate_patch_name(patch, existing_file_path)
source_file.append_patch(patch.yaml_key, unique_id)
node.patch(patch)
def add_macro_patch(
self, source_file: SchemaSourceFile, patch: ParsedMacroPatch,
) -> None:
# macros are fully namespaced
unique_id = f'macro.{patch.package_name}.{patch.name}'
macro = self.macros.get(unique_id)
if not macro:
warn_or_error(
f'WARNING: Found documentation for macro "{patch.name}" '
f'which was not found'
)
return
if macro.patch_path:
package_name, existing_file_path = macro.patch_path.split('://')
raise_duplicate_macro_patch_name(patch, existing_file_path)
source_file.macro_patches[patch.name] = unique_id
macro.patch(patch)
def add_source_patch(
self, source_file: SchemaSourceFile, patch: SourcePatch,
) -> None:
# source patches must be unique
key = (patch.overrides, patch.name)
if key in self.source_patches:
raise_duplicate_source_patch_name(patch, self.source_patches[key])
self.source_patches[key] = patch
source_file.source_patches.append(key)
def get_used_schemas(self, resource_types=None):
return frozenset({
(node.database, node.schema) for node in
chain(self.nodes.values(), self.sources.values())
if not resource_types or node.resource_type in resource_types
})
def get_used_databases(self):
return frozenset(
x.database for x in
chain(self.nodes.values(), self.sources.values())
)
# This is used in dbt.task.rpc.sql_commands 'add_new_refs'
def deepcopy(self):
return Manifest(
nodes={k: _deepcopy(v) for k, v in self.nodes.items()},
sources={k: _deepcopy(v) for k, v in self.sources.items()},
macros={k: _deepcopy(v) for k, v in self.macros.items()},
docs={k: _deepcopy(v) for k, v in self.docs.items()},
exposures={k: _deepcopy(v) for k, v in self.exposures.items()},
selectors={k: _deepcopy(v) for k, v in self.selectors.items()},
metadata=self.metadata,
disabled=[_deepcopy(n) for n in self.disabled],
files={k: _deepcopy(v) for k, v in self.files.items()},
state_check=_deepcopy(self.state_check),
)
def build_parent_and_child_maps(self):
edge_members = list(chain(
self.nodes.values(),
self.sources.values(),
self.exposures.values(),
))
forward_edges, backward_edges = build_node_edges(edge_members)
self.child_map = forward_edges
self.parent_map = backward_edges
def build_macro_child_map(self):
edge_members = list(chain(
self.nodes.values(),
self.macros.values(),
))
forward_edges = build_macro_edges(edge_members)
return forward_edges
def writable_manifest(self):
self.build_parent_and_child_maps()
return WritableManifest(
nodes=self.nodes,
sources=self.sources,
macros=self.macros,
docs=self.docs,
exposures=self.exposures,
selectors=self.selectors,
metadata=self.metadata,
disabled=self.disabled,
child_map=self.child_map,
parent_map=self.parent_map,
)
def write(self, path):
self.writable_manifest().write(path)
# Called in dbt.compilation.Linker.write_graph and
# dbt.graph.queue.get and ._include_in_cost
def expect(self, unique_id: str) -> GraphMemberNode:
if unique_id in self.nodes:
return self.nodes[unique_id]
elif unique_id in self.sources:
return self.sources[unique_id]
elif unique_id in self.exposures:
return self.exposures[unique_id]
else:
# something terrible has happened
raise dbt.exceptions.InternalException(
'Expected node {} not found in manifest'.format(unique_id)
)
@property
def doc_lookup(self) -> DocLookup:
if self._doc_lookup is None:
self._doc_lookup = DocLookup(self)
return self._doc_lookup
def rebuild_doc_lookup(self):
self._doc_lookup = DocLookup(self)
@property
def source_lookup(self) -> SourceLookup:
if self._source_lookup is None:
self._source_lookup = SourceLookup(self)
return self._source_lookup
def rebuild_source_lookup(self):
self._source_lookup = SourceLookup(self)
@property
def ref_lookup(self) -> RefableLookup:
if self._ref_lookup is None:
self._ref_lookup = RefableLookup(self)
return self._ref_lookup
def rebuild_ref_lookup(self):
self._ref_lookup = RefableLookup(self)
@property
def analysis_lookup(self) -> AnalysisLookup:
if self._analysis_lookup is None:
self._analysis_lookup = AnalysisLookup(self)
return self._analysis_lookup
# Called by dbt.parser.manifest._resolve_refs_for_exposure
# and dbt.parser.manifest._process_refs_for_node
def resolve_ref(
self,
target_model_name: str,
target_model_package: Optional[str],
current_project: str,
node_package: str,
) -> MaybeNonSource:
node: Optional[ManifestNode] = None
disabled: Optional[ManifestNode] = None
candidates = _search_packages(
current_project, node_package, target_model_package
)
for pkg in candidates:
node = self.ref_lookup.find(target_model_name, pkg, self)
if node is not None and node.config.enabled:
return node
# it's possible that the node is disabled
if disabled is None:
disabled = self.find_disabled_by_name(
target_model_name, pkg
)
if disabled is not None:
return Disabled(disabled)
return None
# Called by dbt.parser.manifest._resolve_sources_for_exposure
# and dbt.parser.manifest._process_source_for_node
def resolve_source(
self,
target_source_name: str,
target_table_name: str,
current_project: str,
node_package: str
) -> MaybeParsedSource:
key = (target_source_name, target_table_name)
candidates = _search_packages(current_project, node_package)
source: Optional[ParsedSourceDefinition] = None
disabled: Optional[ParsedSourceDefinition] = None
for pkg in candidates:
source = self.source_lookup.find(key, pkg, self)
if source is not None and source.config.enabled:
return source
if disabled is None:
disabled = self.find_disabled_source_by_name(
target_source_name, target_table_name, pkg
)
if disabled is not None:
return Disabled(disabled)
return None
# Called by DocsRuntimeContext.doc
def resolve_doc(
self,
name: str,
package: Optional[str],
current_project: str,
node_package: str,
) -> Optional[ParsedDocumentation]:
"""Resolve the given documentation. This follows the same algorithm as
resolve_ref except the is_enabled checks are unnecessary as docs are
always enabled.
"""
candidates = _search_packages(
current_project, node_package, package
)
for pkg in candidates:
result = self.doc_lookup.find(name, pkg, self)
if result is not None:
return result
return None
# Called by RunTask.defer_to_manifest
def merge_from_artifact(
self,
adapter,
other: 'WritableManifest',
selected: AbstractSet[UniqueID],
) -> None:
"""Given the selected unique IDs and a writable manifest, update this
manifest by replacing any unselected nodes with their counterpart.
Only non-ephemeral refable nodes are examined.
"""
refables = set(NodeType.refable())
merged = set()
for unique_id, node in other.nodes.items():
current = self.nodes.get(unique_id)
if current and (
node.resource_type in refables and
not node.is_ephemeral and
unique_id not in selected and
not adapter.get_relation(
current.database, current.schema, current.identifier
)
):
merged.add(unique_id)
self.nodes[unique_id] = node.replace(deferred=True)
# log up to 5 items
sample = list(islice(merged, 5))
logger.debug(
f'Merged {len(merged)} items from state (sample: {sample})'
)
# Methods that were formerly in ParseResult
def add_macro(self, source_file: SourceFile, macro: ParsedMacro):
if macro.unique_id in self.macros:
# detect that the macro exists and emit an error
other_path = self.macros[macro.unique_id].original_file_path
# subtract 2 for the "Compilation Error" indent
# note that the line wrap eats newlines, so if you want newlines,
# this is the result :(
msg = line_wrap_message(
f'''\
dbt found two macros named "{macro.name}" in the project
"{macro.package_name}".
To fix this error, rename or remove one of the following
macros:
- {macro.original_file_path}
- {other_path}
''',
subtract=2
)
raise_compiler_error(msg)
self.macros[macro.unique_id] = macro
source_file.macros.append(macro.unique_id)
def has_file(self, source_file: SourceFile) -> bool:
key = source_file.file_id
if key is None:
return False
if key not in self.files:
return False
my_checksum = self.files[key].checksum
return my_checksum == source_file.checksum
def add_source(
self, source_file: SchemaSourceFile, source: UnpatchedSourceDefinition
):
# sources can't be overwritten!
_check_duplicates(source, self.sources)
self.sources[source.unique_id] = source # type: ignore
source_file.sources.append(source.unique_id)
def add_node_nofile(self, node: ManifestNodes):
# nodes can't be overwritten!
_check_duplicates(node, self.nodes)
self.nodes[node.unique_id] = node
def add_node(self, source_file: AnySourceFile, node: ManifestNodes, test_from=None):
self.add_node_nofile(node)
if isinstance(source_file, SchemaSourceFile):
assert test_from
source_file.add_test(node.unique_id, test_from)
else:
source_file.nodes.append(node.unique_id)
def add_exposure(self, source_file: SchemaSourceFile, exposure: ParsedExposure):
_check_duplicates(exposure, self.exposures)
self.exposures[exposure.unique_id] = exposure
source_file.exposures.append(exposure.unique_id)
def add_disabled_nofile(self, node: CompileResultNode):
if node.unique_id in self._disabled:
self._disabled[node.unique_id].append(node)
else:
self._disabled[node.unique_id] = [node]
def add_disabled(self, source_file: AnySourceFile, node: CompileResultNode, test_from=None):
self.add_disabled_nofile(node)
if isinstance(source_file, SchemaSourceFile):
assert test_from
source_file.add_test(node.unique_id, test_from)
else:
source_file.nodes.append(node.unique_id)
def add_doc(self, source_file: SourceFile, doc: ParsedDocumentation):
_check_duplicates(doc, self.docs)
self.docs[doc.unique_id] = doc
source_file.docs.append(doc.unique_id)
# end of methods formerly in ParseResult
# Provide support for copy.deepcopy() - we just need to avoid the lock!
# pickle and deepcopy use this. It returns a callable object used to
# create the initial version of the object and a tuple of arguments
# for the object, i.e. the Manifest.
# The order of the arguments must match the order of the attributes
# in the Manifest class declaration, because they are used as
# positional arguments to construct a Manifest.
def __reduce_ex__(self, protocol):
args = (
self.nodes,
self.sources,
self.macros,
self.docs,
self.exposures,
self.selectors,
self.disabled,
self.files,
self.metadata,
self.flat_graph,
self.state_check,
self.source_patches,
self._disabled,
self._doc_lookup,
self._source_lookup,
self._ref_lookup,
)
return self.__class__, args
class MacroManifest(MacroMethods):
def __init__(self, macros):
self.macros = macros
self.metadata = ManifestMetadata()
# This is returned by the 'graph' context property
# in the ProviderContext class.
self.flat_graph = {}
AnyManifest = Union[Manifest, MacroManifest]
@dataclass
@schema_version('manifest', 2)
class WritableManifest(ArtifactMixin):
nodes: Mapping[UniqueID, ManifestNode] = field(
metadata=dict(description=(
'The nodes defined in the dbt project and its dependencies'
))
)
sources: Mapping[UniqueID, ParsedSourceDefinition] = field(
metadata=dict(description=(
'The sources defined in the dbt project and its dependencies'
))
)
macros: Mapping[UniqueID, ParsedMacro] = field(
metadata=dict(description=(
'The macros defined in the dbt project and its dependencies'
))
)
docs: Mapping[UniqueID, ParsedDocumentation] = field(
metadata=dict(description=(
'The docs defined in the dbt project and its dependencies'
))
)
exposures: Mapping[UniqueID, ParsedExposure] = field(
metadata=dict(description=(
'The exposures defined in the dbt project and its dependencies'
))
)
selectors: Mapping[UniqueID, Any] = field(
metadata=dict(description=(
'The selectors defined in selectors.yml'
))
)
disabled: Optional[List[CompileResultNode]] = field(metadata=dict(
description='A list of the disabled nodes in the target'
))
parent_map: Optional[NodeEdgeMap] = field(metadata=dict(
description='A mapping from child nodes to their dependencies',
))
child_map: Optional[NodeEdgeMap] = field(metadata=dict(
description='A mapping from parent nodes to their dependents',
))
metadata: ManifestMetadata = field(metadata=dict(
description='Metadata about the manifest',
))
def _check_duplicates(
value: HasUniqueID, src: Mapping[str, HasUniqueID]
):
if value.unique_id in src:
raise_duplicate_resource_name(value, src[value.unique_id])
K_T = TypeVar('K_T')
V_T = TypeVar('V_T')
def _expect_value(
key: K_T, src: Mapping[K_T, V_T], old_file: SourceFile, name: str
) -> V_T:
if key not in src:
raise CompilationException(
'Expected to find "{}" in cached "result.{}" based '
'on cached file information: {}!'
.format(key, name, old_file)
)
return src[key]
| 1.476563 | 1 |
openGaussBase/testcase/SQL/DCL/Alter_Default_Privileges/Opengauss_Function_Alter_Default_Privileges_Case0016.py | opengauss-mirror/Yat | 0 | 1666 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : Functional test
Case Name : Initial user and sysadmin user alter their own default privileges
Description :
1.The initial user alters its own default privileges: ALTER reports no error but does not take effect; the queried privileges stay unchanged
1.1.The initial user alters its own default privileges
1.2.Clean up the environment  Expectation: cleanup succeeds
2.A sysadmin user alters its own default privileges: ALTER reports no error but does not take effect; the queried privileges stay unchanged
2.1.Connect as the administrator user and create sysadmin user default016_01  Expectation: creation succeeds
2.2.Connect as default016_01 and run the ALTER tests
2.3.Clean up  Expectation: cleanup succeeds
Note: the ALTER tests above cover privileges on tables (including views), types and functions
Expect :
1.The initial user alters its own default privileges: ALTER reports no error but does not take effect; the queried privileges stay unchanged
1.1.The initial user alters its own default privileges
1.2.Clean up the environment  Expectation: cleanup succeeds
2.A sysadmin user alters its own default privileges: ALTER reports no error but does not take effect; the queried privileges stay unchanged
2.1.Connect as the administrator user and create sysadmin user default016_01  Expectation: creation succeeds
2.2.Connect as default016_01 and run the ALTER tests
2.3.Clean up  Expectation: cleanup succeeds
Note: the ALTER tests above cover privileges on tables (including views), types and functions
History :
"""
import sys
import unittest
from yat.test import macro
from yat.test import Node
sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
logger = Logger()
commonsh = CommonSH('dbuser')
class Privategrant(unittest.TestCase):
def setUp(self):
        logger.info('--------Opengauss_Function_Alter_Default_Privileges_Case0016 execution started--------')
self.userNode = Node('dbuser')
self.DB_ENV_PATH = macro.DB_ENV_PATH
self.Constant = Constant()
        # username of the initial user
self.username = self.userNode.ssh_user
        # password of the initial user
self.password = <PASSWORD>
def test_common_user_permission(self):
        logger.info('--------1.The initial user alters its own default privileges--------')
        logger.info('--------1.1.The initial user alters its own default privileges--------')
sql_cmd = (f'''
drop schema if exists schema_016 cascade;
create schema schema_016;
ALTER DEFAULT PRIVILEGES for role {self.username} in schema schema_016 GRANT ALL PRIVILEGES on tables to {self.username} WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role {self.username} GRANT select,insert,update,truncate,references,TRIGGER,DELETE on tables to {self.username} WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role {self.username} in schema schema_016 GRANT ALL PRIVILEGES on functions to {self.username} WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role {self.username} GRANT EXECUTE on functions to {self.username} WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role {self.username} in schema schema_016 GRANT ALL PRIVILEGES on TYPES to {self.username} WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role {self.username} GRANT USAGE on TYPES to {self.username} WITH GRANT OPTION ;
drop schema if exists schema_016 cascade;
create schema schema_016;
drop table if exists test_alter_default_016 cascade;
create table test_alter_default_016(id int unique);
select * from test_alter_default_016;
drop function if exists test_default_016(int) cascade;
create or replace function test_default_016(a int) return int
as
b int:= a;
begin
for i in 1..a loop
b:=b+1;
end loop;
return b;
end;
select test_default_016(16);
drop type if exists type016;
CREATE TYPE type016 AS (c_int int,c_text text);
drop table if exists test_alter_default_016 cascade;
create table test_alter_default_016(id type016);
select * from test_alter_default_016;
ALTER DEFAULT PRIVILEGES for role {self.username} in schema schema_016 revoke ALL on tables from {self.username} CASCADE CONSTRAINTS ;
ALTER DEFAULT PRIVILEGES for role {self.username} revoke select,insert,update,truncate,references,TRIGGER,DELETE on tables from {self.username} CASCADE CONSTRAINTS;
ALTER DEFAULT PRIVILEGES for role {self.username} in schema schema_016 revoke ALL on functions from {self.username} CASCADE CONSTRAINTS ;
ALTER DEFAULT PRIVILEGES for role {self.username} revoke EXECUTE on functions from {self.username} CASCADE CONSTRAINTS;
ALTER DEFAULT PRIVILEGES for role {self.username} in schema schema_016 revoke ALL on TYPES from {self.username} CASCADE CONSTRAINTS ;
ALTER DEFAULT PRIVILEGES for role {self.username} revoke USAGE on TYPES from {self.username} CASCADE CONSTRAINTS;
''')
excute_cmd = f'''
source {self.DB_ENV_PATH};
gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U {self.username} -W {self.password} -c "{sql_cmd}"
'''
logger.info(excute_cmd)
msg = self.userNode.sh(excute_cmd).result()
logger.info(msg)
self.assertNotIn(self.Constant.SQL_WRONG_MSG[1], msg)
self.assertIn(self.Constant.ALTER_DEFAULT_PRIVILEGES, msg)
        logger.info('--------1.2.Clean up the environment--------')
sql_cmd = ('''
drop table if exists test_alter_default_016 cascade;
drop type if exists type016 cascade;
drop function if exists test_default_016(int) cascade;
drop schema if exists schema_016 cascade;
''')
excute_cmd = f'''
source {self.DB_ENV_PATH};
gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U {self.username} -W {self.password} -c "{sql_cmd}"
'''
logger.info(excute_cmd)
msg = self.userNode.sh(excute_cmd).result()
logger.info(msg)
self.assertNotIn(self.Constant.SQL_WRONG_MSG[1], msg)
        logger.info('--------2.A sysadmin user alters its own default privileges--------')
        logger.info('--------2.1.Connect as the administrator user and create sysadmin user default016_01 --------')
sql_cmd = commonsh.execut_db_sql(f'''
drop owned by default016_01 cascade;
drop user if exists default016_01;
create user default016_01 password '{<PASSWORD>}';
grant all privileges to default016_01;
''')
logger.info(sql_cmd)
self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd)
        logger.info('--------2.2.Connect as default016_01 and run the ALTER tests--------')
sql_cmd = (f'''
drop schema if exists schema_016 cascade;
create schema schema_016;
ALTER DEFAULT PRIVILEGES for role default016_01 in schema schema_016 GRANT ALL PRIVILEGES on tables to default016_01 WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role default016_01 GRANT select,insert,update,truncate,references,TRIGGER,DELETE on tables to default016_01 WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role default016_01 in schema schema_016 GRANT ALL PRIVILEGES on functions to default016_01 WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role default016_01 GRANT EXECUTE on functions to default016_01 WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role default016_01 in schema schema_016 GRANT ALL PRIVILEGES on TYPES to default016_01 WITH GRANT OPTION ;
ALTER DEFAULT PRIVILEGES for role default016_01 GRANT USAGE on TYPES to default016_01 WITH GRANT OPTION ;
drop schema if exists schema_016 cascade;
create schema schema_016;
drop table if exists test_alter_default_016 cascade;
create table test_alter_default_016(id int unique);
select * from test_alter_default_016;
drop function if exists test_default_016(int) cascade;
create or replace function test_default_016(a int) return int
as
b int:= a;
begin
for i in 1..a loop
b:=b+1;
end loop;
return b;
end;
select test_default_016(16);
drop type if exists type016;
CREATE TYPE type016 AS (c_int int,c_text text);
drop table if exists test_alter_default_016 cascade;
create table test_alter_default_016(id type016);
select * from test_alter_default_016;
ALTER DEFAULT PRIVILEGES for role default016_01 in schema schema_016 revoke ALL on tables from default016_01 CASCADE CONSTRAINTS ;
ALTER DEFAULT PRIVILEGES for role default016_01 revoke select,insert,update,truncate,references,TRIGGER,DELETE on tables from default016_01 CASCADE CONSTRAINTS;
ALTER DEFAULT PRIVILEGES for role default016_01 in schema schema_016 revoke ALL on functions from default016_01 CASCADE CONSTRAINTS ;
ALTER DEFAULT PRIVILEGES for role default016_01 revoke EXECUTE on functions from default016_01 CASCADE CONSTRAINTS;
ALTER DEFAULT PRIVILEGES for role default016_01 in schema schema_016 revoke ALL on TYPES from default016_01 CASCADE CONSTRAINTS ;
ALTER DEFAULT PRIVILEGES for role default016_01 revoke USAGE on TYPES from default016_01 CASCADE CONSTRAINTS;
''')
excute_cmd = f'''
source {self.DB_ENV_PATH};
gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U default016_01 -W {macro.COMMON_PASSWD} -c "{sql_cmd}"
'''
logger.info(excute_cmd)
msg = self.userNode.sh(excute_cmd).result()
logger.info(msg)
self.assertNotIn(self.Constant.SQL_WRONG_MSG[1], msg)
self.assertIn(self.Constant.ALTER_DEFAULT_PRIVILEGES, msg)
        logger.info('--------2.3.Clean up--------')
sql_cmd = commonsh.execut_db_sql(f'''
drop owned by default016_01 cascade;
drop user if exists default016_01;
''')
logger.info(sql_cmd)
self.assertNotIn(self.Constant.SQL_WRONG_MSG[1], sql_cmd)
def tearDown(self):
        logger.info('----------------------------------Clean up the environment----------------------------------')
sql_cmd = commonsh.execut_db_sql('''
drop owned by default016_01 cascade;
drop user if exists default016_01;
''')
logger.info(sql_cmd)
        logger.info('--------Opengauss_Function_Alter_Default_Privileges_Case0016 execution finished--------') | 1.757813 | 2 |
Apps/phdigitalshadows/dsapi/service/infrastructure_service.py | ryanbsaunders/phantom-apps | 74 | 1667 | # File: infrastructure_service.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
from .ds_base_service import DSBaseService
from .ds_find_service import DSFindService
from ..model.infrastructure import Infrastructure
class InfrastructureService(DSFindService):
def __init__(self, ds_api_key, ds_api_secret_key, proxy=None):
super(InfrastructureService, self).__init__(ds_api_key, ds_api_secret_key, proxy=proxy)
def find_all(self, view=None):
"""
Streams all infrastructure objects retrieved from the Digital Shadows API.
:param view: InfrastructureView
:return: Infrastructure generator
"""
if view is None:
view = InfrastructureService.infrastructure_view()
return self._find_all('/api/ip-ports',
view,
Infrastructure)
def find_all_pages(self, view=None):
"""
Streams all infrastructure objects retrieved from the Digital Shadows API in page groups.
:param view: InfrastructureView
:return: Infrastructure generator
"""
if view is None:
            view = InfrastructureService.infrastructure_view()
return self._find_all_pages('/api/ip-ports',
view,
Infrastructure)
@staticmethod
@DSBaseService.paginated(size=500)
@DSBaseService.sorted('published')
def infrastructure_view(detectedopen='ALL', domainname=None, detectedclosed=False, markedclosed=False,
severities=None, alerted=False, reverse=None):
view = {
'filter': {
'detectedOpen': detectedopen,
'severities': [] if severities is None else severities,
'alerted': 'true' if alerted else 'false',
'markedClosed': 'true' if markedclosed else 'false',
'detectedClosed': 'true' if detectedclosed else 'false'
}
}
if domainname is not None:
view['filter']['domainName'] = domainname
if reverse is not None:
view['sort'] = {
'direction': 'ASCENDING' if reverse else 'DESCENDING',
'property': 'published'
}
return view
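# Hedged usage sketch (the key values below are placeholders):
#   service = InfrastructureService('<ds-api-key>', '<ds-api-secret>')
#   view = InfrastructureService.infrastructure_view(alerted=True)
#   for ip_port in service.find_all(view=view):
#       print(ip_port)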
| 2.03125 | 2 |
src/find_genes_by_location/find_genes_by_location.py | NCBI-Codeathons/Identify-antiphage-defense-systems-in-the-bacterial-pangenome | 3 | 1668 | import argparse
from collections import defaultdict
import csv
from dataclasses import dataclass, field
from enum import Enum, unique, auto
import os
import sys
import tempfile
import yaml
import zipfile
import gffutils
from google.protobuf import json_format
from ncbi.datasets.v1alpha1 import dataset_catalog_pb2
from ncbi.datasets.v1alpha1.reports import assembly_pb2
from ncbi.datasets.reports.report_reader import DatasetsReportReader
def retrieve_assembly_report(zip_in, catalog, assm_acc: str) -> assembly_pb2.AssemblyDataReport:
report_files = get_catalog_files_for_assembly(catalog, dataset_catalog_pb2.File.FileType.DATA_REPORT, assm_acc)
for path in report_files:
yaml = zip_in.read(path)
rpt_rdr = DatasetsReportReader()
return rpt_rdr.assembly_report(yaml)
def retrieve_data_catalog(zip_in) -> dataset_catalog_pb2.Catalog:
catalog_json = zip_in.read('ncbi_dataset/data/dataset_catalog.json')
return json_format.Parse(catalog_json, dataset_catalog_pb2.Catalog())
def get_catalog_files_for_assembly(catalog: dataset_catalog_pb2.Catalog, desired_filetype: dataset_catalog_pb2.File.FileType, assm_acc: str):
report_files = get_catalog_files(catalog, desired_filetype, assm_acc)
filepaths = []
for assm_acc, paths in report_files.items():
filepaths.extend(paths)
return filepaths
def get_catalog_files(catalog: dataset_catalog_pb2.Catalog, desired_filetype: dataset_catalog_pb2.File.FileType, assm_acc: str = None):
files = defaultdict(list)
for assm in catalog.assemblies:
acc = assm.accession
if assm_acc and assm_acc != acc:
continue
for f in assm.files:
filepath = os.path.join('ncbi_dataset', 'data', f.file_path)
if f.file_type == desired_filetype:
files[acc].append(filepath)
return files
def get_zip_file_for_acc(acc, path):
fname = os.path.join(path, f'{acc}.zip')
if os.path.isfile(fname):
return fname
return None
@dataclass
class Gene:
id: str
feat_type: str
name: str
chrom: str
strand: str
range_start: int
range_stop: int
protein_accession: str = ""
def get_fields(self):
return [self.feat_type, self.name, self.range_start, self.range_stop, self.protein_accession]
def name_val(self):
return self.protein_accession if self.protein_accession else self.name
def find_genes_by_loc(gff3_db, csvout, assm_acc, seq_acc, start, stop, extra_fields):
found_genes = []
feat_types = ('gene', 'pseudogene')
for gene in gff3_db.region(seqid=seq_acc, start=start, end=stop, featuretype=feat_types, completely_within=False):
gene_name = gene.attributes.get('Name', None)[0]
prot_acc = ""
if gene.attributes['gene_biotype'][0] == 'protein_coding':
cds = list(gff3_db.children(gene, featuretype='CDS'))
prot_acc = cds[0].attributes.get('protein_id', None)[0]
geneobj = Gene(
gene.id,
gene.featuretype,
gene_name,
gene.chrom,
gene.strand,
gene.start,
gene.stop,
prot_acc,
)
csvout.writerow([assm_acc, seq_acc, start, stop, *extra_fields, *geneobj.get_fields()])
found_genes.append(geneobj)
return found_genes
class FindGenesByLoc:
default_packages_dir = os.path.join('var', 'data', 'packages')
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('--packages-dir', type=str, default=self.default_packages_dir,
help=f'root of input data directory [{self.default_packages_dir}]')
parser.add_argument('--locs', type=str, help='file containing genomic locations')
self.args = parser.parse_args()
self.writer = csv.writer(sys.stdout, dialect='excel-tab')
def read_data(self):
for row in csv.reader(iter(sys.stdin.readline, ''), dialect='excel-tab'):
yield row
def run(self):
for assm_acc, seq_acc, start, stop, *extra in self.read_data():
self.find_all_for_location(assm_acc, seq_acc, start, stop, extra)
def process_loc_for_gff(self, zin, gff_fname, assm_acc, seq_acc, start, stop, extra_fields):
with tempfile.NamedTemporaryFile() as tmpfile:
tmpfile.write(zin.read(gff_fname))
db = gffutils.create_db(
tmpfile.name,
dbfn=':memory:',
force=True,
keep_order=True,
merge_strategy='merge',
sort_attribute_values=True
)
find_genes_by_loc(db, self.writer, assm_acc, seq_acc, start, stop, extra_fields)
def find_all_for_location(self, assm_acc, seq_acc, start, stop, extra_fields):
zip_file = get_zip_file_for_acc(assm_acc, self.args.packages_dir)
try:
with zipfile.ZipFile(zip_file, 'r') as zin:
catalog = retrieve_data_catalog(zin)
gff_files = get_catalog_files(catalog, dataset_catalog_pb2.File.FileType.GFF3)
for assm_acc, gff_files in gff_files.items():
report = retrieve_assembly_report(zin, catalog, assm_acc)
for gff_fname in gff_files:
self.process_loc_for_gff(zin, gff_fname, assm_acc, seq_acc, start, stop, extra_fields)
except zipfile.BadZipFile:
print(f'{zip_file} is not a zip file')
if __name__ == '__main__':
FindGenesByLoc().run()
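# Input sketch (the accessions below are only examples): run() reads tab-separated rows
# from stdin of the form
#   <assembly_accession>\t<sequence_accession>\t<start>\t<stop>[\t<extra fields...>]
# e.g.
#   GCF_000005845.2    NC_000913.3    100000    105000    my_label
# and writes one tab-separated row per overlapping gene/pseudogene to stdout.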
| 2.265625 | 2 |
web/backend/backend_django/apps/capacity/models.py | tOverney/ADA-Project | 0 | 1669 | <filename>web/backend/backend_django/apps/capacity/models.py
from django.db import models
from multigtfs.models import (
Block, Fare, FareRule, Feed, Frequency, Route, Service, ServiceDate, Shape,
ShapePoint, Stop, StopTime, Trip, Agency)
class Path(models.Model):
trip = models.ForeignKey(Trip)
stop = models.ForeignKey(Stop)
path = models.CharField(max_length=1024, null=True, blank=True)
class Meta:
unique_together = ('trip', 'stop',)
class Capacity(models.Model):
trip = models.ForeignKey(Trip)
stop_time = models.ForeignKey(StopTime)
service_date = models.ForeignKey(ServiceDate)
capacity1st = models.IntegerField('capacity1st', null=True, blank=True)
capacity2nd = models.IntegerField('capacity2nd', null=True, blank=True)
class Meta:
unique_together = ('trip', 'stop_time', 'service_date') | 2.203125 | 2 |
apps/interface/settings/config.py | rainydaygit/testtcloudserver | 349 | 1670 | try:
from public_config import *
except ImportError:
pass
PORT = 9028
SERVICE_NAME = 'interface'
| 1.226563 | 1 |
api/api/pokemon/views.py | farnswj1/PokemonAPI | 0 | 1671 | <filename>api/api/pokemon/views.py
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from rest_framework.generics import (
ListAPIView,
RetrieveAPIView,
CreateAPIView,
UpdateAPIView,
DestroyAPIView
)
from .models import Pokemon
from .serializers import PokemonSerializer
from .filters import PokemonFilterSet
# Create your views here.
class PokemonListAPIView(ListAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
filterset_class = PokemonFilterSet
@method_decorator(cache_page(7200))
def get(self, request, *args, **kwargs):
return super().get(request, *args, **kwargs)
class PokemonDetailAPIView(RetrieveAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
@method_decorator(cache_page(7200))
def get(self, request, *args, **kwargs):
return super().get(request, *args, **kwargs)
class PokemonCreateAPIView(CreateAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
class PokemonUpdateAPIView(UpdateAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
class PokemonDeleteAPIView(DestroyAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
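# Hedged wiring sketch (the project's actual urls.py is not shown here; paths are
# placeholders):
#   from django.urls import path
#   urlpatterns = [
#       path('pokemon/', PokemonListAPIView.as_view()),
#       path('pokemon/<int:pk>/', PokemonDetailAPIView.as_view()),
#       path('pokemon/create/', PokemonCreateAPIView.as_view()),
#   ]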
| 2.1875 | 2 |
plugins/action/normalize_gitlab_cfg.py | sma-de/ansible-collections-gitlab | 0 | 1672 | <gh_stars>0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleOptionsError
from ansible.module_utils.six import iteritems, string_types
from ansible_collections.smabot.base.plugins.module_utils.plugins.config_normalizing.base import ConfigNormalizerBaseMerger, NormalizerBase, NormalizerNamed, DefaultSetterConstant, DefaultSetterOtherKey
from ansible_collections.smabot.base.plugins.module_utils.utils.dicting import setdefault_none, SUBDICT_METAKEY_ANY, get_subdict
from ansible_collections.smabot.base.plugins.module_utils.utils.utils import ansible_assert
def user_role_to_cfg(username, urole, cfg):
tmp = ['roles', 'subroles'] \
+ urole['path'].replace('/', '/subroles/').split('/')
tmp = get_subdict(cfg, tmp, default_empty=True)
setdefault_none(setdefault_none(tmp, 'members', {}),
urole['level'], []
).append(username)
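# Minimal worked example (names are hypothetical): with
#   urole = {'path': 'grp/sub', 'level': 'developer'}
# the call user_role_to_cfg('alice', urole, cfg) is expected to leave
#   cfg['roles']['subroles']['grp']['subroles']['sub']['members']['developer'] == ['alice']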
class ConfigRootNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
self._add_defaultsetter(kwargs,
'random_pwlen', DefaultSetterConstant(80)
)
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
ServersNormalizer(pluginref),
]
super(ConfigRootNormalizer, self).__init__(pluginref, *args, **kwargs)
class ServersNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
ServerInstancesNormalizer(pluginref),
]
super(ServersNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return ['servers']
class ServerInstancesNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
SrvInstNormalizer(pluginref),
]
super(ServerInstancesNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return ['instances']
class SrvInstNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
ServerUsersNormalizer(pluginref),
SrvRolesNormalizer(pluginref),
]
super(SrvInstNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return [SUBDICT_METAKEY_ANY]
class SrvRolesBaseNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
SrvRolesMembersNormalizer(pluginref),
## note: for recursive structures, the sub normalizers can only
## be instantiated if the corresponding key actually exists
## to avoid indefinite recursions of death
(SrvSubRolesNormalizer, True),
]
super(SrvRolesBaseNormalizer, self).__init__(
pluginref, *args, **kwargs
)
def _handle_specifics_presub(self, cfg, my_subcfg, cfgpath_abs):
# do config subkey
c = setdefault_none(my_subcfg, 'config', defval={})
setdefault_none(c, 'name', defval=cfgpath_abs[-1])
# build role hierarchy path and parent
if cfgpath_abs[-1] == 'roles':
## top level
parent = []
else:
## subrole
parent = get_subdict(cfg, cfgpath_abs[:-2])
parent = parent['role_abspath']
my_subcfg['role_abspath'] = parent + [c['name']]
c['parent'] = '/'.join(parent)
return my_subcfg
class SrvRolesNormalizer(SrvRolesBaseNormalizer):
def __init__(self, pluginref, *args, **kwargs):
super(SrvRolesNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return ['roles']
class SrvSubRolesNormalizer(NormalizerBase):
NORMER_CONFIG_PATH = ['subroles']
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
SrvRoleInstNormalizer(pluginref),
]
super(SrvSubRolesNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return type(self).NORMER_CONFIG_PATH
class SrvRoleInstNormalizer(SrvRolesBaseNormalizer):
def __init__(self, pluginref, *args, **kwargs):
super(SrvRoleInstNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return [SUBDICT_METAKEY_ANY]
class SrvRolesMembersNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
super(SrvRolesMembersNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return ['members']
def _handle_specifics_presub(self, cfg, my_subcfg, cfgpath_abs):
if not my_subcfg:
return my_subcfg
## if it exists, members should be a dict where the keys are
## valid gitlab access levels (like guest or developer) and
## the values should be a list of users
exportcfg = []
my_group = self.get_parentcfg(cfg, cfgpath_abs)
my_group = '/'.join(my_group['role_abspath'])
for (k,ul) in iteritems(my_subcfg):
for u in ul:
exportcfg.append({
'gitlab_group': my_group, 'gitlab_user': u, 'access_level': k
})
my_subcfg['_exportcfg'] = exportcfg
return my_subcfg
class ServerUsersNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
ServerBotsNormalizer(pluginref),
ServerHumansNormalizer(pluginref),
]
super(ServerUsersNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return ['users']
class ServerUsrBaseNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
SrvUsrNormalizer(pluginref),
]
super(ServerUsrBaseNormalizer, self).__init__(
pluginref, *args, **kwargs
)
class ServerBotsNormalizer(ServerUsrBaseNormalizer):
def __init__(self, pluginref, *args, **kwargs):
super(ServerBotsNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return ['bots']
class ServerHumansNormalizer(ServerUsrBaseNormalizer):
def __init__(self, pluginref, *args, **kwargs):
super(ServerHumansNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return ['humans']
class SrvUsrNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
SrvUsrCfgNormalizer(pluginref),
]
self._add_defaultsetter(kwargs,
'pw_access', DefaultSetterConstant(True)
)
super(SrvUsrNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return [SUBDICT_METAKEY_ANY]
def _handle_specifics_postsub(self, cfg, my_subcfg, cfgpath_abs):
usr_roles = my_subcfg.get('roles', None)
if usr_roles:
for ur in usr_roles:
user_role_to_cfg(my_subcfg['config']['username'], ur,
self.get_parentcfg(cfg, cfgpath_abs, level=3)
)
return my_subcfg
class SrvUsrCfgNormalizer(NormalizerNamed):
def __init__(self, pluginref, *args, **kwargs):
super(SrvUsrCfgNormalizer, self).__init__(
pluginref, *args, mapkey_lvl=-2, **kwargs
)
self.default_setters['name'] = DefaultSetterOtherKey('username')
@property
def config_path(self):
return ['config']
@property
def name_key(self):
return 'username'
def _handle_specifics_presub(self, cfg, my_subcfg, cfgpath_abs):
mail = my_subcfg.get('email', None)
if not mail:
            # if no mail address is explicitly given, check whether a mail
            # template is specified for the server; if so, use it to
            # create an address with the username as parameter
tmp = self.get_parentcfg(
cfg, cfgpath_abs, level=3
).get('mail_template', None)
if tmp:
my_subcfg['email'] = tmp.format(
my_subcfg['username'].replace('_', '-')
)
return my_subcfg
class ActionModule(ConfigNormalizerBaseMerger):
def __init__(self, *args, **kwargs):
super(ActionModule, self).__init__(ConfigRootNormalizer(self),
*args, default_merge_vars=['gitlab_cfg_defaults'],
extra_merge_vars_ans=['extra_gitlab_config_maps'],
**kwargs
)
self._supports_check_mode = False
self._supports_async = False
@property
def my_ansvar(self):
return 'gitlab_cfg'
| 1.8125 | 2 |
microservices/validate/tools/dynamodb.py | clodonil/pipeline_aws_custom | 0 | 1673 | <filename>microservices/validate/tools/dynamodb.py
"""
Integration tools for DynamoDB
"""
import boto3
import botocore
import logging
import datetime
import json
import copy
import time
import os
class DyConnect:
def __init__(self, table, region):
self.table = table
self.region = region
def connect(self):
try:
dydb = boto3.resource('dynamodb', region_name=self.region)
conn = dydb.Table(self.table)
return conn
except:
print("Problema na conexao com DynamoDB")
logging.CRITICAL("Problema na conexao com DynamoDB")
return False
def dynamodb_save(self, dados):
conn = self.connect()
if conn:
retorno = conn.put_item(Item=dados)
def dynamodb_query(self, query):
conn = self.connect()
return conn.get_item(Key=query)
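# Note: `dynamodb` and `aws_region` used below are assumed to come from a settings
# module that is not part of this file, e.g. roughly
#   dynamodb = {'template': 'templates-table'}
#   aws_region = 'us-east-1'
# (table and region names here are placeholders).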
def get_dy_template(template_name):
newtemplate = DyConnect(dynamodb['template'], aws_region)
query = {'name': template_name}
stages = newtemplate.dynamodb_query(query)
if 'Item' in stages:
if 'details' in stages['Item']:
return stages['Item']['details']
return False
def get_sharedlibrary_release():
newtemplate = DyConnect(dynamodb['template'], aws_region)
query = {'name': 'sharedlibrary'}
version = newtemplate.dynamodb_query(query)
if 'Item' in version:
return version['Item']['release']
return False
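# Hedged usage sketch (table, region and item values are placeholders):
#   conn = DyConnect('pipeline-table', 'us-east-1')
#   conn.dynamodb_save({'name': 'sharedlibrary', 'release': '1.0.0'})
#   item = conn.dynamodb_query({'name': 'sharedlibrary'})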
| 2.25 | 2 |
scrapy_ddiy/spiders/GlidedSky/glided_sky_001.py | LZC6244/scrapy_ddiy | 9 | 1674 | # -*- coding: utf-8 -*-
from scrapy import Request
from scrapy_ddiy.utils.spiders.ddiy_base import DdiyBaseSpider
class GlidedSky001Spider(DdiyBaseSpider):
name = 'glided_sky_001'
    description = 'GlidedSky crawler - Basic 1'
start_url = 'http://www.glidedsky.com/level/web/crawler-basic-1'
custom_settings = {
'COOKIES_ENABLED': True,
'DOWNLOADER_MIDDLEWARES': {
'scrapy_ddiy.spiders.GlidedSky.glided_sky_downloadmiddleware.GlidedSkyMiddleware': 589,
},
}
def start_requests(self):
yield Request(url=self.start_url, callback=self.parse)
def parse(self, response, **kwargs):
all_number = [int(i) for i in
response.xpath('//div[@class="card-body"]//div[@class="col-md-1"]/text()').getall()]
        self.logger.info(f'Sum of web numbers is {sum(all_number)}')
| 2.703125 | 3 |
datasets/celeba/celeba_dataset.py | google/joint_vae | 35 | 1675 | #
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Provides data for the mnist with attributes dataset.
Provide data loading utilities for an augmented version of the
MNIST dataset which contains the following attributes:
1. Location (digits are translated on a canvas and placed around
one of four locations/regions in the canvas). Each location
is a gaussian placed at four quadrants of the canvas.
2. Scale (We vary scale from 0.4 to 1.0), with two gaussians
placed at 0.5 +- 0.1 and 0.9 +- 0.1 repsectively.
3. Orientation: we vary orientation from -90 to +90 degrees,
sampling actual values from gaussians at +30 +- 10 and
-30 +-10. On a third of the occasions we dont orient the
digit at all which means a rotation of 0 degrees.
The original data after transformations is binarized as per the
procedure described in the following paper:
Salakhutdinov, Ruslan, and <NAME>. 2008. ``On the Quantitative Analysis of
Deep Belief Networks.'' In Proceedings of the 25th International Conference on
Machine Learning, 872-79.
Author: vrama@
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.data import dataset
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
from datasets.celeba.image_decoder import ImageDecodeProcess
# Only provides option to load the binarized version of the dataset.
_FILE_PATTERN = '%s-*'
_SPLIT_TYPE = 'iid'
_DATASET_DIR = '/srv/share/datasets/celeba_for_tf_ig'
_SPLITS_TO_SIZES = {'train': 162770, 'val': 19867, 'test': 19962}
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A [218 x 178 x 3] RGB image.',
'labels': 'Attributes corresponding to the image.',
}
_NUM_CLASSES_PER_ATTRIBUTE = tuple([2]*18)
def get_split(split_name='train',
split_type="iid",
dataset_dir=None,
image_length=64,
num_classes_per_attribute=None):
"""Gets a dataset tuple with instructions for reading 2D shapes data.
Args:
split_name: A train/test split name.
split_type: str, type of split being loaded "iid" or "comp"
dataset_dir: The base directory of the dataset sources.
num_classes_per_attribute: The number of labels for the classfication
problem corresponding to each attribute. For example, if the first
attribute is "shape" and there are three possible shapes, then
then provide a value 3 in the first index, and so on.
Returns:
A `Dataset` namedtuple.
metadata: A dictionary with some metadata about the dataset we just
constructed.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if split_type is not "iid":
raise ValueError("Only IID split available for CelebA.")
if num_classes_per_attribute is None:
num_classes_per_attribute = _NUM_CLASSES_PER_ATTRIBUTE
if dataset_dir is None or dataset_dir == '':
dataset_dir = _DATASET_DIR
# Load attribute label map file.
label_map_json = os.path.join(dataset_dir,
'attribute_label_map.json')
file_pattern = os.path.join(dataset_dir, _FILE_PATTERN % split_name)
tf.logging.info('Loading from %s file.' % (file_pattern))
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='raw'),
'image/labels': tf.FixedLenFeature([len(num_classes_per_attribute)], tf.int64),
}
# TODO(vrama): See
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py#L270
# For where changes would need to be made to preprocess the images which
# get loaded.
items_to_handlers = {
'image': ImageDecodeProcess(shape=[218, 178, 3], image_length=64),
'labels': tfexample_decoder.Tensor('image/labels'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
metadata = {
'num_classes_per_attribute': num_classes_per_attribute,
'split_type': _SPLIT_TYPE,
'label_map_json': label_map_json,
}
return dataset.Dataset(
data_sources=file_pattern,
reader=tf.TFRecordReader,
decoder=decoder,
num_samples=_SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS), metadata
| 2.328125 | 2 |
jorldy/manager/log_manager.py | kan-s0/JORLDY | 0 | 1676 | <filename>jorldy/manager/log_manager.py
import os
import datetime, time
import imageio
from pygifsicle import optimize
from torch.utils.tensorboard import SummaryWriter
class LogManager:
def __init__(self, env, id, experiment=None):
self.id = id
now = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
self.path = (
f"./logs/{experiment}/{env}/{id}/{now}/"
if experiment
else f"./logs/{env}/{id}/{now}/"
)
self.writer = SummaryWriter(self.path)
self.stamp = time.time()
def write(self, scalar_dict, frames, step):
for key, value in scalar_dict.items():
self.writer.add_scalar(f"{self.id}/" + key, value, step)
self.writer.add_scalar("all/" + key, value, step)
if "score" in key:
time_delta = int(time.time() - self.stamp)
self.writer.add_scalar(f"{self.id}/{key}_per_time", value, time_delta)
self.writer.add_scalar(f"all/{key}_per_time", value, time_delta)
if len(frames) > 0:
score = scalar_dict["score"]
write_path = os.path.join(self.path, f"{step:010d}_{score}.gif")
imageio.mimwrite(write_path, frames, fps=60)
optimize(write_path)
print(f"...Record episode to {write_path}...")
| 2.265625 | 2 |
lib/SeparateDriver/ASRDriverParts/UNIInterface.py | multi-service-fabric/element-manager | 0 | 1677 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2019 Nippon Telegraph and Telephone Corporation
# Filename: ASRDriverParts/UNIInterface.py
'''
Parts Module for ASR driver UNI interface configuration
'''
import GlobalModule
from EmCommonLog import decorater_log
from ASRDriverParts.InterfaceBase import InterfaceBase
class UNIInterface(InterfaceBase):
'''
Parts class for ASR driver UNI interface configuration
'''
@decorater_log
def __init__(self,
vrf_name=None,
if_name=None,
vlan_id=None,
ip_address=None,
subnet_mask=None,
vip_ip_address=None,
hsrp_id=None,
mtu=None,
is_active=True):
'''
Constructor
'''
super(UNIInterface, self).__init__(vrf_name=vrf_name,
if_name=if_name)
self.vlan_id = vlan_id
self.ip_address = ip_address
self.subnet_mask = subnet_mask
self.vip_ip_address = vip_ip_address
self.hsrp_id = hsrp_id
self.mtu = mtu
self.is_active = is_active
@decorater_log
def output_add_command(self):
'''
Command line to add configuration is output.
'''
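# Illustrative example (assumed values, not from the original source): with
# hsrp_id=10, vip_ip_address=192.168.0.1 and mtu=4110 on the active router,
# the template strings appended below expand roughly to
#   standby version 2
#   standby 10 ip 192.168.0.1
#   standby 10 priority 105
#   standby 10 preempt
#   ip mtu 4110
# wrapped by the interface start/end commands supplied by InterfaceBase.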
parame = self._get_param()
self._interface_common_start()
self._append_add_command("standby version 2")
comm_txt = "standby %(hsrp_id)s ip %(vip_ip_address)s"
self._append_add_command(comm_txt, parame)
if self.is_active:
comm_txt = "standby %(hsrp_id)s priority 105"
self._append_add_command(comm_txt, parame)
comm_txt = "standby %(hsrp_id)s preempt"
self._append_add_command(comm_txt, parame)
comm_txt = "ip mtu %(mtu)s"
self._append_add_command(comm_txt, parame)
self._interface_common_end()
GlobalModule.EM_LOGGER.debug(
"uni if command = %s" % (self._tmp_add_command,))
return self._tmp_add_command
@decorater_log
def _get_param(self):
'''
Parameter is acquired from attribute.(dict type)
'''
tmp_param = super(UNIInterface, self)._get_param()
tmp_param.update(
{
"vlan_id": self.vlan_id,
"ip_address": self.ip_address,
"subnet_mask": self.subnet_mask,
"vip_ip_address": self.vip_ip_address,
"hsrp_id": self.hsrp_id,
"mtu": self.mtu,
}
)
return tmp_param
| 1.992188 | 2 |
tools/accuracy_checker/accuracy_checker/annotation_converters/mnist.py | AnthonyQuantum/open_model_zoo | 4 | 1678 | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from ..config import PathField, BoolField
from ..representation import ClassificationAnnotation
from ..utils import read_csv, check_file_existence, read_json
from .format_converter import BaseFormatConverter, ConverterReturn
try:
from PIL import Image
except ImportError:
Image = None
class MNISTCSVFormatConverter(BaseFormatConverter):
"""
MNIST CSV dataset converter. All annotation converters should be derived from BaseFormatConverter class.
"""
# register name for this converter
# this name will be used for converter class look up
__provider__ = 'mnist_csv'
annotation_types = (ClassificationAnnotation, )
@classmethod
def parameters(cls):
configuration_parameters = super().parameters()
configuration_parameters.update({
'annotation_file': PathField(description="Path to csv file which contain dataset."),
'convert_images': BoolField(
optional=True,
default=False,
description="Allows to convert images from pickle file to user specified directory."
),
'converted_images_dir': PathField(
optional=True, is_directory=True, check_exists=False, description="Path to converted images location."
),
'dataset_meta_file': PathField(
description='path to json file with dataset meta (e.g. label_map, color_encoding)', optional=True
)
})
return configuration_parameters
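    # Hypothetical configuration snippet (sketch only): the field names below
    # come from the parameters declared above, but the exact layout of the
    # surrounding accuracy-checker YAML config is an assumption.
    #
    #   converter: mnist_csv
    #   annotation_file: mnist_test.csv
    #   convert_images: True
    #   converted_images_dir: converted_images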
def configure(self):
"""
This method is responsible for obtaining the necessary parameters
for converting from the command line or config.
"""
self.test_csv_file = self.get_value_from_config('annotation_file')
self.converted_images_dir = self.get_value_from_config('converted_images_dir')
self.convert_images = self.get_value_from_config('convert_images')
if self.convert_images and not self.converted_images_dir:
self.converted_images_dir = self.test_csv_file.parent / 'converted_images'
if not self.converted_images_dir.exists():
self.converted_images_dir.mkdir(parents=True)
if self.convert_images and Image is None:
raise ValueError(
"conversion mnist images requires Pillow installation, please install it before usage"
)
self.dataset_meta = self.get_value_from_config('dataset_meta_file')
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
"""
This method is executed automatically when convert.py is started.
All arguments are automatically got from command line arguments or config file in method configure
Returns:
annotations: list of annotation representation objects.
meta: dictionary with additional dataset level metadata.
"""
annotations = []
check_images = check_content and not self.convert_images
meta = self.generate_meta()
labels_to_id = meta['label_map']
content_errors = None
if check_content:
self.converted_images_dir = self.converted_images_dir or self.test_csv_file.parent / 'converted_images'
if self.converted_images_dir and check_content:
if not self.converted_images_dir.exists():
content_errors = ['{}: does not exist'.format(self.converted_images_dir)]
check_images = False
# read original dataset annotation
annotation_table = read_csv(self.test_csv_file)
num_iterations = len(annotation_table)
for index, annotation in enumerate(annotation_table):
identifier = '{}.png'.format(index)
label = labels_to_id.get(annotation['label'], int(annotation['label']))
if self.convert_images:
image = Image.fromarray(self.convert_image(annotation))
image = image.convert("L")
image.save(str(self.converted_images_dir / identifier))
annotations.append(ClassificationAnnotation(identifier, label))
if check_images:
if not check_file_existence(self.converted_images_dir / identifier):
# add error to errors list if file not found
content_errors.append('{}: does not exist'.format(self.converted_images_dir / identifier))
if progress_callback is not None and index % progress_interval == 0:
progress_callback(index / num_iterations * 100)
return ConverterReturn(annotations, meta, content_errors)
@staticmethod
def convert_image(features):
image = np.zeros((28, 28))
column_template = '{}x{}'
for x in range(28):
for y in range(28):
pixel = int(features[column_template.format(x+1, y+1)])
image[x, y] = pixel
return image
def generate_meta(self):
if not self.dataset_meta:
return {'label_map': {str(i): i for i in range(10)}}
dataset_meta = read_json(self.dataset_meta)
label_map = dataset_meta.get('label_map')
if 'labels' in dataset_meta:
label_map = dict(enumerate(dataset_meta['labels']))
dataset_meta['label_map'] = label_map or {str(i): i for i in range(10)}
return dataset_meta
| 2.359375 | 2 |
Libraries/Python/wxGlade/v0.9,5/wxGlade-0.9.5-py3.6.egg/wxglade/bugdialog.py | davidbrownell/Common_EnvironmentEx | 0 | 1679 | <reponame>davidbrownell/Common_EnvironmentEx
"""\
Dialog to show details of internal errors.
@copyright: 2014-2016 <NAME>
@copyright: 2017 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import bugdialog_ui
import config
import log
import logging
import sys
import wx
class BugReport(bugdialog_ui.UIBugDialog):
"Dialog to show details of internal errors"
_disabled = False # Flag to prevent dialog popups during test runs.
def __init__(self):
self._disabled = getattr(sys, '_called_from_test', False)
bugdialog_ui.UIBugDialog.__init__(self, None, -1, "")
def SetContent(self, msg, exc):
"""Prepare given exception information and show it as dialog content.
msg: Short description of the action that has raised this error
exc: Caught exception (Exception instance)
see: SetContentEI()"""
if self._disabled:
return
exc_type = exc.__class__.__name__
exc_msg = str(exc)
header = self.st_header.GetLabel() % {'action': msg}
log.exception_orig(header)
self._fill_dialog(exc_msg, exc_type, header)
def SetContentEI(self, exc_type, exc_value, exc_tb, msg=_('An internal error occurred')):
"""Format given exception and add details to dialog.
exc_type: Exception type
exc_value: Exception value
exc_tb: Exception traceback
msg: Short description of the exception
see: SetContent()"""
if self._disabled:
return
# don't use exception() because it overwrites exc_info with 1
logging.error(msg, exc_info=(exc_type, exc_value, exc_tb))
self._fill_dialog(msg, exc_type, _('An internal error occurred'))
def _fill_dialog(self, exc_msg, exc_type, header):
"""Fill the bug dialog
exc_msg: Short exception summary
exc_type: Exception type as string
header: Initial message
see: L{SetContent()}, L{SetContentEI()}"""
details = log.getBufferAsString()
if not exc_msg:
exc_msg = _('No summary available')
summary = self.st_summary.GetLabel() % { 'exc_type':exc_type, 'exc_msg':exc_msg }
self.st_header.SetLabel(header)
self.st_summary.SetLabel(summary)
self.tc_details.SetValue(details)
howto = self.tc_howto_report.GetValue()
howto = howto % {'log_file': config.log_file}
self.tc_howto_report.SetValue(howto)
def OnCopy(self, event):
"Copy the dialog content to the clipboard"
text = self.tc_details.GetValue()
if not text:
return
data = wx.TextDataObject(text)
if wx.TheClipboard.Open():
wx.TheClipboard.SetData(data)
wx.TheClipboard.Close()
else:
wx.MessageBox("Unable to open the clipboard", "Error")
def ShowModal(self, **kwargs):
if getattr(sys, '_called_from_test', False):
return wx.ID_OK
super(BugReport, self).ShowModal(**kwargs)
def Show(msg, exc):
"""Wrapper for creating a L{BugReport} dialog and show the details of the given exception instance.
msg: Short description of the action that has raised this error
exc: Caught exception
see ShowEI(), BugReport.SetContent()"""
dialog = BugReport()
dialog.SetContent(msg, exc)
dialog.ShowModal()
dialog.Destroy()
def ShowEI(exc_type, exc_value, exc_tb, msg=None):
"""Wrapper for creating a L{BugReport} dialog and show the given exception details.
exc_type: Exception type
exc_value: Exception value
exc_tb: Exception traceback
msg: Short description of the exception
see: L{Show()}, L{BugReport.SetContent()}"""
dialog = BugReport()
dialog.SetContentEI(exc_type, exc_value, exc_tb, msg)
dialog.ShowModal()
dialog.Destroy()
def ShowEnvironmentError(msg, inst):
"""Show EnvironmentError exceptions detailed and user-friendly
msg: Error message
inst: The caught exception"""
details = {'msg':msg, 'type':inst.__class__.__name__}
if inst.filename:
details['filename'] = _('Filename: %s') % inst.filename
if inst.errno is not None and inst.strerror is not None:
details['error'] = '%s - %s' % (inst.errno, inst.strerror)
else:
details['error'] = str(inst.args)
text = _("""%(msg)s
Error type: %(type)s
Error code: %(error)s
%(filename)s""") % details
wx.MessageBox(text, _('Error'), wx.OK | wx.CENTRE | wx.ICON_ERROR)
| 2.109375 | 2 |
core/views.py | Neelamegam2000/QRcode-for-license | 0 | 1680 | <filename>core/views.py
from django.shortcuts import render, redirect
from django.conf import settings
from django.core.files.storage import FileSystemStorage,default_storage
from django.core.mail import send_mail, EmailMessage
from core.models import Document
from core.forms import DocumentForm
from django.contrib import messages
import os
import pyqrcode
import png
import random
import base64
import cv2
import numpy as np
import pyzbar.pyzbar as pyzbar
def home(request):
documents= Document.objects.all()
return render(request, 'home.html', { 'documents': documents })
"""def simple_upload(request):
if request.method == 'POST' and request.FILES['myfile']:
myfile = request.FILES['myfile']
fs = FileSystemStorage()
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
media_path = os.path.join(BASE_DIR,'media')
full_path=os.path.join(media_path,myfile.name)
qr=pyqrcode.create(uploaded_file_url)
filename_before=filename.rsplit(".")
filename1=filename_before[0]+".png"
s=qr.png(filename1,scale=6)
'''from fpdf import FPDF
pdf=FPDF()
pdf.add_page()
pdf.image(filename1,x=50,y=None,w=60,h=60,type="",link=uploaded_file_url)'''
return render(request, 'simple_upload.html', {
'uploaded_file_url': uploaded_file_url
})
return render(request, 'simple_upload.html')"""
def model_form_upload(request):
id=""
msg=""
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES,request.POST)
if form.is_valid():
form.save()
email=form.cleaned_data['Email']
document_count=Document.objects.values_list('document').count()
document_last=Document.objects.values_list('document')[document_count-1]
document_name=document_last[0]
print(email)
t=Document.objects.last()
num_list=['0','1','2','3','4','5','6','7','8','9']
password1=""
for i in range(0,8):
password1=<PASSWORD>+random.<PASSWORD>(num_list)
t.password=<PASSWORD>
print(type(document_name))
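            # The stored document path is base64-encoded; str() of the bytes result
            # produces a b'...' wrapper, which the [2:-1] slice below strips off.
            # The resulting token is saved as file_url, embedded in the QR code,
            # and later matched against file_url when the QR code is scanned.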
document_name1=document_name.encode('ascii')
document_encode=str(base64.b64encode(document_name1))
ax=document_encode[2:-1]
t.file_url=ax
print(ax)
t.save()
qr=pyqrcode.create(ax)
filename=document_name.rsplit(".")
filename1=filename[0].split("/")
filename2=filename1[1]+".png"
qr.png(filename2,scale=6)
"""mail=EmailMessage('QR',password1,'<EMAIL>',[email])
#mail.attach(filename2,filename2.content_type)
mail.send()"""
subject = 'QRcode scanner for license'
message = password1
email_from = settings.EMAIL_HOST_USER
recipient_list = [email, ]
mail=EmailMessage( subject, message, email_from, recipient_list )
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
mail.attach_file(os.path.join(BASE_DIR,filename2))
mail.send()
msg="your successfully uploaded"
return redirect('model_form_upload')
else:
form = DocumentForm()
return render(request, 'model_form_upload.html', {'form': form,'msg':msg})
def mypass(request):
m=""
if(request.POST.get("pswd")==request.POST.get("pswd3")):
user_data=Document.objects.filter(Email=request.POST.get("email"),password=request.POST.get("old_pswd")).update(password=request.POST.get("pswd"))
user_data1=Document.objects.filter(Email=request.POST.get("email"),password=request.POST.get("pswd"))
"""if(len_user_data==1):
userdata.password=request.POST.get("pswd")
return render(request,'mypass.html',{u:"you have change the password successfully"})
else:"""
c=0
if(user_data1):
subject = 'QRcode scanner for license'
message = "Password has succesfully changed"+" "+request.POST.get("pswd")
email_from = settings.EMAIL_HOST_USER
recipient_list = [request.POST.get("email"), ]
mail=EmailMessage( subject, message, email_from, recipient_list )
mail.send()
c=1
m="your password is changed succesfully"
elif(len(Document.objects.filter(Email=request.POST.get("email"),password=request.POST.get("old_pswd")))==0 and request.method=="POST"):
m="your email or password is incorrect"
else:
m=""
print(m)
return render(request,'mypass.html',{"m":m})
def user_req(request):
if("scanner" in request.POST and request.method=="POST"):
cap = cv2.VideoCapture(0+cv2.CAP_DSHOW)
font = cv2.FONT_HERSHEY_PLAIN
decodedObjects=[]
while decodedObjects==[]:
_, frame = cap.read()
decodedObjects = pyzbar.decode(frame)
for obj in decodedObjects:
points = obj.polygon
(x,y,w,h) = obj.rect
pts = np.array(points, np.int32)
pts = pts.reshape((-1, 1, 2))
cv2.polylines(frame, [pts], True, (0, 255, 0), 3)
cv2.putText(frame, str(obj.data), (50, 50), font, 2,
(255, 0, 0), 3)
id =obj.data.decode("utf-8")
cv2.imshow("QR Reader", frame)
key = cv2.waitKey(10) & 0xFF
if decodedObjects!=[] :
cv2.destroyAllWindows()
return render(request,"user_req.html",{"id":id})
if('proceed' in request.POST and request.method=="POST"):
userdata=Document.objects.filter(file_url=request.POST.get("id1")).filter(password=request.POST.get("password1"))
return render(request,"user_req.html",{"userdata":userdata})
return render(request,"user_req.html",)
def user(request):
return render(request,"user.html",)
def forget_pass(request):
msg=""
if(request.method=="POST"):
num_list=['0','1','2','3','4','5','6','7','8','9']
password1=""
for i in range(0,8):
password1=<PASSWORD>(num_list)
user_data=Document.objects.filter(Email=request.POST.get("email")).update(password=<PASSWORD>)
subject = 'QRcode scanner for license Forget password'
message = "Password has succesfully changed"+" "+<PASSWORD>
email_from = settings.EMAIL_HOST_USER
recipient_list = [request.POST.get("email"), ]
mail=EmailMessage( subject, message, email_from, recipient_list )
mail.send()
if(user_data>0):
msg="your password is changed succesfully and mail sent"
elif(user_data==0):
msg="your email is incorrect or not found"
return render(request,"forget_pass.html",{"msg":msg})
def qrcode_miss(request):
msg=""
if(request.method=='POST' and Document.objects.filter(Email=request.POST.get('email'),password=request.POST.get('password1'))):
user_data=Document.objects.values_list('document').filter(Email=request.POST.get('email'),password=request.POST.get('password1'))
m=user_data[0][0]
p=m.split('/')
print(p)
t=p[1]
print(t)
subject = 'QRcode scanner for license'
message = "resend"
email_from = settings.EMAIL_HOST_USER
recipient_list = [request.POST.get('email'),]
mail=EmailMessage( subject, message, email_from, recipient_list )
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
k=os.path.join(BASE_DIR,t)
print(k)
mail.attach_file(k)
mail.send()
msg="your qrcode is sent to your email"
elif(request.method=='POST'and Document.objects.values_list('document').filter(Email=request.POST.get('email'),password=request.POST.get('<PASSWORD>')).count()==0):
msg="your email or password is incorrect"
return render(request,'qrcode_miss.html',{"msg":msg})
| 2.21875 | 2 |
docassemble/MACourts/__init__.py | nonprofittechy/docassemble-MACourts | 2 | 1681 | <filename>docassemble/MACourts/__init__.py<gh_stars>1-10
__version__ = '0.0.58.2'
| 1.148438 | 1 |
main.py | Meat0Project/ChatBot | 4 | 1682 | '''
Made by - <NAME>
Purpose - Python mini project
Date - 18 october 2020
'''
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
from termcolor import cprint
import time
chatbot = ChatBot('Bot')
trainer = ChatterBotCorpusTrainer(chatbot)
trainer.train('chatterbot.corpus.english')
cprint("#" * 50, "magenta")
cprint((f"A Chatot ").center(50), "yellow")
cprint("#" * 50, "magenta")
print('You can exit by type exit\n')
while True:
query = input(">> ")
if 'exit' in query:
exit()
else:
print(chatbot.get_response(query))
| 2.9375 | 3 |
challenges/day14.py | Jeffreyo3/AdventOfCode2020 | 0 | 1683 | <reponame>Jeffreyo3/AdventOfCode2020
"""
--- Day 14: Docking Data ---
As your ferry approaches the sea port, the captain asks for your help again. The computer system that runs this port isn't compatible with the docking program on the ferry, so the docking parameters aren't being correctly initialized in the docking program's memory.
After a brief inspection, you discover that the sea port's computer system uses a strange bitmask system in its initialization program. Although you don't have the correct decoder chip handy, you can emulate it in software!
The initialization program (your puzzle input) can either update the bitmask or write a value to memory. Values and memory addresses are both 36-bit unsigned integers. For example, ignoring bitmasks for a moment, a line like mem[8] = 11 would write the value 11 to memory address 8.
The bitmask is always given as a string of 36 bits, written with the most significant bit (representing 2^35) on the left and the least significant bit (2^0, that is, the 1s bit) on the right. The current bitmask is applied to values immediately before they are written to memory: a 0 or 1 overwrites the corresponding bit in the value, while an X leaves the bit in the value unchanged.
For example, consider the following program:
mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[8] = 11
mem[7] = 101
mem[8] = 0
This program starts by specifying a bitmask (mask = ....). The mask it specifies will overwrite two bits in every written value: the 2s bit is overwritten with 0, and the 64s bit is overwritten with 1.
The program then attempts to write the value 11 to memory address 8. By expanding everything out to individual bits, the mask is applied as follows:
value: 000000000000000000000000000000001011 (decimal 11)
mask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
result: 000000000000000000000000000001001001 (decimal 73)
So, because of the mask, the value 73 is written to memory address 8 instead. Then, the program tries to write 101 to address 7:
value: 000000000000000000000000000001100101 (decimal 101)
mask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
result: 000000000000000000000000000001100101 (decimal 101)
This time, the mask has no effect, as the bits it overwrote were already the values the mask tried to set. Finally, the program tries to write 0 to address 8:
value: 000000000000000000000000000000000000 (decimal 0)
mask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
result: 000000000000000000000000000001000000 (decimal 64)
64 is written to address 8 instead, overwriting the value that was there previously.
To initialize your ferry's docking program, you need the sum of all values left in memory after the initialization program completes. (The entire 36-bit address space begins initialized to the value 0 at every address.) In the above example, only two values in memory are not zero - 101 (at address 7) and 64 (at address 8) - producing a sum of 165.
Execute the initialization program. What is the sum of all values left in memory after it completes?
"""
f = open("challenges\data\day14data.txt", "r")
def processData(file):
data = []
for x in file:
x=x.strip().replace('\n', '').split(" = ")
data.append((x[0], x[1]))
return data
# Function to convert Decimal number
# to Binary number
def decimalToBinary(n):
return bin(n).replace("0b", "")
def leadingZeros(length, bin_num):
leadingZeros = length - len(bin_num)
return "0"*leadingZeros + bin_num
def initialize(commands):
memory = {}
mask = "X"*36
for c in commands:
if c[0] == "mask":
mask = c[1]
else:
address = c[0][c[0].index("[")+1:len(c[0])-1]
binaryValue = decimalToBinary(int(c[1]))
binary36 = leadingZeros(36, binaryValue)
memory[address] = ""
for i in range(len(mask)):
if mask[i] == "X":
memory[address] += binary36[i]
else:
memory[address] += mask[i]
sum = 0
for val in memory.values():
sum += int("".join(val), 2)
return sum
"""
--- Part Two ---
For some reason, the sea port's computer system still can't communicate with your ferry's docking program. It must be using version 2 of the decoder chip!
A version 2 decoder chip doesn't modify the values being written at all. Instead, it acts as a memory address decoder. Immediately before a value is written to memory, each bit in the bitmask modifies the corresponding bit of the destination memory address in the following way:
If the bitmask bit is 0, the corresponding memory address bit is unchanged.
If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1.
If the bitmask bit is X, the corresponding memory address bit is floating.
A floating bit is not connected to anything and instead fluctuates unpredictably. In practice, this means the floating bits will take on all possible values, potentially causing many memory addresses to be written all at once!
For example, consider the following program:
mask = 000000000000000000000000000000X1001X
mem[42] = 100
mask = 00000000000000000000000000000000X0XX
mem[26] = 1
When this program goes to write to memory address 42, it first applies the bitmask:
address: 000000000000000000000000000000101010 (decimal 42)
mask: 000000000000000000000000000000X1001X
result: 000000000000000000000000000000X1101X
After applying the mask, four bits are overwritten, three of which are different, and two of which are floating. Floating bits take on every possible combination of values; with two floating bits, four actual memory addresses are written:
000000000000000000000000000000011010 (decimal 26)
000000000000000000000000000000011011 (decimal 27)
000000000000000000000000000000111010 (decimal 58)
000000000000000000000000000000111011 (decimal 59)
Next, the program is about to write to memory address 26 with a different bitmask:
address: 000000000000000000000000000000011010 (decimal 26)
mask: 00000000000000000000000000000000X0XX
result: 00000000000000000000000000000001X0XX
This results in an address with three floating bits, causing writes to eight memory addresses:
000000000000000000000000000000010000 (decimal 16)
000000000000000000000000000000010001 (decimal 17)
000000000000000000000000000000010010 (decimal 18)
000000000000000000000000000000010011 (decimal 19)
000000000000000000000000000000011000 (decimal 24)
000000000000000000000000000000011001 (decimal 25)
000000000000000000000000000000011010 (decimal 26)
000000000000000000000000000000011011 (decimal 27)
The entire 36-bit address space still begins initialized to the value 0 at every address, and you still need the sum of all values left in memory at the end of the program. In this example, the sum is 208.
Execute the initialization program using an emulator for a version 2 decoder chip. What is the sum of all values left in memory after it completes?
"""
def calculateCombinations(bin_address):
combinations = []
# xCount = 0
xPositions = []
for i in range(len(bin_address)):
# find each X and add its idx to a list
if bin_address[i] == "X":
xPositions.append(i)
# xCount += 1
if len(xPositions) > 0:
for i in range(2**(len(xPositions))):
# need to generate all possible combos of 0s & 1s
# w/ leading 0s
possible = decimalToBinary(i)
while len(possible) < len(xPositions):
possible = "0"+possible
combinations.append(possible)
addresses = []
for c in combinations:
# need to insert combination[i] into binary number
# current combo associated idx is in xPositions[i]
newAddress = ""
currPos = 0
for i in range(len(bin_address)):
if currPos < len(xPositions) and i == xPositions[currPos]:
newAddress += c[currPos]
currPos += 1
else:
newAddress += bin_address[i]
addresses.append(newAddress)
return addresses
def initialize_v2(commands):
memory = {}
mask = "X"*36
for c in commands:
if c[0] == "mask":
mask = c[1]
else:
address = c[0][c[0].index("[")+1:len(c[0])-1]
binaryAddress = decimalToBinary(int(address))
binary36 = leadingZeros(36, binaryAddress)
newVal = ""
for i in range(len(mask)):
if mask[i] != "0":
newVal += mask[i]
else:
newVal += binary36[i]
addresses = calculateCombinations(newVal)
for a in addresses:
memory[a] = int(c[1])
sum = 0
for val in memory.values():
sum += val
# print(memory)
return sum
data = processData(f)
# [print(d) for d in data]
sumAllValues = initialize(data)
print("Part 1:", sumAllValues)
sumAllValuesV2 = initialize_v2(data)
print("Part 2:", sumAllValuesV2)
# binary = decimalToBinary(33323)
# binary = leadingZeros(36, binary)
# print(binary)
# combos = initialize_v2([("mask", "100X100X101011111X100000100X11010011"),
# ("mem[33323]", "349380")])
# print(combos) | 3.75 | 4 |
src/Dialogs/RegularPolygonDialog.py | Lovely-XPP/tkzgeom | 41 | 1684 | from PyQt5 import QtWidgets, uic
from Factory import Factory
from Dialogs.DialogMacros import turn_into_free_point, free_point_checkbox
from Fill.ListWidget import fill_listWidget_with_data, set_selected_id_in_listWidget
import Constant as c
class RegularPolygonDialog(QtWidgets.QDialog):
def __init__(self, scene, data):
"""Construct RegularPolygonDialog."""
super(RegularPolygonDialog, self).__init__()
self.ui = uic.loadUi('regularpolygon.ui', self)
self.scene = scene
self.sides = 3
self.free_point = False
self.data = data
self.ui.buttonBox.accepted.connect(self.accepted)
self.ui.buttonBox.rejected.connect(self.rejected)
self.ui.sides_slider.valueChanged.connect(self.hslider_sides_func)
self.ui.checkBox.stateChanged.connect(lambda x: free_point_checkbox(self, x))
def hslider_sides_func(self, value):
"""Be slider callback function to set sides."""
self.sides = value
self.ui.sides_spin.setValue(value)
def accepted(self):
"""Create new regular polygon with settings."""
A, B = self.data
angle = -(self.sides - 2) * 180 / self.sides
polygon = [A, B]
for _ in range(self.sides - 2):
item = Factory.create_empty_item('point', c.Point.Definition.ROTATION)
definition = {'A': A, 'B': B, 'angle': angle}
id_ = Factory.next_id(item, definition, self.scene.project_data.items)
item.item["id"] = id_
item.item["definition"] = definition
if self.free_point:
item = turn_into_free_point(item, self.scene)
self.scene.project_data.add(item)
A = B
B = item.item["id"]
polygon.append(item.item["id"])
item = Factory.create_empty_item('polygon', None)
definition = polygon
item.item["id"] = Factory.next_id(item, definition, self.scene.project_data.items)
item.item["definition"] = definition
self.scene.project_data.add(item)
self.scene.project_data.recompute_canvas(*self.scene.init_canvas_dims)
current_row_old = self.scene.ui.listWidget.currentRow()
fill_listWidget_with_data(self.scene.project_data, self.scene.ui.listWidget, self.scene.current_tab_idx)
set_selected_id_in_listWidget(self.scene, current_row_old)
self.scene.edit.add_undo_item(self.scene)
def rejected(self):
"""Add no new regular polygon."""
pass
| 2.328125 | 2 |
tests/test_networks.py | UCY-LINC-LAB/5G-Slicer | 0 | 1685 | import unittest
from networks.QoS import QoS
from networks.connections.mathematical_connections import FunctionalDegradation
from networks.slicing import SliceConceptualGraph
from utils.location import Location
class TestBaseStationLinear(unittest.TestCase):
def setUp(self):
self.name = "network"
self.wireless_connection_type = "LinearDegradation"
self.backhaul_qos = {'latency': {'delay': '3.0ms', 'deviation': '1.0ms'}, 'bandwidth': '100.0mbps',
'error_rate': '1.0%'}
self.midhaul_qos = {'latency': {'delay': '3.0ms', 'deviation': '1.0ms'}, 'bandwidth': '100.0mbps',
'error_rate': '1.0%'}
self.parameters = dict(
best_qos={'latency': {'delay': '5.0ms', 'deviation': '2.0ms'}, 'bandwidth': '10.0mbps',
'error_rate': '1.0%'},
worst_qos={'latency': {'delay': '100.0ms', 'deviation': '20.0ms'}, 'bandwidth': '5.0mbps',
'error_rate': '2.0%'}, radius="5km")
self.network = SliceConceptualGraph(self.name, self.midhaul_qos, self.backhaul_qos, self.parameters)
def test_creation(self):
self.assertEqual(self.network.get_name(), "network")
def test_get_empty_nodes(self):
self.assertEqual(self.network.get_nodes(), {})
def test_add_node(self):
name, lat, lon = 'node', 33, 40
lat, lon = 33, 40
self.network.set_RU(lat, lon)
self.network.add_node(name, lat, lon)
self.assertEqual(self.network.get_nodes(), {'node': Location(lat, lon)})
with self.assertRaises(SliceConceptualGraph.NetworkSliceException):
self.network.add_node('node', 33, 40)
def test_get_empty_RUs(self):
self.assertEqual(self.network.get_RUs(), {})
def test_set_basetastion(self):
lat, lon = 33, 40
self.network.set_RU(lat, lon)
self.assertEqual(self.network.get_RUs(), {f'{lat}-{lon}': Location(lat, lon)})
with self.assertRaises(SliceConceptualGraph.NetworkSliceException):
self.network.set_RU(lat, lon)
def test_constructor(self):
with self.assertRaises(FunctionalDegradation.FunctionDegradationNetworkException):
SliceConceptualGraph('test', {}, {}, {})
SliceConceptualGraph('test', self.midhaul_qos, {}, {})
SliceConceptualGraph('test', {}, self.backhaul_qos, {})
SliceConceptualGraph('test', {}, {}, self.parameters)
def test_get_qos(self):
self.assertEqual(self.network.get_backhaul(), QoS(self.backhaul_qos))
def test_set_qos(self):
self.network.set_backhaul(QoS.minimum_qos_dict)
self.assertEqual(self.network.get_backhaul(), QoS(QoS.minimum_qos_dict))
def test_qos_from_distance(self):
self.assertEqual(self.network.get_qos_from(5).get_formated_qos(), self.parameters.get('worst_qos'))
self.assertEqual(self.network.get_qos_from(0.0).get_formated_qos(), self.parameters.get('best_qos'))
def test_get_node_location(self):
lat, lon = 33, 40
self.network.set_RU(lat, lon)
self.network.add_node('test', 10, 10)
self.assertEqual(self.network.get_node_location('test2'), None)
self.assertEqual(self.network.get_node_location('test'), Location(10, 10))
def test_has_to_pass_through_backhaul(self):
self.network.set_RU(10, 10)
self.network.set_RU(20, 20)
self.network.add_node('source1', 10, 10)
self.network.add_node('destination1', 10, 10)
self.network.add_node('destination2', 20, 20)
def test_set_RUs(self):
self.network.set_RUs([{'lat': 10, 'lon': 10}, {'lat': 5, 'lon': 5}])
self.assertEqual(self.network.get_RUs(),
{'10-10': Location(**{'lat': 10, 'lon': 10}), '5-5': Location(**{'lat': 5, 'lon': 5})})
lat, lon = 33, 40
self.network.set_RU(lat, lon)
with self.assertRaises(SliceConceptualGraph.NetworkSliceException):
self.network.set_RUs([{'lat': 10, 'lon': 10}, {'lat': 5, 'lon': 5}])
def test_set_node_location(self):
lat, lon = 33, 40
self.network.set_RU(lat, lon)
self.network.add_node('destination1', 10, 10)
self.network.set_node_location('destination1', 20, 20)
self.assertEqual(self.network.get_node_location('destination1'), Location(20, 20))
with self.assertRaises(Location.LocationException):
self.network.set_node_location('destination1', 'test', 20)
with self.assertRaises(Location.LocationException):
self.network.set_node_location('destination1', 20, 'test')
class TestBaseLog2Degradation(unittest.TestCase):
def setUp(self):
self.name = "network"
self.wireless_connection_type = "Log2Degradation"
self.midhaul_qos = {'latency': {'delay': '3.0ms', 'deviation': '1.0ms'}, 'bandwidth': '100.0mbps',
'error_rate': '1.0%'}
self.backhaul_qos = {'latency': {'delay': '3.0ms', 'deviation': '1.0ms'}, 'bandwidth': '100.0mbps',
'error_rate': '1.0%'}
self.parameters = dict(
best_qos={'latency': {'delay': '5.0ms', 'deviation': '2.0ms'}, 'bandwidth': '10.0mbps',
'error_rate': '1.0%'},
worst_qos={'latency': {'delay': '100.0ms', 'deviation': '20.0ms'}, 'bandwidth': '5.0mbps',
'error_rate': '2.0%'}, radius="5km")
self.network = SliceConceptualGraph(self.name, self.midhaul_qos, self.backhaul_qos, self.parameters)
def test_creation(self):
self.assertEqual(self.network.get_name(), "network")
def test_get_empty_nodes(self):
self.assertEqual(self.network.get_nodes(), {})
def test_add_node(self):
name, lat, lon = 'node', 33, 40
with self.assertRaises(SliceConceptualGraph.NetworkSliceException):
self.network.add_node(name, lat, lon)
self.network.set_RU(33, 40, 0)
self.network.add_node(name, lat, lon)
self.assertEqual(self.network.get_nodes(), {'node': Location(lat, lon)})
with self.assertRaises(SliceConceptualGraph.NetworkSliceException):
self.network.add_node('node', 33, 40)
def test_get_empty_RUs(self):
self.assertEqual(self.network.get_RUs(), {})
def test_set_basetastion(self):
lat, lon = 33, 40
self.network.set_RU(lat, lon)
self.assertEqual(self.network.get_RUs(), {f'{lat}-{lon}': Location(lat, lon)})
with self.assertRaises(SliceConceptualGraph.NetworkSliceException):
self.network.set_RU(lat, lon)
def test_constructor(self):
with self.assertRaises(FunctionalDegradation.FunctionDegradationNetworkException):
SliceConceptualGraph('test', {}, {}, {})
SliceConceptualGraph('test', self.midhaul_qos, {}, {})
SliceConceptualGraph('test', {}, self.backhaul_qos, {})
SliceConceptualGraph('test', {}, {}, self.parameters)
def test_get_qos(self):
self.assertEqual(self.network.get_backhaul(), QoS(self.backhaul_qos))
def test_set_qos(self):
self.network.set_backhaul(QoS.minimum_qos_dict)
self.assertEqual(self.network.get_backhaul(), QoS(QoS.minimum_qos_dict))
def test_qos_from_distance(self):
self.assertEqual(self.network.get_qos_from(5).get_formated_qos(), self.parameters.get('worst_qos'))
self.assertEqual(self.network.get_qos_from(0.0).get_formated_qos(), self.parameters.get('best_qos'))
def test_get_node_location(self):
lat, lon = 33, 40
self.network.set_RU(lat, lon)
self.network.add_node('test', 10, 10)
self.assertEqual(self.network.get_node_location('test2'), None)
self.assertEqual(self.network.get_node_location('test'), Location(10, 10))
def test_set_RUs(self):
self.network.set_RUs([{'lat': 10, 'lon': 10}, {'lat': 5, 'lon': 5}])
self.assertEqual(self.network.get_RUs(),
{'10-10': Location(**{'lat': 10, 'lon': 10}), '5-5': Location(**{'lat': 5, 'lon': 5})})
with self.assertRaises(SliceConceptualGraph.NetworkSliceException):
self.network.set_RUs([{'lat': 10, 'lon': 10}, {'lat': 5, 'lon': 5}])
def test_set_node_location(self):
lat, lon = 33, 40
self.network.set_RU(lat, lon)
self.network.add_node('destination1', 10, 10)
self.network.set_node_location('destination1', 20, 20)
self.assertEqual(self.network.get_node_location('destination1'), Location(20, 20))
with self.assertRaises(Location.LocationException):
self.network.set_node_location('destination1', 'test', 20)
with self.assertRaises(Location.LocationException):
self.network.set_node_location('destination1', 20, 'test')
| 2.9375 | 3 |
backend/api/management/commands/create_testdb.py | INSRapperswil/nornir-web | 2 | 1686 | """
Setup DB with example data for tests
"""
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User, Group
from django.core.management.base import BaseCommand
from api import models
class Command(BaseCommand):
help = 'Setup DB with example data for tests'
def handle(self, *args, **options):
print('---- Creating Users ----')
User.objects.get_or_create(username='thomastest', password=make_password('<PASSWORD>'))
thomas = User.objects.get(username='thomastest')
User.objects.get_or_create(username='norbert', password=make_password('<PASSWORD>'))
norbert = User.objects.get(username='norbert')
User.objects.get_or_create(username='stefan', password=make_password('<PASSWORD>'))
stefan = User.objects.get(username='stefan')
superuser = Group.objects.get(name='superuser')
superuser.user_set.add(thomas)
netadmin = Group.objects.get(name='netadmin')
netadmin.user_set.add(norbert)
support = Group.objects.get(name='support')
support.user_set.add(stefan)
print('---- Creating Inventory ----')
models.Inventory.objects.create(name='Example', hosts_file='web_nornir/nornir_config/example_config/hosts.yaml',
groups_file='web_nornir/nornir_config/example_config/groups.yaml', type=1)
models.Inventory.objects.create(name='INS Lab', hosts_file='web_nornir/nornir_config/inslab_config/hosts.yaml',
groups_file='web_nornir/nornir_config/inslab_config/groups.yaml', type=1)
print('---- Creating Job Templates ----')
models.JobTemplate.objects.create(name='hello_world', description='This prints a hello world',
file_name='hello_world.py', created_by_id=1)
models.JobTemplate.objects.create(name='Get CDP Neighbors', description='Lists all CDP neighbors',
file_name='get_cdp_neighbors.py', created_by_id=1)
models.JobTemplate.objects.create(name='Get Interfaces',
description='Gets brief information about all interfaces, sh ip int br',
file_name='get_interfaces.py', created_by_id=1)
models.JobTemplate.objects.create(name='Ping Device',
description='Pings a chosen network device and reports if reachable',
file_name='ping.py', variables=['target'], created_by_id=1)
models.JobTemplate.objects.create(name='Get Configuration', description='Gets all configuration from device',
file_name='get_configuration.py', created_by_id=1)
print('---- Creating Tasks ----')
models.Task.objects.create(name='Get Hello World', created_by_id=1, template_id=1, inventory_id=1)
models.Task.objects.create(name='Get CDP neighbors of INS lab', created_by_id=2, template_id=2, inventory_id=2)
models.Task.objects.create(name='Get interfaces of INS lab', created_by_id=2, template_id=3, inventory_id=2)
print('---- ALL DONE!! ----')
| 2.421875 | 2 |
pyinfra/facts/util/distro.py | charles-l/pyinfra | 1 | 1687 | from __future__ import absolute_import, unicode_literals
import os
import distro
def get_distro_info(root_dir):
# We point _UNIXCONFDIR to root_dir
old_value = distro._UNIXCONFDIR
distro._UNIXCONFDIR = os.path.join(root_dir, 'etc')
obj = distro.LinuxDistribution(include_lsb=False, include_uname=False)
# NOTE: The parsing of LinuxDistribution distro information is done in a lazy way.
# This will force the parsing to happen before we restore the old value of _UNIXCONFDIR.
_ = obj.info()
distro._UNIXCONFDIR = old_value
return obj
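# Minimal usage sketch (the chroot path below is hypothetical, not part of the
# original module): parse /mnt/target/etc/os-release instead of the host's
# /etc directory, then read the parsed fields from the returned object.
#
#     info = get_distro_info('/mnt/target')
#     print(info.id(), info.version())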
| 2.15625 | 2 |
appium/webdriver/common/multi_action.py | salabogdan/python-client | 1 | 1688 | <reponame>salabogdan/python-client
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The Selenium team implemented something like the Multi Action API in the form of
# "action chains" (https://code.google.com/p/selenium/source/browse/py/selenium/webdriver/common/action_chains.py).
# These do not quite work for this situation, and do not allow for ad hoc action
# chaining as the spec requires.
import copy
from typing import TYPE_CHECKING, Dict, List, Optional, TypeVar, Union
from appium.webdriver.mobilecommand import MobileCommand as Command
if TYPE_CHECKING:
from appium.webdriver.common.touch_action import TouchAction
from appium.webdriver.webdriver import WebDriver
from appium.webdriver.webelement import WebElement
T = TypeVar('T', bound='MultiAction')
class MultiAction:
def __init__(self, driver: 'WebDriver', element: Optional['WebElement'] = None) -> None:
self._driver = driver
self._element = element
self._touch_actions: List['TouchAction'] = []
def add(self, *touch_actions: 'TouchAction') -> None:
"""Add TouchAction objects to the MultiAction, to be performed later.
Args:
touch_actions: one or more TouchAction objects describing a chain of actions to be performed by one finger
Usage:
| a1 = TouchAction(driver)
| a1.press(el1).move_to(el2).release()
| a2 = TouchAction(driver)
| a2.press(el2).move_to(el1).release()
| MultiAction(driver).add(a1, a2)
Returns:
`MultiAction`: Self instance
"""
for touch_action in touch_actions:
if self._touch_actions is None:
self._touch_actions = []
self._touch_actions.append(copy.copy(touch_action))
def perform(self: T) -> T:
"""Perform the actions stored in the object.
Usage:
| a1 = TouchAction(driver)
| a1.press(el1).move_to(el2).release()
| a2 = TouchAction(driver)
| a2.press(el2).move_to(el1).release()
| MultiAction(driver).add(a1, a2).perform()
Returns:
`MultiAction`: Self instance
"""
self._driver.execute(Command.MULTI_ACTION, self.json_wire_gestures)
# clean up and be ready for the next batch
self._touch_actions = []
return self
@property
def json_wire_gestures(self) -> Dict[str, Union[List, str]]:
actions = []
for action in self._touch_actions:
actions.append(action.json_wire_gestures)
if self._element is not None:
return {'actions': actions, 'elementId': self._element.id}
return {'actions': actions}
| 2.421875 | 2 |
src/visu/visualizer.py | JonasFrey96/PLR2 | 0 | 1689 | import numpy as np
import sys
import os
from PIL import Image
from visu.helper_functions import save_image
from scipy.spatial.transform import Rotation as R
from helper import re_quat
import copy
import torch
import numpy as np
import k3d
class Visualizer():
def __init__(self, p_visu, writer=None):
if p_visu[-1] != '/':
p_visu = p_visu + '/'
self.p_visu = p_visu
self.writer = writer
if not os.path.exists(self.p_visu):
os.makedirs(self.p_visu)
def plot_estimated_pose(self, tag, epoch, img, points, trans=[[0, 0, 0]], rot_mat=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], cam_cx=0, cam_cy=0, cam_fx=0, cam_fy=0, store=False, jupyter=False, w=2):
"""
tag := tensorboard tag
epoch := tensorboard epoche
store := ture -> stores the image to standard path
path := != None creats the path and store to it path/tag.png
img:= original_image, [widht,height,RGB]
points:= points of the object model [length,x,y,z]
trans: [1,3]
rot: [3,3]
"""
img_d = copy.deepcopy(img)
points = np.dot(points, rot_mat.T)
points = np.add(points, trans[0, :])
for i in range(0, points.shape[0]):
p_x = points[i, 0]
p_y = points[i, 1]
p_z = points[i, 2]
u = int(((p_x / p_z) * cam_fx) + cam_cx)
v = int(((p_y / p_z) * cam_fy) + cam_cy)
try:
img_d[v - w:v + w + 1, u - w:u + w + 1, 0] = 0
img_d[v - w:v + w + 1, u - w:u + w + 1, 1] = 255
img_d[v - w:v + w + 1, u - w:u + w + 1, 0] = 0
except:
#print("out of bounce")
pass
if jupyter:
display(Image.fromarray(img_d))
if store:
#store_ar = (img_d* 255).round().astype(np.uint8)
#print("IMAGE D:" ,img_d,img_d.shape )
save_image(img_d, tag=str(epoch) + tag, p_store=self.p_visu)
if self.writer is not None:
self.writer.add_image(tag, img_d.astype(
np.uint8), global_step=epoch, dataformats='HWC')
def plot_bounding_box(self, tag, epoch, img, rmin=0, rmax=0, cmin=0, cmax=0, str_width=2, store=False, jupyter=False, b=None):
"""
tag := tensorboard tag
epoch := tensorboard epoch
store := true -> stores the image to standard path
path := != None creates the path and stores to it path/tag.png
img := original_image, [width,height,RGB]
"""
if isinstance(b, dict):
rmin = b['rmin']
rmax = b['rmax']
cmin = b['cmin']
cmax = b['cmax']
# ToDo check Input data
img_d = np.array(copy.deepcopy(img))
c = [0, 0, 255]
rmin_mi = max(0, rmin - str_width)
rmin_ma = min(img_d.shape[0], rmin + str_width)
rmax_mi = max(0, rmax - str_width)
rmax_ma = min(img_d.shape[0], rmax + str_width)
cmin_mi = max(0, cmin - str_width)
cmin_ma = min(img_d.shape[1], cmin + str_width)
cmax_mi = max(0, cmax - str_width)
cmax_ma = min(img_d.shape[1], cmax + str_width)
img_d[rmin_mi:rmin_ma, cmin:cmax, :] = c
img_d[rmax_mi:rmax_ma, cmin:cmax, :] = c
img_d[rmin:rmax, cmin_mi:cmin_ma, :] = c
img_d[rmin:rmax, cmax_mi:cmax_ma, :] = c
print("STORE", store)
img_d = img_d.astype(np.uint8)
if store:
#store_ar = (img_d* 255).round().astype(np.uint8)
save_image(img_d, tag=str(epoch) + tag, p_store=self.p_visu)
if jupyter:
display(Image.fromarray(img_d))
if self.writer is not None:
self.writer.add_image(tag, img_d.astype(
np.uint8), global_step=epoch, dataformats='HWC')
def plot_pcd(x, point_size=0.005, c='g'):
"""
x: point_nr,3
"""
if c == 'b':
k = 245
elif c == 'g':
k = 25811000
elif c == 'r':
k = 11801000
elif c == 'black':
k = 2580
else:
k = 2580
colors = np.ones(x.shape[0]) * k
plot = k3d.plot(name='points')
plt_points = k3d.points(x, colors.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points.shader = '3d'
plot.display()
def plot_two_pcd(x, y, point_size=0.005, c1='g', c2='r'):
if c1 == 'b':
k = 245
elif c1 == 'g':
k = 25811000
elif c1 == 'r':
k = 11801000
elif c1 == 'black':
k = 2580
else:
k = 2580
if c2 == 'b':
k2 = 245
elif c2 == 'g':
k2 = 25811000
elif c2 == 'r':
k2 = 11801000
elif c2 == 'black':
k2 = 2580
else:
k2 = 2580
col1 = np.ones(x.shape[0]) * k
col2 = np.ones(y.shape[0]) * k2
plot = k3d.plot(name='points')
plt_points = k3d.points(x, col1.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points = k3d.points(y, col2.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points.shader = '3d'
plot.display()
class SequenceVisualizer():
def __init__(self, seq_data, images_path, output_path=None):
self.seq_data = seq_data
self.images_path = images_path
self.output_path = output_path
def plot_points_on_image(self, seq_no, frame_no, jupyter=False, store=False, pose_type='filtered'):
seq_data = self.seq_data
images_path = self.images_path
output_path = self.output_path
frame = seq_data[seq_no][frame_no]
unique_desig = frame['dl_dict']['unique_desig'][0]
if pose_type == 'ground_truth':
# ground truth
t = frame['dl_dict']['gt_trans'].reshape(1, 3)
rot_quat = re_quat(copy.deepcopy(
frame['dl_dict']['gt_rot_wxyz'][0]), 'wxyz')
rot = R.from_quat(rot_quat).as_matrix()
elif pose_type == 'filtered':
# filter pred
t = np.array(frame['filter_pred']['t']).reshape(1, 3)
rot_quat = re_quat(copy.deepcopy(
frame['filter_pred']['r_wxyz']), 'wxyz')
rot = R.from_quat(rot_quat).as_matrix()
elif pose_type == 'final_pred_obs':
# final pred
t = np.array(frame['final_pred_obs']['t']).reshape(1, 3)
rot_quat = re_quat(copy.deepcopy(
frame['final_pred_obs']['r_wxyz']), 'wxyz')
rot = R.from_quat(rot_quat).as_matrix()
else:
raise Exception('Pose type not implemented.')
w = 2
if type(unique_desig) != str:
im = np.array(Image.open(
images_path + unique_desig[0] + '-color.png')) # ycb
else:
im = np.array(Image.open(
images_path + unique_desig + '.png')) # laval
img_d = copy.deepcopy(im)
dl_dict = frame['dl_dict']
points = copy.deepcopy(
seq_data[seq_no][0]['dl_dict']['model_points'][0, :, :])
points = np.dot(points, rot.T)
points = np.add(points, t[0, :])
cam_cx = dl_dict['cam_cal'][0][0]
cam_cy = dl_dict['cam_cal'][0][1]
cam_fx = dl_dict['cam_cal'][0][2]
cam_fy = dl_dict['cam_cal'][0][3]
for i in range(0, points.shape[0]):
p_x = points[i, 0]
p_y = points[i, 1]
p_z = points[i, 2]
u = int(((p_x / p_z) * cam_fx) + cam_cx)
v = int(((p_y / p_z) * cam_fy) + cam_cy)
try:
img_d[v - w:v + w + 1, u - w:u + w + 1, 0] = 0
img_d[v - w:v + w + 1, u - w:u + w + 1, 1] = 255
img_d[v - w:v + w + 1, u - w:u + w + 1, 2] = 0
except:
#print("out of bounds")
pass
img_disp = Image.fromarray(img_d)
if jupyter:
display(img_disp)
if store:
outpath = output_path + \
'{}_{}_{}.png'.format(pose_type, seq_no, frame_no)
img_disp.save(outpath, "PNG", compress_level=1)
print("Saved image to {}".format(outpath))
def save_sequence(self, seq_no, pose_type='filtered', name=''):
for fn in range(len(self.seq_data)):
self.plot_points_on_image(seq_no, fn, False, True, pose_type)
if name:
video_name = '{}_{}_{}'.format(name, pose_type, seq_no)
else:
video_name = '{}_{}'.format(pose_type, seq_no)
cmd = "cd {} && ffmpeg -r 10 -i ./filtered_{}_%d.png -vcodec mpeg4 -y {}.mp4".format(
self.output_path, seq_no, video_name)
os.system(cmd)
| 2.203125 | 2 |
leetCode_Q37_serializeTree.py | FreesiaLikesPomelo/-offer | 0 | 1690 | <reponame>FreesiaLikesPomelo/-offer
'''
Interview Question 37. Serialize a Binary Tree
Implement two functions to serialize and deserialize a binary tree, respectively.
Example:
You can serialize the following binary tree:
1
/ \
2 3
/ \
4 5
as "[1,2,3,null,null,4,5]"
'''
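# Sketch (assumption: an alternative, more compact approach than the Codec
# class below): a breadth-first traversal that keeps explicit 'null' markers,
# matching the "[1,2,3,null,null,4,5]" format shown above.
def serialize_bfs_sketch(root):
    from collections import deque
    if not root:
        return '[]'
    out, queue = [], deque([root])
    while queue:
        node = queue.popleft()
        if node is None:
            out.append('null')
        else:
            out.append(str(node.val))
            queue.append(node.left)
            queue.append(node.right)
    while out and out[-1] == 'null':  # trim trailing nulls
        out.pop()
    return '[' + ','.join(out) + ']'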
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Runtime: 240 ms, beats 22.75% of Python3 submissions
# Memory usage: 31 MB, beats 100.00% of Python3 submissions
class Codec:
def __init__(self):
self.tree = []
self.temp = []
self.flag = 1 # when a non-None element appears again, add self.temp to self.tree
def traByLayer(self, tree: List[TreeNode]):
if tree==[]:
return
else:
temp = tree.pop(0)
if temp!=None:
self.tree+=self.temp
self.temp = []
self.tree.append(temp.val)
tree.append(temp.left)
tree.append(temp.right)
else:
self.temp.append(None)
#print("trabylary",self.tree)
self.traByLayer(tree)
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
if root==None:
return ''
tree = [root]
self.traByLayer(tree)
print(str(self.tree))
return str(self.tree)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
#data = '[1, 2, 3, 1, 3, 2, 4]'
if data=='':
return None
start = 0
end = 0
tree = []
for i in range(len(data)):
if data[i]==',' or data[i]==']':
start = end+1
end = i
if data[start:end]!=' None':
#print(start,end,data[start:end])
tree.append(int(data[start:end]))
else:
tree.append(None)
#print("Tree",tree,"then build the Tree")
root = TreeNode(tree.pop(0))
self.buildTreeByList([root],tree)
return root
def buildTreeByList(self,r:List[TreeNode], data: List[int]):
if r==[] or data==[]:
return
root = r.pop(0)
datalen = len(data)
if datalen==0:
return
elif datalen<=2:
#print("root",root.val,"tree",data,"datalen",datalen)
temp = data.pop(0)
if temp!=None:
root.left = TreeNode(temp)
r.append(root.left)
if data!=[]:
temp = data.pop(0)
if temp!=None:
root.right = TreeNode(temp)
r.append(root.right)
return
else:
#print("root",root.val,"tree",data,"datalen",datalen)
temp = data.pop(0)
if temp!=None:
root.left = TreeNode(temp)
r.append(root.left)
temp = data.pop(0)
if temp!=None:
root.right = TreeNode(temp)
r.append(root.right)
self.buildTreeByList(r,data)
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
| 3.265625 | 3 |
ipuz/puzzlekinds/__init__.py | maiamcc/ipuz | 5 | 1691 | <gh_stars>1-10
from .acrostic import IPUZ_ACROSTIC_VALIDATORS
from .answer import IPUZ_ANSWER_VALIDATORS
from .block import IPUZ_BLOCK_VALIDATORS
from .crossword import IPUZ_CROSSWORD_VALIDATORS
from .fill import IPUZ_FILL_VALIDATORS
from .sudoku import IPUZ_SUDOKU_VALIDATORS
from .wordsearch import IPUZ_WORDSEARCH_VALIDATORS
IPUZ_PUZZLEKINDS = {
"http://ipuz.org/acrostic": {
"mandatory": (
"puzzle",
),
"validators": {
1: IPUZ_ACROSTIC_VALIDATORS,
},
},
"http://ipuz.org/answer": {
"mandatory": (),
"validators": {
1: IPUZ_ANSWER_VALIDATORS,
},
},
"http://ipuz.org/block": {
"mandatory": (
"dimensions",
),
"validators": {
1: IPUZ_BLOCK_VALIDATORS,
},
},
"http://ipuz.org/crossword": {
"mandatory": (
"dimensions",
"puzzle",
),
"validators": {
1: IPUZ_CROSSWORD_VALIDATORS,
},
},
"http://ipuz.org/fill": {
"mandatory": (),
"validators": {
1: IPUZ_FILL_VALIDATORS,
},
},
"http://ipuz.org/sudoku": {
"mandatory": (
"puzzle",
),
"validators": {
1: IPUZ_SUDOKU_VALIDATORS,
},
},
"http://ipuz.org/wordsearch": {
"mandatory": (
"dimensions",
),
"validators": {
1: IPUZ_WORDSEARCH_VALIDATORS,
},
},
}
| 2.15625 | 2 |
CTFd/api/v1/users.py | MrQubo/CTFd | 0 | 1692 | <filename>CTFd/api/v1/users.py
from flask import session, request, abort
from flask_restplus import Namespace, Resource
from CTFd.models import (
db,
Users,
Solves,
Awards,
Tracking,
Unlocks,
Submissions,
Notifications,
)
from CTFd.utils.decorators import authed_only, admins_only, ratelimit
from CTFd.cache import clear_standings
from CTFd.utils.user import get_current_user, is_admin
from CTFd.utils.decorators.visibility import (
check_account_visibility,
check_score_visibility,
)
from CTFd.schemas.submissions import SubmissionSchema
from CTFd.schemas.awards import AwardSchema
from CTFd.schemas.users import UserSchema
users_namespace = Namespace("users", description="Endpoint to retrieve Users")
@users_namespace.route("")
class UserList(Resource):
@check_account_visibility
def get(self):
users = Users.query.filter_by(banned=False, hidden=False)
response = UserSchema(view="user", many=True).dump(users)
if response.errors:
return {"success": False, "errors": response.errors}, 400
return {"success": True, "data": response.data}
@admins_only
def post(self):
req = request.get_json()
schema = UserSchema("admin")
response = schema.load(req)
if response.errors:
return {"success": False, "errors": response.errors}, 400
db.session.add(response.data)
db.session.commit()
if request.args.get("notify"):
name = response.data.name
password = req.get("password")
clear_standings()
response = schema.dump(response.data)
return {"success": True, "data": response.data}
@users_namespace.route("/<int:user_id>")
@users_namespace.param("user_id", "User ID")
class UserPublic(Resource):
@check_account_visibility
def get(self, user_id):
user = Users.query.filter_by(id=user_id).first_or_404()
if (user.banned or user.hidden) and is_admin() is False:
abort(404)
response = UserSchema(view=session.get("type", "user")).dump(user)
if response.errors:
return {"success": False, "errors": response.errors}, 400
response.data["place"] = user.place
response.data["score"] = user.score
return {"success": True, "data": response.data}
@admins_only
def patch(self, user_id):
user = Users.query.filter_by(id=user_id).first_or_404()
data = request.get_json()
data["id"] = user_id
schema = UserSchema(view="admin", instance=user, partial=True)
response = schema.load(data)
if response.errors:
return {"success": False, "errors": response.errors}, 400
db.session.commit()
response = schema.dump(response.data)
db.session.close()
clear_standings()
return {"success": True, "data": response}
@admins_only
def delete(self, user_id):
Notifications.query.filter_by(user_id=user_id).delete()
Awards.query.filter_by(user_id=user_id).delete()
Unlocks.query.filter_by(user_id=user_id).delete()
Submissions.query.filter_by(user_id=user_id).delete()
Solves.query.filter_by(user_id=user_id).delete()
Tracking.query.filter_by(user_id=user_id).delete()
Users.query.filter_by(id=user_id).delete()
db.session.commit()
db.session.close()
clear_standings()
return {"success": True}
@users_namespace.route("/me")
class UserPrivate(Resource):
@authed_only
def get(self):
user = get_current_user()
response = UserSchema("self").dump(user).data
response["place"] = user.place
response["score"] = user.score
return {"success": True, "data": response}
@authed_only
def patch(self):
user = get_current_user()
data = request.get_json()
schema = UserSchema(view="self", instance=user, partial=True)
response = schema.load(data)
if response.errors:
return {"success": False, "errors": response.errors}, 400
db.session.commit()
response = schema.dump(response.data)
db.session.close()
clear_standings()
return {"success": True, "data": response.data}
@users_namespace.route("/me/solves")
class UserPrivateSolves(Resource):
@authed_only
def get(self):
user = get_current_user()
solves = user.get_solves(admin=True)
view = "user" if not is_admin() else "admin"
response = SubmissionSchema(view=view, many=True).dump(solves)
if response.errors:
return {"success": False, "errors": response.errors}, 400
return {"success": True, "data": response.data}
@users_namespace.route("/me/fails")
class UserPrivateFails(Resource):
@authed_only
def get(self):
user = get_current_user()
fails = user.get_fails(admin=True)
view = "user" if not is_admin() else "admin"
response = SubmissionSchema(view=view, many=True).dump(fails)
if response.errors:
return {"success": False, "errors": response.errors}, 400
if is_admin():
data = response.data
else:
data = []
count = len(response.data)
return {"success": True, "data": data, "meta": {"count": count}}
@users_namespace.route("/me/awards")
@users_namespace.param("user_id", "User ID")
class UserPrivateAwards(Resource):
@authed_only
def get(self):
user = get_current_user()
awards = user.get_awards(admin=True)
view = "user" if not is_admin() else "admin"
response = AwardSchema(view=view, many=True).dump(awards)
if response.errors:
return {"success": False, "errors": response.errors}, 400
return {"success": True, "data": response.data}
@users_namespace.route("/<user_id>/solves")
@users_namespace.param("user_id", "User ID")
class UserPublicSolves(Resource):
@check_account_visibility
@check_score_visibility
def get(self, user_id):
user = Users.query.filter_by(id=user_id).first_or_404()
if (user.banned or user.hidden) and is_admin() is False:
abort(404)
solves = user.get_solves(admin=is_admin())
view = "user" if not is_admin() else "admin"
response = SubmissionSchema(view=view, many=True).dump(solves)
if response.errors:
return {"success": False, "errors": response.errors}, 400
# return {"success": True, "data": response.data}
return {"success": True, "data": None}
@users_namespace.route("/<user_id>/fails")
@users_namespace.param("user_id", "User ID")
class UserPublicFails(Resource):
@check_account_visibility
@check_score_visibility
def get(self, user_id):
user = Users.query.filter_by(id=user_id).first_or_404()
if (user.banned or user.hidden) and is_admin() is False:
abort(404)
fails = user.get_fails(admin=is_admin())
view = "user" if not is_admin() else "admin"
response = SubmissionSchema(view=view, many=True).dump(fails)
if response.errors:
return {"success": False, "errors": response.errors}, 400
if is_admin():
data = response.data
else:
data = []
count = len(response.data)
# return {"success": True, "data": data, "meta": {"count": count}}
return {"success": True, "data": None, "meta": {"count": None}}
@users_namespace.route("/<user_id>/awards")
@users_namespace.param("user_id", "User ID or 'me'")
class UserPublicAwards(Resource):
@check_account_visibility
@check_score_visibility
def get(self, user_id):
user = Users.query.filter_by(id=user_id).first_or_404()
if (user.banned or user.hidden) and is_admin() is False:
abort(404)
awards = user.get_awards(admin=is_admin())
view = "user" if not is_admin() else "admin"
response = AwardSchema(view=view, many=True).dump(awards)
if response.errors:
return {"success": False, "errors": response.errors}, 400
# return {"success": True, "data": response.data}
return {"success": True, "data": None}
| 2.265625 | 2 |
getting_started/pages.py | emilhe/dash-extensions-docs | 1 | 1693 | <filename>getting_started/pages.py
import dash_labs as dl
from dash_extensions.enrich import DashBlueprint, DashProxy, html, Output, Input
def page_name(i: int):
return f"page{i}"
def make_page(i: int):
page = DashBlueprint()
page.layout = html.Div([html.H2(f"Page {i}"), html.Button('Click me!', id='btn'), html.Div(id='log')])
@page.callback(Output('log', 'children'), Input('btn', 'n_clicks'))
def on_click(n_clicks):
return f"Hello world {n_clicks} from page {i}!"
return page
app = DashProxy(prevent_initial_callbacks=True, plugins=[dl.plugins.pages])
# Register a few pages.
n_pages = 5
for i in range(n_pages):
page = make_page(i)
page.register(app, page_name(i), prefix=str(i))
# Setup main app layout.
app_shell = [html.H1("App shell"), dl.plugins.page_container]
navigation = html.Ul([html.Li(html.A(page_name(i), href=page_name(i))) for i in range(n_pages)])
app.layout = html.Div(app_shell + [navigation], style=dict(display="block"))
if __name__ == '__main__':
app.run_server() | 2.40625 | 2 |
zaqar/transport/wsgi/v2_0/homedoc.py | vkmc/zaqar-websocket | 1 | 1694 | # Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import json
# NOTE(kgriffs): http://tools.ietf.org/html/draft-nottingham-json-home-03
JSON_HOME = {
'resources': {
# -----------------------------------------------------------------
# Queues
# -----------------------------------------------------------------
'rel/queues': {
'href-template': '/v2/queues{?marker,limit,detailed}',
'href-vars': {
'marker': 'param/marker',
'limit': 'param/queue_limit',
'detailed': 'param/detailed',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/queue': {
'href-template': '/v2/queues/{queue_name}',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['PUT', 'DELETE'],
'formats': {
'application/json': {},
},
},
},
'rel/queue_stats': {
'href-template': '/v2/queues/{queue_name}/stats',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
# -----------------------------------------------------------------
# Messages
# -----------------------------------------------------------------
'rel/messages': {
'href-template': ('/v2/queues/{queue_name}/messages'
'{?marker,limit,echo,include_claimed}'),
'href-vars': {
'queue_name': 'param/queue_name',
'marker': 'param/marker',
'limit': 'param/messages_limit',
'echo': 'param/echo',
'include_claimed': 'param/include_claimed',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/post_messages': {
'href-template': '/v2/queues/{queue_name}/messages',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['POST'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json'],
},
},
'rel/messages_delete': {
'href-template': '/v2/queues/{queue_name}/messages{?ids,pop}',
'href-vars': {
'queue_name': 'param/queue_name',
'ids': 'param/ids',
'pop': 'param/pop'
},
'hints': {
'allow': [
'DELETE'
],
'formats': {
'application/json': {}
}
}
},
'rel/message_delete': {
'href-template': '/v2/queues/{queue_name}/messages/{message_id}{?claim}', # noqa
'href-vars': {
'queue_name': 'param/queue_name',
'message_id': 'param/message_id',
'claim': 'param/claim_id'
},
'hints': {
'allow': [
'DELETE'
],
'formats': {
'application/json': {}
}
}
},
# -----------------------------------------------------------------
# Claims
# -----------------------------------------------------------------
'rel/claim': {
'href-template': '/v2/queues/{queue_name}/claims/{claim_id}',
'href-vars': {
'queue_name': 'param/queue_name',
'claim_id': 'param/claim_id',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/post_claim': {
'href-template': '/v2/queues/{queue_name}/claims{?limit}',
'href-vars': {
'queue_name': 'param/queue_name',
'limit': 'param/claim_limit',
},
'hints': {
'allow': ['POST'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json']
},
},
'rel/patch_claim': {
'href-template': '/v2/queues/{queue_name}/claims/{claim_id}',
'href-vars': {
'queue_name': 'param/queue_name',
'claim_id': 'param/claim_id',
},
'hints': {
'allow': ['PATCH'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json']
},
},
'rel/delete_claim': {
'href-template': '/v2/queues/{queue_name}/claims/{claim_id}',
'href-vars': {
'queue_name': 'param/queue_name',
'claim_id': 'param/claim_id',
},
'hints': {
'allow': ['DELETE'],
'formats': {
'application/json': {},
},
},
},
}
}
ADMIN_RESOURCES = {
# -----------------------------------------------------------------
# Pools
# -----------------------------------------------------------------
'rel/pools': {
'href-template': '/v2/pools{?detailed,limit,marker}',
'href-vars': {
'detailed': 'param/detailed',
'limit': 'param/pool_limit',
'marker': 'param/marker',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/pool': {
'href-template': '/v2/pools/{pool_name}',
'href-vars': {
'pool_name': 'param/pool_name',
},
'hints': {
'allow': ['GET', 'PUT', 'PATCH', 'DELETE'],
'formats': {
'application/json': {},
},
},
},
# -----------------------------------------------------------------
# Flavors
# -----------------------------------------------------------------
'rel/flavors': {
'href-template': '/v2/flavors{?detailed,limit,marker}',
'href-vars': {
'detailed': 'param/detailed',
'limit': 'param/flavor_limit',
'marker': 'param/marker',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/flavor': {
'href-template': '/v2/flavors/{flavor_name}',
'href-vars': {
'flavor_name': 'param/flavor_name',
},
'hints': {
'allow': ['GET', 'PUT', 'PATCH', 'DELETE'],
'formats': {
'application/json': {},
},
},
},
# -----------------------------------------------------------------
# Health
# -----------------------------------------------------------------
'rel/health': {
'href': '/v2/health',
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
}
class Resource(object):
def __init__(self, conf):
if conf.admin_mode:
JSON_HOME['resources'].update(ADMIN_RESOURCES)
document = json.dumps(JSON_HOME, ensure_ascii=False, indent=4)
self.document_utf8 = document.encode('utf-8')
def on_get(self, req, resp, project_id):
resp.data = self.document_utf8
resp.content_type = 'application/json-home'
resp.cache_control = ['max-age=86400']
# status defaults to 200
| 1.65625 | 2 |
synapse/models/infotech.py | vertexproject/synapse | 216 | 1695 | import asyncio
import logging
import synapse.exc as s_exc
import synapse.lib.types as s_types
import synapse.lib.module as s_module
import synapse.lib.version as s_version
logger = logging.getLogger(__name__)
class Cpe23Str(s_types.Str):
'''
CPE 2.3 Formatted String
https://nvlpubs.nist.gov/nistpubs/Legacy/IR/nistir7695.pdf
(Section 6.2)
cpe:2.3: part : vendor : product : version : update : edition :
language : sw_edition : target_sw : target_hw : other
* = "any"
- = N/A
'''
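# Illustrative sketch only (the values below are assumed examples, not taken
# from the spec excerpt above): a string such as
#   cpe:2.3:a:apache:http_server:2.4.51:*:*:*:*:*:*:*
# would normalize with subs like part='a', vendor='apache',
# product='http_server', version='2.4.51', and '*' fields meaning "any".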
def __init__(self, modl, name, info, opts):
opts['lower'] = True
s_types.Str.__init__(self, modl, name, info, opts)
def _splitCpe23(self, text):
part = ''
parts = []
genr = iter(text)
try:
while True:
c = next(genr)
if c == '\\':
c += next(genr)
if c == ':':
parts.append(part)
part = ''
continue
part += c
except StopIteration:
parts.append(part)
return parts
def _normPyStr(self, valu):
if not valu.startswith('cpe:2.3:'):
mesg = 'CPE 2.3 string is expected to start with "cpe:2.3:"'
raise s_exc.BadTypeValu(valu=valu, mesg=mesg)
text, info = s_types.Str._normPyStr(self, valu)
parts = self._splitCpe23(text)
if len(parts) != 13:
mesg = f'CPE 2.3 string has {len(parts)} parts, expected 13.'
raise s_exc.BadTypeValu(valu=valu, mesg=mesg)
subs = {
'part': parts[2],
'vendor': parts[3],
'product': parts[4],
'version': parts[5],
'update': parts[6],
'edition': parts[7],
'language': parts[8],
'sw_edition': parts[9],
'target_sw': parts[10],
'target_hw': parts[11],
'other': parts[12],
}
return ':'.join(parts), {'subs': subs}
class SemVer(s_types.Int):
'''
Provides support for parsing a semantic version string into its component
parts. This normalizes a version string into an integer to allow version
ordering. Prerelease information is disregarded for integer comparison
purposes, as we cannot map an arbitrary pre-release version into an integer
value.
Major, minor and patch levels are represented as integers, with a max
width of 20 bits. The comparable integer value representing the semver
is the bitwise concatenation of the major, minor and patch levels.
Prerelease and build information will be parsed out and available as
strings if that information is present.
'''
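# Rough illustration of the packing described above, assuming the stated
# 20-bit-per-part layout (the exact packing is delegated to
# s_version.packVersion): "1.2.3" would normalize to the integer
# (1 << 40) | (2 << 20) | 3, with subs {'major': 1, 'minor': 2, 'patch': 3}.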
def postTypeInit(self):
s_types.Int.postTypeInit(self)
self.setNormFunc(str, self._normPyStr)
self.setNormFunc(int, self._normPyInt)
def _normPyStr(self, valu):
valu = valu.strip()
if not valu:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='No text left after stripping whitespace')
subs = s_version.parseSemver(valu)
if subs is None:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Unable to parse string as a semver.')
valu = s_version.packVersion(subs.get('major'), subs.get('minor'), subs.get('patch'))
return valu, {'subs': subs}
def _normPyInt(self, valu):
if valu < 0:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Cannot norm a negative integer as a semver.')
if valu > s_version.mask60:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Cannot norm an integer larger than 1152921504606846975 as a semver.')
major, minor, patch = s_version.unpackVersion(valu)
valu = s_version.packVersion(major, minor, patch)
subs = {'major': major,
'minor': minor,
'patch': patch}
return valu, {'subs': subs}
def repr(self, valu):
major, minor, patch = s_version.unpackVersion(valu)
valu = s_version.fmtVersion(major, minor, patch)
return valu
loglevels = (
(10, 'debug'),
(20, 'info'),
(30, 'notice'),
(40, 'warning'),
(50, 'err'),
(60, 'crit'),
(70, 'alert'),
(80, 'emerg'),
)
class ItModule(s_module.CoreModule):
async def initCoreModule(self):
self.model.form('it:dev:str').onAdd(self._onFormItDevStr)
self.model.form('it:dev:pipe').onAdd(self._onFormMakeDevStr)
self.model.form('it:dev:mutex').onAdd(self._onFormMakeDevStr)
self.model.form('it:dev:regkey').onAdd(self._onFormMakeDevStr)
self.model.prop('it:prod:softver:arch').onSet(self._onPropSoftverArch)
self.model.prop('it:prod:softver:vers').onSet(self._onPropSoftverVers)
self.model.prop('it:prod:softver:software').onSet(self._onPropSoftverSoft)
def bruteVersionStr(self, valu):
'''
Brute force the version out of a string.
Args:
valu (str): String to attempt to get version information for.
Notes:
This first attempts to parse strings using the it:semver normalization
before attempting to extract version parts out of the string.
Returns:
int, dict: The system normalized version integer and a subs dictionary.
'''
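# Hedged example of the intended behavior: a strict semver such as "1.2.3" is
# handled by the it:semver type directly, while a looser string like "v1.2"
# (an assumed example) would fall through to s_version.parseVersionParts and
# yield major=1, minor=2 with patch defaulting to 0.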
try:
valu, info = self.core.model.type('it:semver').norm(valu)
subs = info.get('subs')
return valu, subs
except s_exc.BadTypeValu:
# Try doing version part extraction by noming through the string
subs = s_version.parseVersionParts(valu)
if subs is None:
raise s_exc.BadTypeValu(valu=valu, name='bruteVersionStr',
mesg='Unable to brute force version parts out of the string')
if subs:
valu = s_version.packVersion(subs.get('major'),
subs.get('minor', 0),
subs.get('patch', 0))
return valu, subs
async def _onFormItDevStr(self, node):
await node.set('norm', node.ndef[1])
async def _onFormMakeDevStr(self, node):
pprop = node.ndef[1]
await node.snap.addNode('it:dev:str', pprop)
async def _onPropSoftverSoft(self, node, oldv):
# Check to see if name is available and set it if possible
prop = node.get('software')
if prop:
opts = {'vars': {'soft': prop}}
nodes = await node.snap.nodes('it:prod:soft=$soft', opts=opts)
if nodes:
name = nodes[0].get('name')
if name:
await node.set('software:name', name)
async def _onPropSoftverArch(self, node, oldv):
# make it:dev:str for arch
prop = node.get('arch')
if prop:
await node.snap.addNode('it:dev:str', prop)
async def _onPropSoftverVers(self, node, oldv):
# Set vers:norm and make it's normed valu
prop = node.get('vers')
if not prop:
return
await node.set('vers:norm', prop)
# Make it:dev:str from version str
await node.snap.addNode('it:dev:str', prop)
# form the semver properly or bruteforce parts
try:
valu, subs = self.bruteVersionStr(prop)
await node.set('semver', valu)
for k, v in subs.items():
await node.set(f'semver:{k}', v)
except asyncio.CancelledError: # pragma: no cover
raise
except Exception:
logger.exception('Failed to brute force version string [%s]', prop)
def getModelDefs(self):
modl = {
'ctors': (
('it:semver', 'synapse.models.infotech.SemVer', {}, {
'doc': 'Semantic Version type.',
}),
('it:sec:cpe', 'synapse.models.infotech.Cpe23Str', {}, {
'doc': 'A NIST CPE 2.3 Formatted String',
}),
),
'types': (
('it:hostname', ('str', {'strip': True, 'lower': True}), {
'doc': 'The name of a host or system.',
}),
('it:host', ('guid', {}), {
'doc': 'A GUID that represents a host or system.'
}),
('it:log:event', ('guid', {}), {
'doc': 'A GUID representing an individual log event.',
'interfaces': ('it:host:activity',),
}),
('it:network', ('guid', {}), {
'doc': 'A GUID that represents a logical network.'
}),
('it:domain', ('guid', {}), {
'doc': 'A logical boundary of authentication and configuration such as a windows domain.'
}),
('it:account', ('guid', {}), {
'doc': 'A GUID that represents an account on a host or network.'
}),
('it:group', ('guid', {}), {
'doc': 'A GUID that represents a group on a host or network.'
}),
('it:logon', ('guid', {}), {
'doc': 'A GUID that represents an individual logon/logoff event.'
}),
('it:hosturl', ('comp', {'fields': (('host', 'it:host'), ('url', 'inet:url'))}), {
'doc': 'A url hosted on or served by a host or system.',
}),
('it:sec:cve', ('str', {'lower': True, 'regex': r'(?i)^CVE-[0-9]{4}-[0-9]{4,}$'}), {
'doc': 'A vulnerability as designated by a Common Vulnerabilities and Exposures (CVE) number.',
'ex': 'cve-2012-0158'
}),
('it:sec:cwe', ('str', {'regex': r'^CWE-[0-9]{1,8}$'}), {
'doc': 'NIST NVD Common Weaknesses Enumeration Specification',
'ex': 'CWE-120',
}),
('it:mitre:attack:status', ('str', {'enums': 'current,deprecated,withdrawn'}), {
'doc': 'A Mitre ATT&CK element status.',
'ex': 'current',
}),
('it:mitre:attack:group', ('str', {'regex': r'^G[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Group ID.',
'ex': 'G0100',
}),
('it:mitre:attack:tactic', ('str', {'regex': r'^TA[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Tactic ID.',
'ex': 'TA0040',
}),
('it:mitre:attack:technique', ('str', {'regex': r'^T[0-9]{4}(.[0-9]{3})?$'}), {
'doc': 'A Mitre ATT&CK Technique ID.',
'ex': 'T1548',
}),
('it:mitre:attack:mitigation', ('str', {'regex': r'^M[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Mitigation ID.',
'ex': 'M1036',
}),
('it:mitre:attack:software', ('str', {'regex': r'^S[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Software ID.',
'ex': 'S0154',
}),
('it:dev:str', ('str', {}), {
'doc': 'A developer-selected string.'
}),
('it:dev:pipe', ('str', {}), {
'doc': 'A string representing a named pipe.',
}),
('it:dev:mutex', ('str', {}), {
'doc': 'A string representing a mutex.',
}),
('it:dev:int', ('int', {}), {
'doc': 'A developer selected integer constant.',
}),
('it:dev:regkey', ('str', {}), {
'doc': 'A Windows registry key.',
'ex': 'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run',
}),
('it:dev:regval', ('guid', {}), {
'doc': 'A Windows registry key/value pair.',
}),
('it:prod:soft', ('guid', {}), {
'doc': 'An arbitrary, unversioned software product.',
}),
('it:adid', ('str', {'lower': True, 'strip': True}), {
'doc': 'An advertising identification string.'}),
('it:os:windows:sid', ('str', {'regex': r'^S-1-[0-59]-\d{2}-\d{8,10}-\d{8,10}-\d{8,10}-[1-9]\d{3}$'}), {
'doc': 'A Microsoft Windows Security Identifier.',
'ex': 'S-1-5-21-1220945662-1202665555-839525555-5555',
}),
('it:os:ios:idfa', ('it:adid', {}), {
'doc': 'An iOS advertising identification string.'}),
('it:os:android:aaid', ('it:adid', {}), {
'doc': 'An android advertising identification string.'}),
('it:os:android:perm', ('str', {}), {
'doc': 'An android permission string.'}),
('it:os:android:intent', ('str', {}), {
'doc': 'An android intent string.'}),
('it:os:android:reqperm', ('comp', {'fields': (
('app', 'it:prod:soft'),
('perm', 'it:os:android:perm'))}), {
'doc': 'The given software requests the android permission.'}),
('it:os:android:ilisten', ('comp', {'fields': (
('app', 'it:prod:soft'),
('intent', 'it:os:android:intent'))}), {
'doc': 'The given software listens for an android intent.'}),
('it:os:android:ibroadcast', ('comp', {'fields': (
('app', 'it:prod:soft'),
('intent', 'it:os:android:intent')
)}), {
'doc': 'The given software broadcasts the given Android intent.'}),
('it:prod:softver', ('guid', {}), {
'doc': 'A specific version of a software product.'}),
('it:prod:softfile', ('comp', {'fields': (
('soft', 'it:prod:softver'),
('file', 'file:bytes'))}), {
'doc': 'A file is distributed by a specific software version.'}),
('it:prod:softlib', ('comp', {'fields': (
('soft', 'it:prod:softver'),
('lib', 'it:prod:softver'))}), {
'doc': 'A software version contains a library software version.'}),
('it:prod:softos', ('comp', {'fields': (
('soft', 'it:prod:softver'),
('os', 'it:prod:softver'))}), {
'doc': 'The software version is known to be compatible with the given os software version.'}),
('it:hostsoft', ('comp', {'fields': (('host', 'it:host'), ('softver', 'it:prod:softver'))}), {
'doc': 'A version of a software product which is present on a given host.',
}),
('it:av:sig', ('comp', {'fields': (('soft', 'it:prod:soft'), ('name', ('str', {'lower': True})))}), {
'doc': 'A signature name within the namespace of an antivirus engine name.'
}),
('it:av:filehit', ('comp', {'fields': (('file', 'file:bytes'), ('sig', 'it:av:sig'))}), {
'doc': 'A file that triggered an alert on a specific antivirus signature.',
}),
('it:av:prochit', ('guid', {}), {
'doc': 'An instance of a process triggering an alert on a specific antivirus signature.'
}),
('it:auth:passwdhash', ('guid', {}), {
'doc': 'An instance of a password hash.',
}),
('it:exec:proc', ('guid', {}), {
'doc': 'A process executing on a host. May be an actual (e.g., endpoint) or virtual (e.g., malware sandbox) host.',
}),
('it:exec:thread', ('guid', {}), {
'doc': 'A thread executing in a process.',
}),
('it:exec:loadlib', ('guid', {}), {
'doc': 'A library load event in a process.',
}),
('it:exec:mmap', ('guid', {}), {
'doc': 'A memory mapped segment located in a process.',
}),
('it:cmd', ('str', {'strip': True}), {
'doc': 'A unique command-line string.',
'ex': 'foo.exe --dostuff bar',
}),
('it:exec:mutex', ('guid', {}), {
'doc': 'A mutex created by a process at runtime.',
}),
('it:exec:pipe', ('guid', {}), {
'doc': 'A named pipe created by a process at runtime.',
}),
('it:exec:url', ('guid', {}), {
'doc': 'An instance of a host requesting a URL.',
}),
('it:exec:bind', ('guid', {}), {
'doc': 'An instance of a host binding a listening port.',
}),
('it:fs:file', ('guid', {}), {
'doc': 'A file on a host.'
}),
('it:exec:file:add', ('guid', {}), {
'doc': 'An instance of a host adding a file to a filesystem.',
}),
('it:exec:file:del', ('guid', {}), {
'doc': 'An instance of a host deleting a file from a filesystem.',
}),
('it:exec:file:read', ('guid', {}), {
'doc': 'An instance of a host reading a file from a filesystem.',
}),
('it:exec:file:write', ('guid', {}), {
'doc': 'An instance of a host writing a file to a filesystem.',
}),
('it:exec:reg:get', ('guid', {}), {
'doc': 'An instance of a host getting a registry key.',
}),
('it:exec:reg:set', ('guid', {}), {
'doc': 'An instance of a host creating or setting a registry key.',
}),
('it:exec:reg:del', ('guid', {}), {
'doc': 'An instance of a host deleting a registry key.',
}),
('it:app:yara:rule', ('guid', {}), {
'doc': 'A YARA rule unique identifier.',
}),
('it:app:yara:match', ('comp', {'fields': (('rule', 'it:app:yara:rule'), ('file', 'file:bytes'))}), {
'doc': 'A YARA rule match to a file.',
}),
('it:app:yara:procmatch', ('guid', {}), {
'doc': 'An instance of a YARA rule match to a process.',
}),
('it:app:snort:rule', ('guid', {}), {
'doc': 'A snort rule unique identifier.',
}),
('it:app:snort:hit', ('guid', {}), {
'doc': 'An instance of a snort rule hit.',
}),
('it:reveng:function', ('guid', {}), {
'doc': 'A function inside an executable.',
}),
('it:reveng:filefunc', ('comp', {'fields': (('file', 'file:bytes'), ('function', 'it:reveng:function'))}), {
'doc': 'An instance of a function in an executable.',
}),
('it:reveng:funcstr', ('comp', {'fields': (('function', 'it:reveng:function'), ('string', 'str'))}), {
'deprecated': True,
'doc': 'A reference to a string inside a function.',
}),
('it:reveng:impfunc', ('str', {'lower': 1}), {
'doc': 'A function from an imported library.',
}),
),
'interfaces': (
('it:host:activity', {
'props': (
('exe', ('file:bytes', {}), {
'doc': 'The executable file which caused the activity.'}),
('proc', ('it:exec:proc', {}), {
'doc': 'The host process which caused the activity.'}),
('thread', ('it:exec:thread', {}), {
'doc': 'The host thread which caused the activity.'}),
('host', ('it:host', {}), {
'doc': 'The host on which the activity occurred.'}),
('time', ('time', {}), {
'doc': 'The time that the activity started.'}),
),
}),
),
'forms': (
('it:hostname', {}, ()),
('it:host', {}, (
('name', ('it:hostname', {}), {
'doc': 'The name of the host or system.',
}),
('desc', ('str', {}), {
'doc': 'A free-form description of the host.',
}),
('domain', ('it:domain', {}), {
'doc': 'The authentication domain that the host is a member of.',
}),
('ipv4', ('inet:ipv4', {}), {
'doc': 'The last known ipv4 address for the host.'
}),
('latlong', ('geo:latlong', {}), {
'doc': 'The last known location for the host.'
}),
('place', ('geo:place', {}), {
'doc': 'The place where the host resides.',
}),
('loc', ('loc', {}), {
'doc': 'The geo-political location string for the node.',
}),
('os', ('it:prod:softver', {}), {
'doc': 'The operating system of the host.'
}),
('manu', ('str', {}), {
'doc': 'The manufacturer of the host.',
}),
('model', ('str', {}), {
'doc': 'The product model of the host.',
}),
('serial', ('str', {}), {
'doc': 'The serial number of the host.',
}),
('operator', ('ps:contact', {}), {
'doc': 'The operator of the host.',
}),
('org', ('ou:org', {}), {
'doc': 'The org that operates the given host.',
}),
)),
('it:log:event', {}, (
('mesg', ('str', {}), {
'doc': 'The log message text.',
}),
('severity', ('int', {'enums': loglevels}), {
'doc': 'A log level integer that increases with severity.',
}),
('data', ('data', {}), {
'doc': 'A raw JSON record of the log event.',
}),
)),
('it:domain', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the domain.',
}),
('desc', ('str', {}), {
'doc': 'A brief description of the domain.',
}),
('org', ('ou:org', {}), {
'doc': 'The org that operates the given domain.',
}),
)),
('it:network', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the network.',
}),
('desc', ('str', {}), {
'doc': 'A brief description of the network.',
}),
('org', ('ou:org', {}), {
'doc': 'The org that owns/operates the network.',
}),
('net4', ('inet:net4', {}), {
'doc': 'The optional contiguous IPv4 address range of this network.',
}),
('net6', ('inet:net6', {}), {
'doc': 'The optional contiguous IPv6 address range of this network.',
}),
)),
('it:account', {}, (
('user', ('inet:user', {}), {
'doc': 'The username associated with the account',
}),
('contact', ('ps:contact', {}), {
'doc': 'Additional contact information associated with this account.',
}),
('host', ('it:host', {}), {
'doc': 'The host where the account is registered.',
}),
('domain', ('it:domain', {}), {
'doc': 'The authentication domain where the account is registered.',
}),
('posix:uid', ('int', {}), {
'doc': 'The user ID of the account.',
'ex': '1001',
}),
('posix:gid', ('int', {}), {
'doc': 'The primary group ID of the account.',
'ex': '1001',
}),
('posix:gecos', ('int', {}), {
'doc': 'The GECOS field for the POSIX account.',
}),
('posix:home', ('file:path', {}), {
'doc': "The path to the POSIX account's home directory.",
'ex': '/home/visi',
}),
('posix:shell', ('file:path', {}), {
'doc': "The path to the POSIX account's default shell.",
'ex': '/bin/bash',
}),
('windows:sid', ('it:os:windows:sid', {}), {
'doc': 'The Microsoft Windows Security Identifier of the account.',
}),
('groups', ('array', {'type': 'it:group'}), {
'doc': 'An array of groups that the account is a member of.',
}),
)),
('it:group', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the group.',
}),
('desc', ('str', {}), {
'doc': 'A brief description of the group.',
}),
('host', ('it:host', {}), {
'doc': 'The host where the group is registered.',
}),
('domain', ('it:domain', {}), {
'doc': 'The authentication domain where the group is registered.',
}),
('groups', ('array', {'type': 'it:group'}), {
'doc': 'Groups that are a member of this group.',
}),
('posix:gid', ('int', {}), {
'doc': 'The primary group ID of the account.',
'ex': '1001',
}),
('windows:sid', ('it:os:windows:sid', {}), {
'doc': 'The Microsoft Windows Security Identifier of the group.',
}),
)),
('it:logon', {}, (
('time', ('time', {}), {
'doc': 'The time the logon occurred.',
}),
('success', ('bool', {}), {
'doc': 'Set to false to indicate an unsuccessful logon attempt.',
}),
('logoff:time', ('time', {}), {
'doc': 'The time the logon session ended.',
}),
('host', ('it:host', {}), {
'doc': 'The host that the account logged in to.',
}),
('account', ('it:account', {}), {
'doc': 'The account that logged in.',
}),
('creds', ('auth:creds', {}), {
'doc': 'The credentials that were used for the logon.',
}),
('duration', ('duration', {}), {
'doc': 'The duration of the logon session.',
}),
('client:host', ('it:host', {}), {
'doc': 'The host where the logon originated.',
}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 where the logon originated.',
}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 where the logon originated.',
}),
)),
('it:hosturl', {}, (
('host', ('it:host', {}), {
'ro': True,
'doc': 'Host serving a url.',
}),
('url', ('inet:url', {}), {
'ro': True,
'doc': 'URL available on the host.',
}),
)),
('it:dev:str', {}, (
('norm', ('str', {'lower': True}), {
'doc': 'Lower case normalized version of the it:dev:str.',
}),
)),
('it:sec:cve', {}, (
('desc', ('str', {}), {
'doc': 'A free-form description of the CVE vulnerability.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'A URL linking this CVE to a full description.',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the CVE ID.',
}),
)),
('it:sec:cpe', {}, (
('part', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "part" field from the CPE 2.3 string.'}),
('vendor', ('ou:name', {}), {
'ro': True,
'doc': 'The "vendor" field from the CPE 2.3 string.'}),
('product', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "product" field from the CPE 2.3 string.'}),
('version', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "version" field from the CPE 2.3 string.'}),
('update', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "update" field from the CPE 2.3 string.'}),
('edition', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "edition" field from the CPE 2.3 string.'}),
('language', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "language" field from the CPE 2.3 string.'}),
('sw_edition', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "sw_edition" field from the CPE 2.3 string.'}),
('target_sw', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "target_sw" field from the CPE 2.3 string.'}),
('target_hw', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "target_hw" field from the CPE 2.3 string.'}),
('other', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "other" field from the CPE 2.3 string.'}),
)),
('it:sec:cwe', {}, (
('name', ('str', {}), {
'doc': 'The CWE description field.',
'ex': 'Buffer Copy without Checking Size of Input (Classic Buffer Overflow)',
}),
('desc', ('str', {}), {
'doc': 'The CWE description field.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'A URL linking this CWE to a full description.',
}),
('parents', ('array', {'type': 'it:sec:cwe',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ChildOf CWE Relationships.'
}),
)),
('it:mitre:attack:group', {}, (
('org', ('ou:org', {}), {
'doc': 'Used to map an ATT&CK group to a synapse ou:org.',
}),
('name', ('ou:name', {}), {
'doc': 'The primary name for the ATT&CK group.',
}),
('names', ('array', {'type': 'ou:name', 'uniq': True, 'sorted': True}), {
'doc': 'An array of alternate names for the ATT&CK group.',
}),
('desc', ('str', {}), {
'doc': 'A description of the ATT&CK group.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK group.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK group ID.',
'ex': 'cno.mitre.g0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK group.',
}),
('techniques', ('array', {'type': 'it:mitre:attack:technique',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK technique IDs used by the group.',
}),
('software', ('array', {'type': 'it:mitre:attack:software',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK software IDs used by the group.',
}),
)),
('it:mitre:attack:tactic', {}, (
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK tactic.',
}),
('desc', ('str', {}), {
'doc': 'A description of the ATT&CK tactic.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK tactic.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK tactic.',
'ex': 'cno.mitre.ta0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK tactic.',
}),
)),
('it:mitre:attack:technique', {}, (
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK technique.',
}),
('status', ('it:mitre:attack:status', {}), {
'doc': 'The status of this ATT&CK technique.',
}),
('isnow', ('it:mitre:attack:technique', {}), {
'doc': 'If deprecated, this field may contain the current value for the technique.',
}),
('desc', ('str', {'strip': True}), {
'doc': 'A description of the ATT&CK technique.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK technique.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK technique.',
'ex': 'cno.mitre.t0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK technique.',
}),
('parent', ('it:mitre:attack:technique', {}), {
'doc': 'The parent ATT&CK technique on this sub-technique.',
}),
('tactics', ('array', {'type': 'it:mitre:attack:tactic',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK tactics that include this technique.',
}),
)),
('it:mitre:attack:software', {}, (
('software', ('it:prod:soft', {}), {
'doc': 'Used to map an ATT&CK software to a synapse it:prod:soft.',
}),
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK software.',
}),
('names', ('array', {'type': 'str', 'uniq': True, 'sorted': True}), {
'doc': 'Associated names for the ATT&CK software.',
}),
('desc', ('str', {'strip': True}), {
'doc': 'A description of the ATT&CK software.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK software.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK software.',
'ex': 'cno.mitre.s0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK software.',
}),
('techniques', ('array', {'type': 'it:mitre:attack:technique',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of techniques used by the software.',
}),
)),
('it:mitre:attack:mitigation', {}, (
# TODO map to an eventual risk:mitigation
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK mitigation.',
}),
('desc', ('str', {'strip': True}), {
'doc': 'A description of the ATT&CK mitigation.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK mitigation.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK mitigation.',
'ex': 'cno.mitre.m0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK mitigation.',
}),
('addresses', ('array', {'type': 'it:mitre:attack:technique',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK technique IDs addressed by the mitigation.',
}),
)),
('it:dev:int', {}, ()),
('it:dev:pipe', {}, ()),
('it:dev:mutex', {}, ()),
('it:dev:regkey', {}, ()),
('it:dev:regval', {}, (
('key', ('it:dev:regkey', {}), {
'doc': 'The Windows registry key.',
}),
('str', ('it:dev:str', {}), {
'doc': 'The value of the registry key, if the value is a string.',
}),
('int', ('it:dev:int', {}), {
'doc': 'The value of the registry key, if the value is an integer.',
}),
('bytes', ('file:bytes', {}), {
'doc': 'The file representing the value of the registry key, if the value is binary data.',
}),
)),
('it:prod:soft', {}, (
('name', ('str', {'lower': True, 'strip': True}), {
'doc': 'Name of the software.',
}),
('names', ('array', {'type': 'it:dev:str', 'uniq': True, 'sorted': True}), {
'doc': 'Observed/variant names for this software.',
}),
('desc', ('str', {}), {
'doc': 'A description of the software.',
'disp': {'hint': 'text'},
}),
('desc:short', ('str', {'lower': True}), {
'doc': 'A short description of the software.',
}),
('cpe', ('it:sec:cpe', {}), {
'doc': 'The NIST CPE 2.3 string specifying this software.',
}),
('author', ('ps:contact', {}), {
'doc': 'The contact information of the org or person who authored the software.',
}),
('author:org', ('ou:org', {}), {
'deprecated': True,
'doc': 'Organization which authored the software.',
}),
('author:acct', ('inet:web:acct', {}), {
'deprecated': True,
'doc': 'Web account of the software author.',
}),
('author:email', ('inet:email', {}), {
'deprecated': True,
'doc': 'Email address of the software author.',
}),
('author:person', ('ps:person', {}), {
'deprecated': True,
'doc': 'Person who authored the software.',
}),
('url', ('inet:url', {}), {
'doc': 'URL relevant for the software.',
}),
('isos', ('bool', {}), {
'doc': 'Set to True if the software is an operating system.'}),
('islib', ('bool', {}), {
'doc': 'Set to True if the software is a library.'}),
)),
('it:adid', {}, ()),
('it:os:ios:idfa', {}, ()),
('it:os:android:aaid', {}, ()),
('it:os:android:perm', {}, ()),
('it:os:android:intent', {}, ()),
('it:os:android:reqperm', {}, (
('app', ('it:prod:softver', {}), {'ro': True,
'doc': 'The android app which requests the permission.'}),
('perm', ('it:os:android:perm', {}), {'ro': True,
'doc': 'The android permission requested by the app.'}),
)),
('it:prod:softos', {}, (
('soft', ('it:prod:softver', {}), {'ro': True,
'doc': 'The software which can run on the operating system.'}),
('os', ('it:prod:softver', {}), {'ro': True,
'doc': 'The operating system which the software can run on.'}),
)),
('it:os:android:ilisten', {}, (
('app', ('it:prod:softver', {}), {'ro': True,
'doc': 'The app software which listens for the android intent.'}),
('intent', ('it:os:android:intent', {}), {'ro': True,
'doc': 'The android intent which is listened for by the app.'}),
)),
('it:os:android:ibroadcast', {}, (
('app', ('it:prod:softver', {}), {'ro': True,
'doc': 'The app software which broadcasts the android intent.'}),
('intent', ('it:os:android:intent', {}), {'ro': True,
'doc': 'The android intent which is broadcast by the app.'}),
)),
('it:prod:softver', {}, (
('software', ('it:prod:soft', {}), {
'doc': 'Software associated with this version instance.',
}),
('software:name', ('str', {'lower': True, 'strip': True}), {
'doc': 'The name of the software at a particular version.',
}),
('names', ('array', {'type': 'it:dev:str', 'uniq': True, 'sorted': True}), {
'doc': 'Observed/variant names for this software version.',
}),
('cpe', ('it:sec:cpe', {}), {
'doc': 'The NIST CPE 2.3 string specifying this software version',
}),
('cves', ('array', {'type': 'it:sec:cve', 'uniq': True, 'sorted': True}), {
'doc': 'A list of CVEs that apply to this software version.',
}),
('vers', ('it:dev:str', {}), {
'doc': 'Version string associated with this version instance.',
}),
('vers:norm', ('str', {'lower': True}), {
'doc': 'Normalized version of the version string.',
}),
('arch', ('it:dev:str', {}), {
'doc': 'Software architecture.',
}),
('released', ('time', {}), {
'doc': 'Timestamp for when this version of the software was released.',
}),
('semver', ('it:semver', {}), {
'doc': 'System normalized semantic version number.',
}),
('semver:major', ('int', {}), {
'doc': 'Version major number.',
}),
('semver:minor', ('int', {}), {
'doc': 'Version minor number.',
}),
('semver:patch', ('int', {}), {
'doc': 'Version patch number.',
}),
('semver:pre', ('str', {}), {
'doc': 'Semver prerelease string.',
}),
('semver:build', ('str', {}), {
'doc': 'Semver build string.',
}),
('url', ('inet:url', {}), {
'doc': 'URL where a specific version of the software is available from.',
}),
)),
('it:prod:softlib', {}, (
('soft', ('it:prod:softver', {}), {'ro': True,
'doc': 'The software version that contains the library.'}),
('lib', ('it:prod:softver', {}), {'ro': True,
'doc': 'The library software version.'}),
)),
('it:prod:softfile', {}, (
('soft', ('it:prod:softver', {}), {'ro': True,
'doc': 'The software which distributes the file.'}),
('file', ('file:bytes', {}), {'ro': True,
'doc': 'The file distributed by the software.'}),
('path', ('file:path', {}), {
'doc': 'The default installation path of the file.'}),
)),
('it:hostsoft', {}, (
('host', ('it:host', {}), {'ro': True,
'doc': 'Host with the software.'}),
('softver', ('it:prod:softver', {}), {'ro': True,
'doc': 'Software on the host.'})
)),
('it:av:sig', {}, (
('soft', ('it:prod:soft', {}), {
'ro': True,
'doc': 'The anti-virus product which contains the signature.',
}),
('name', ('str', {'lower': True}), {
'ro': True,
'doc': 'The signature name.'
}),
('desc', ('str', {}), {
'doc': 'A free-form description of the signature.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'A reference URL for information about the signature.',
})
)),
('it:av:filehit', {}, (
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that triggered the signature hit.',
}),
('sig', ('it:av:sig', {}), {
'ro': True,
'doc': 'The signature that the file triggered on.'
}),
('sig:name', ('str', {'lower': True}), {
'ro': True,
'doc': 'The signature name.',
}),
('sig:soft', ('it:prod:soft', {}), {
'ro': True,
'doc': 'The anti-virus product which contains the signature.',
}),
)),
('it:av:prochit', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process that triggered the signature hit.',
}),
('sig', ('it:av:sig', {}), {
'doc': 'The signature that the process triggered on.'
}),
('time', ('time', {}), {
'doc': 'The time that the AV engine detected the signature.'
}),
)),
('it:auth:passwdhash', {}, (
('salt', ('hex', {}), {
'doc': 'The (optional) hex encoded salt value used to calculate the password hash.',
}),
('hash:md5', ('hash:md5', {}), {
'doc': 'The MD5 password hash value.',
}),
('hash:sha1', ('hash:sha1', {}), {
'doc': 'The SHA1 password hash value.',
}),
('hash:sha256', ('hash:sha256', {}), {
'doc': 'The SHA256 password hash value.',
}),
('hash:sha512', ('hash:sha512', {}), {
'doc': 'The SHA512 password hash value.',
}),
('hash:lm', ('hash:lm', {}), {
'doc': 'The LM password hash value.',
}),
('hash:ntlm', ('hash:ntlm', {}), {
'doc': 'The NTLM password hash value.',
}),
('passwd', ('inet:passwd', {}), {
'doc': 'The (optional) clear text password for this password hash.',
}),
)),
('it:cmd', {}, ()),
('it:exec:proc', {}, (
('host', ('it:host', {}), {
'doc': 'The host that executed the process. May be an actual or a virtual / notional host.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The file considered the "main" executable for the process. For example, rundll32.exe may be considered the "main" executable for DLLs loaded by that program.',
}),
('cmd', ('it:cmd', {}), {
'doc': 'The command string used to launch the process, including any command line parameters.',
'disp': {'hint': 'text'},
}),
('pid', ('int', {}), {
'doc': 'The process ID.',
}),
('time', ('time', {}), {
'doc': 'The start time for the process.',
}),
('exited', ('time', {}), {
'doc': 'The time the process exited.',
}),
('exitcode', ('int', {}), {
'doc': 'The exit code for the process.',
}),
('user', ('inet:user', {}), {
'doc': 'The user name of the process owner.',
}),
('path', ('file:path', {}), {
'doc': 'The path to the executable of the process.',
}),
('src:exe', ('file:path', {}), {
'doc': 'The path to the executable which started the process.',
}),
('src:proc', ('it:exec:proc', {}), {
'doc': 'The process which created the process.'
}),
('killedby', ('it:exec:proc', {}), {
'doc': 'The process which killed this process.',
}),
)),
('it:exec:thread', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process which contains the thread.',
}),
('created', ('time', {}), {
'doc': 'The time the thread was created.',
}),
('exited', ('time', {}), {
'doc': 'The time the thread exited.',
}),
('exitcode', ('int', {}), {
'doc': 'The exit code or return value for the thread.',
}),
('src:proc', ('it:exec:proc', {}), {
'doc': 'An external process which created the thread.',
}),
('src:thread', ('it:exec:thread', {}), {
'doc': 'The thread which created this thread.',
}),
)),
('it:exec:loadlib', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process where the library was loaded.',
}),
('va', ('int', {}), {
'doc': 'The base memory address where the library was loaded in the process.',
}),
('loaded', ('time', {}), {
'doc': 'The time the library was loaded.',
}),
('unloaded', ('time', {}), {
'doc': 'The time the library was unloaded.',
}),
('path', ('file:path', {}), {
'doc': 'The path that the library was loaded from.',
}),
('file', ('file:bytes', {}), {
'doc': 'The library file that was loaded.',
}),
)),
('it:exec:mmap', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process where the memory was mapped.',
}),
('va', ('int', {}), {
'doc': 'The base memory address where the map was created in the process.',
}),
('size', ('int', {}), {
'doc': 'The size of the memory map in bytes.',
}),
('perms:read', ('bool', {}), {
'doc': 'True if the mmap is mapped with read permissions.',
}),
('perms:write', ('bool', {}), {
'doc': 'True if the mmap is mapped with write permissions.',
}),
('perms:execute', ('bool', {}), {
'doc': 'True if the mmap is mapped with execute permissions.',
}),
('created', ('time', {}), {
'doc': 'The time the memory map was created.',
}),
('deleted', ('time', {}), {
'doc': 'The time the memory map was deleted.',
}),
('path', ('file:path', {}), {
'doc': 'The file path if the mmap is a mapped view of a file.',
}),
('hash:sha256', ('hash:sha256', {}), {
'doc': 'A SHA256 hash of the memory map. Bytes may optionally be present in the axon.',
}),
)),
('it:exec:mutex', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that created the mutex.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that created the mutex. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that created the mutex. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the mutex was created.',
}),
('name', ('it:dev:mutex', {}), {
'doc': 'The mutex string.',
}),
)),
('it:exec:pipe', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that created the named pipe.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that created the named pipe. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that created the named pipe. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the named pipe was created.',
}),
('name', ('it:dev:pipe', {}), {
'doc': 'The named pipe string.',
}),
)),
('it:exec:url', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that requested the URL.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that requested the URL. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that requested the URL. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the URL was requested.',
}),
('url', ('inet:url', {}), {
'doc': 'The URL that was requested.',
}),
('client', ('inet:client', {}), {
'doc': 'The address of the client during the URL retrieval.'
}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 of the client during the URL retrieval.'
}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 of the client during the URL retrieval.'
}),
('client:port', ('inet:port', {}), {
'doc': 'The client port during the URL retrieval.'
}),
)),
('it:exec:bind', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that bound the listening port.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that bound the listening port. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that bound the listening port. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the port was bound.',
}),
('server', ('inet:server', {}), {
'doc': 'The inet:addr of the server when binding the port.'
}),
('server:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 address specified to bind().'
}),
('server:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 address specified to bind().'
}),
('server:port', ('inet:port', {}), {
'doc': 'The bound (listening) TCP port.'
}),
)),
('it:fs:file', {}, (
('host', ('it:host', {}), {
'doc': 'The host containing the file.',
}),
('path', ('file:path', {}), {
'doc': 'The path for the file.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file on the host.',
}),
('ctime', ('time', {}), {
'doc': 'The file creation time.',
}),
('mtime', ('time', {}), {
'doc': 'The file modification time.',
}),
('atime', ('time', {}), {
'doc': 'The file access time.',
}),
('user', ('inet:user', {}), {
'doc': 'The owner of the file.',
}),
('group', ('inet:user', {}), {
'doc': 'The group owner of the file.',
}),
)),
('it:exec:file:add', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that created the new file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that created the new file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that created the new file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was created.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was created.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was created.',
}),
)),
('it:exec:file:del', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that deleted the file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that deleted the file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that deleted the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was deleted.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was deleted.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was deleted.',
}),
)),
('it:exec:file:read', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that read the file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that read the file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that read the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was read.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was read.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was read.',
}),
)),
('it:exec:file:write', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that wrote to / modified the existing file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that wrote to the file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that wrote to the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was written to/modified.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was written to/modified.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was modified.',
}),
)),
('it:exec:reg:get', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that read the registry.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that read the registry. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that read the registry. May or may not be the same :exe referenced in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the registry was read.',
}),
('reg', ('it:dev:regval', {}), {
'doc': 'The registry key or value that was read.',
}),
)),
('it:exec:reg:set', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that wrote to the registry.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that wrote to the registry. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that wrote to the registry. May or may not be the same :exe referenced in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the registry was written to.',
}),
('reg', ('it:dev:regval', {}), {
'doc': 'The registry key or value that was written to.',
}),
)),
('it:exec:reg:del', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that deleted data from the registry.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that deleted data from the registry. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that deleted data from the registry. May or may not be the same :exe referenced in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the data from the registry was deleted.',
}),
('reg', ('it:dev:regval', {}), {
'doc': 'The registry key or value that was deleted.',
}),
)),
('it:app:snort:rule', {}, (
('text', ('str', {}), {
'doc': 'The snort rule text.',
'disp': {'hint': 'text'},
}),
('name', ('str', {}), {
'doc': 'The name of the snort rule.'}),
('version', ('it:semver', {}), {
'doc': 'The current version of the rule.'}),
)),
('it:app:snort:hit', {}, (
('rule', ('it:app:snort:rule', {}), {
'doc': 'The snort rule that matched the file.'}),
('flow', ('inet:flow', {}), {
'doc': 'The inet:flow that matched the snort rule.'}),
('src', ('inet:addr', {}), {
                    'doc': 'The source address of the flow that caused the hit.'}),
('src:ipv4', ('inet:ipv4', {}), {
'doc': 'The source IPv4 address of the flow that caused the hit.'}),
('src:ipv6', ('inet:ipv6', {}), {
'doc': 'The source IPv6 address of the flow that caused the hit.'}),
('src:port', ('inet:port', {}), {
'doc': 'The source port of the flow that caused the hit.'}),
('dst', ('inet:addr', {}), {
                    'doc': 'The destination address of the flow that caused the hit.'}),
('dst:ipv4', ('inet:ipv4', {}), {
'doc': 'The destination IPv4 address of the flow that caused the hit.'}),
('dst:ipv6', ('inet:ipv6', {}), {
                    'doc': 'The destination IPv6 address of the flow that caused the hit.'}),
('dst:port', ('inet:port', {}), {
'doc': 'The destination port of the flow that caused the hit.'}),
('time', ('time', {}), {
'doc': 'The time of the network flow that caused the hit.'}),
('sensor', ('it:host', {}), {
'doc': 'The sensor host node that produced the hit.'}),
('version', ('it:semver', {}), {
'doc': 'The version of the rule at the time of match.'}),
)),
('it:app:yara:rule', {}, (
('text', ('str', {}), {
'doc': 'The YARA rule text.',
'disp': {'hint': 'text'},
}),
('name', ('str', {}), {
'doc': 'The name of the YARA rule.'}),
('author', ('ps:contact', {}), {
'doc': 'Contact info for the author of the YARA rule.'}),
('version', ('it:semver', {}), {
'doc': 'The current version of the rule.'}),
('enabled', ('bool', {}), {
'doc': 'The rule enabled status to be used for YARA evaluation engines.'}),
)),
('it:app:yara:match', {}, (
('rule', ('it:app:yara:rule', {}), {
'ro': True,
'doc': 'The YARA rule that matched the file.'}),
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that matched the YARA rule.'}),
('version', ('it:semver', {}), {
'doc': 'The most recent version of the rule evaluated as a match.'}),
)),
('it:app:yara:procmatch', {}, (
('rule', ('it:app:yara:rule', {}), {
                    'doc': 'The YARA rule that matched the process.'}),
('proc', ('it:exec:proc', {}), {
'doc': 'The process that matched the YARA rule.'}),
('time', ('time', {}), {
'doc': 'The time that the YARA engine matched the process to the rule.'}),
('version', ('it:semver', {}), {
'doc': 'The most recent version of the rule evaluated as a match.'}),
)),
('it:reveng:function', {}, (
('name', ('str', {}), {
'doc': 'The name of the function.'}),
('description', ('str', {}), {
'doc': 'Notes concerning the function.'}),
('impcalls', ('array', {'type': 'it:reveng:impfunc'}), {
'doc': 'Calls to imported library functions within the scope of the function.',
}),
('strings', ('array', {'type': 'it:dev:str', 'uniq': True}), {
'doc': 'An array of strings referenced within the function.',
}),
)),
('it:reveng:filefunc', {}, (
('function', ('it:reveng:function', {}), {
'ro': True,
'doc': 'The guid matching the function.'}),
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that contains the function.'}),
('va', ('int', {}), {
'doc': 'The virtual address of the first codeblock of the function.'}),
('rank', ('int', {}), {
'doc': 'The function rank score used to evaluate if it exhibits interesting behavior.'}),
('complexity', ('int', {}), {
'doc': 'The complexity of the function.'}),
('funccalls', ('array', {'type': 'it:reveng:filefunc'}), {
'doc': 'Other function calls within the scope of the function.',
}),
)),
('it:reveng:funcstr', {}, (
('function', ('it:reveng:function', {}), {
'ro': True,
'doc': 'The guid matching the function.'}),
('string', ('str', {}), {
'ro': True,
'doc': 'The string that the function references.'}),
)),
('it:reveng:impfunc', {}, ()),
),
}
name = 'it'
return ((name, modl), )
| 2.3125 | 2 |
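# A hedged illustration of the shape used throughout the model above: each form is
# registered as ('form:name', {}, (props...)), and each property is a
# (name, (type, type_opts), {'doc': ...}) tuple. The form and property names below are
# hypothetical and do not exist in the model; they only mirror its structure.
example_form = ('it:example:thing', {}, (
    ('name', ('str', {'lower': True, 'strip': True}), {
        'doc': 'The name of the example thing.'}),
    ('seen', ('time', {}), {
        'doc': 'The time the example thing was observed.'}),
))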
test/test.py | bciar/ppp-web | 2 | 1696 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Unit tests."""
import os
import unittest
from copy import copy
from webui.app import create_app
class TestRoutes(unittest.TestCase):
"""Test routes."""
ignore_routes = ('/static/<path:filename>',)
ignore_end_patterns = ('>',)
def setUp(self):
"""Set up: Put Flask app in test mode."""
app = create_app()
self.initial_app = copy(app)
app.testing = True
self.app = app.test_client()
@staticmethod
def valid_route(route):
"""Validate route.
Args:
route (str): Route url pattern.
Returns:
bool: True if valid, else False.
"""
if route in TestRoutes.ignore_routes \
or route.endswith(TestRoutes.ignore_end_patterns):
return False
return True
def test_routes(self):
"""Smoke test routes to ensure no runtime errors.."""
routes = [route.rule for route in self.initial_app.url_map.iter_rules()
if self.valid_route(route.rule)]
for route in routes:
self.app.get(route)
if __name__ == '__main__':
from test.utils.doctest_unittest_runner import doctest_unittest_runner
TEST_DIR = os.path.dirname(os.path.realpath(__file__)) + '/'
doctest_unittest_runner(test_dir=TEST_DIR, relative_path_to_root='../',
package_names=['webui', 'test'])
| 2.546875 | 3 |
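# A minimal sketch of the same route smoke test in isolation, using the webui.app
# factory imported above: build the app in test mode, GET every concrete rule, and
# only check that no exception escapes. The filtering mirrors valid_route().
from webui.app import create_app
def smoke_test_routes():
    app = create_app()
    app.testing = True
    client = app.test_client()
    for rule in app.url_map.iter_rules():
        # Skip parameterized and static routes, as the test above does.
        if rule.rule.endswith('>') or rule.rule == '/static/<path:filename>':
            continue
        client.get(rule.rule)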
tests/sources/test_clang_format.py | Justin-Fisher/webots | 1,561 | 1697 | #!/usr/bin/env python
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that the C, C++ and shader source code is compliant with ClangFormat."""
import unittest
import difflib
import os
import subprocess
from io import open
from distutils.spawn import find_executable
class TestClangFormat(unittest.TestCase):
"""Unit test for ClangFormat compliance."""
def setUp(self):
"""Set up called before each test."""
self.WEBOTS_HOME = os.environ['WEBOTS_HOME']
def _runClangFormat(self, f):
"""Run clang format on 'f' file."""
return subprocess.check_output(['clang-format', '-style=file', f])
def test_clang_format_is_correctly_installed(self):
"""Test ClangFormat is correctly installed."""
self.assertTrue(
find_executable('clang-format') is not None,
msg='ClangFormat is not installed on this computer.'
)
clangFormatConfigFile = self.WEBOTS_HOME + os.sep + '.clang-format'
self.assertTrue(
os.path.exists(clangFormatConfigFile),
msg=clangFormatConfigFile + ' not found.'
)
def test_sources_are_clang_format_compliant(self):
"""Test that sources are ClangFormat compliant."""
directories = [
'include/controller',
'projects',
'resources/projects',
'resources/wren/shaders',
'tests',
'include/wren',
'src/controller/c',
'src/controller/cpp',
'src/license/sign',
'src/webots',
'src/wren'
]
skippedPaths = [
'projects/default/controllers/ros/include',
'projects/robots/gctronic/e-puck/transfer',
'projects/robots/mobsya/thymio/controllers/thymio2_aseba/aseba',
'projects/robots/mobsya/thymio/libraries/dashel',
'projects/robots/mobsya/thymio/libraries/dashel-src',
'projects/robots/robotis/darwin-op/libraries/libssh',
'projects/robots/robotis/darwin-op/libraries/libzip',
'projects/robots/robotis/darwin-op/libraries/robotis-op2/robotis',
'projects/robots/robotis/darwin-op/remote_control/libjpeg-turbo',
'projects/vehicles/controllers/ros_automobile/include',
'src/webots/external'
]
skippedFiles = [
'projects/robots/robotis/darwin-op/plugins/remote_controls/robotis-op2_tcpip/stb_image.h'
]
skippedDirectories = [
'build',
'python',
'java'
]
extensions = ['c', 'h', 'cpp', 'hpp', 'cc', 'hh', 'c++', 'h++', 'vert', 'frag']
modified_files = os.path.join(self.WEBOTS_HOME, 'tests', 'sources', 'modified_files.txt')
sources = []
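        # Two collection modes: if tests/sources/modified_files.txt exists, only the
        # listed files (filtered by extension, directory, and skip lists) are checked;
        # otherwise the directories above are walked recursively with the same filters.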
if os.path.isfile(modified_files):
with open(modified_files, 'r') as file:
for line in file:
line = line.strip()
extension = os.path.splitext(line)[1][1:].lower()
if extension not in extensions:
continue
found = False
for directory in directories:
if line.startswith(directory):
found = True
break
if not found:
continue
found = False
for directory in skippedPaths + skippedFiles:
if line.startswith(directory):
found = True
break
if found:
continue
for directory in skippedDirectories:
currentDirectories = line.split(os.sep)
if directory in currentDirectories:
found = True
if found:
continue
sources.append(line.replace('/', os.sep))
else:
for directory in directories:
path = self.WEBOTS_HOME + os.sep + directory.replace('/', os.sep)
for rootPath, dirNames, fileNames in os.walk(path):
shouldContinue = False
for path in skippedPaths:
if rootPath.startswith(self.WEBOTS_HOME + os.sep + path.replace('/', os.sep)):
shouldContinue = True
break
for directory in skippedDirectories:
currentDirectories = rootPath.replace(self.WEBOTS_HOME, '').split(os.sep)
if directory in currentDirectories:
shouldContinue = True
break
if shouldContinue:
continue
for fileName in fileNames:
extension = os.path.splitext(fileName)[1][1:].lower()
if extension not in extensions:
continue
path = os.path.normpath(os.path.join(rootPath, fileName))
skipFile = False
for file in skippedFiles:
if os.path.normpath((self.WEBOTS_HOME + os.sep + file.replace('/', os.sep))) == path:
skipFile = True
break
if not skipFile:
sources.append(path)
curdir = os.getcwd()
os.chdir(self.WEBOTS_HOME)
for source in sources:
diff = ''
with open(source, encoding='utf8') as file:
try:
for line in difflib.context_diff(self._runClangFormat(source).decode('utf-8').splitlines(),
file.read().splitlines()):
diff += line + '\n'
except UnicodeDecodeError:
self.assertTrue(False, msg='utf-8 decode problem in %s' % source)
self.assertTrue(
len(diff) == 0,
msg='Source file "%s" is not compliant with ClangFormat:\n\nDIFF:%s' % (source, diff)
)
os.chdir(curdir)
if __name__ == '__main__':
unittest.main()
| 2.25 | 2 |
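# The check above reduces to: run clang-format in '-style=file' mode and diff the
# output against the file on disk; an empty diff means the file is compliant.
# A standalone sketch of that core step (the path argument is illustrative):
import difflib
import subprocess
def clang_format_diff(path):
    formatted = subprocess.check_output(['clang-format', '-style=file', path]).decode('utf-8')
    with open(path, encoding='utf8') as source_file:
        original = source_file.read()
    return '\n'.join(difflib.context_diff(formatted.splitlines(), original.splitlines()))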
src/python/tests/core/system/shell_test.py | sanketsaurav/clusterfuzz | 1 | 1698 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""shell tests."""
import mock
import os
import unittest
from pyfakefs import fake_filesystem_unittest
from system import environment
from system import shell
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
class RemoveEmptyFilesTest(fake_filesystem_unittest.TestCase):
"""Tests for remove_empty_files."""
def setUp(self):
# FIXME: Add support for Windows.
if not environment.is_posix():
self.skipTest('Process tests are only applicable for posix platforms.')
test_utils.set_up_pyfakefs(self)
def test_remove(self):
"""Test remove."""
self.fs.CreateFile('/test/aa/bb.txt', contents='s')
self.fs.CreateFile('/test/aa/cc.txt', contents='')
self.fs.CreateFile('/test/aa/aa/dd.txt', contents='s')
self.fs.CreateFile('/test/aa/aa/aa.txt', contents='')
shell.remove_empty_files('/test')
self.assertTrue(os.path.exists('/test/aa/bb.txt'))
self.assertTrue(os.path.exists('/test/aa/aa/dd.txt'))
self.assertFalse(os.path.exists('/test/aa/cc.txt'))
self.assertFalse(os.path.exists('/test/aa/aa/aa.txt'))
def test_ignore_file(self):
self.fs.CreateFile('/test/aa/cc.txt', contents='')
shell.remove_empty_files('/test/aa/cc.txt')
self.assertTrue(os.path.exists('/test/aa/cc.txt'))
@mock.patch('os.remove', autospec=True)
def test_exception(self, mock_remove):
# bypass pyfakefs's os.remove.
os.remove = mock_remove
mock_remove.side_effect = OSError()
self.fs.CreateFile('/test/aa/cc.txt', contents='')
shell.remove_empty_files('/test')
self.assertTrue(os.path.exists('/test/aa/cc.txt'))
class RemoveDirectoryTest(unittest.TestCase):
"""Tests for remove_directory."""
def setUp(self):
test_helpers.patch(self, [
'os.chmod',
'os.mkdir',
'os.path.exists',
'os.system',
'system.environment.platform',
'metrics.logs.log_error',
'metrics.logs.log_warn',
'shutil.rmtree',
])
def _test_remove_os_specific(self, platform, recreate, raise_mkdir_error):
"""Helper for testing removing dir with os-specific command."""
self.mock.platform.return_value = platform
self.mock.exists.side_effect = [True, False, False]
if raise_mkdir_error:
self.mock.mkdir.side_effect = OSError()
result = shell.remove_directory('dir', recreate=recreate)
if recreate:
self.assertEqual(not raise_mkdir_error, result)
else:
self.assertTrue(result)
self.mock.rmtree.assert_has_calls([])
if recreate:
self.mock.mkdir.assert_has_calls([mock.call('dir')])
else:
self.mock.mkdir.assert_has_calls([])
def test_remove_os_specific_windows(self):
"""Test remove with os-specific command on windows."""
self._test_remove_os_specific('WINDOWS', True, False)
self.mock.system.assert_has_calls([mock.call('rd /s /q "dir" > nul 2>&1')])
def test_remove_os_specific_non_windows(self):
"""Test remove with os-specific command on non-windows."""
self._test_remove_os_specific('LINUX', True, False)
self.mock.system.assert_has_calls(
[mock.call('rm -rf "dir" > /dev/null 2>&1')])
def test_remove_without_recreate(self):
"""Test remove without recreate."""
self._test_remove_os_specific('LINUX', False, True)
def test_remove_with_mkdir_error(self):
"""Test remove when mkdir errors."""
self._test_remove_os_specific('LINUX', True, True)
def test_remove_shutil_success(self):
"""Test remove with shutil."""
self.mock.exists.side_effect = [True, True, False]
self.assertTrue(shell.remove_directory('dir'))
self.mock.system.assert_has_calls(
[mock.call('rm -rf "dir" > /dev/null 2>&1')])
self.mock.rmtree.assert_has_calls([mock.call('dir', onerror=mock.ANY)])
def test_remove_shutil_failure(self):
"""Test remove with shutil but fails."""
self.mock.exists.side_effect = [True, True, True]
self.assertFalse(shell.remove_directory('dir'))
self.mock.log_error.assert_has_calls(
[mock.call('Failed to clear directory dir.')])
self.assertEqual(0, self.mock.log_warn.call_count)
self.mock.system.assert_has_calls(
[mock.call('rm -rf "dir" > /dev/null 2>&1')])
self.mock.rmtree.assert_has_calls([mock.call('dir', onerror=mock.ANY)])
def test_remove_shutil_failure_ignore_errors(self):
self.mock.exists.side_effect = [True, True, True]
self.assertFalse(shell.remove_directory('dir', ignore_errors=True))
self.mock.log_warn.assert_has_calls(
[mock.call('Failed to clear directory dir.')])
self.assertEqual(0, self.mock.log_error.call_count)
self.mock.system.assert_has_calls(
[mock.call('rm -rf "dir" > /dev/null 2>&1')])
self.mock.rmtree.assert_has_calls([mock.call('dir', onerror=mock.ANY)])
def test_remove_shutil_onerror(self):
"""Test shutil invoking onerror."""
self.mock.exists.side_effect = [True, True, False]
self.assertTrue(shell.remove_directory('dir'))
self.mock.system.assert_has_calls(
[mock.call('rm -rf "dir" > /dev/null 2>&1')])
self.mock.rmtree.assert_has_calls([mock.call('dir', onerror=mock.ANY)])
onerror = self.mock.rmtree.call_args[1]['onerror']
fake_fn = mock.MagicMock()
fake_fn.side_effect = OSError()
onerror(fake_fn, 'dir/child', ImportError())
self.mock.chmod.assert_has_calls([mock.call('dir/child', 0o750)])
fake_fn.assert_has_calls([mock.call('dir/child')])
class GetDirectoryFileCount(fake_filesystem_unittest.TestCase):
"""Tests for get_directory_file_count."""
def setUp(self):
test_utils.set_up_pyfakefs(self)
def test(self):
"""Test get_directory_file_count."""
self.fs.CreateFile('/test/aa/bb.txt', contents='abc')
self.fs.CreateFile('/test/aa/cc.txt', contents='def')
self.fs.CreateFile('/test/aa/aa/aa.txt', contents='ghi')
self.fs.CreateFile('/test/aa/aa/dd.txt', contents='t')
self.assertEqual(shell.get_directory_file_count('/test/aa'), 4)
class GetDirectorySizeTest(fake_filesystem_unittest.TestCase):
"""Tests for get_directory_size."""
def setUp(self):
test_utils.set_up_pyfakefs(self)
def test(self):
"""Test get_directory_size."""
self.fs.CreateFile('/test/aa/bb.txt', contents='abc')
self.fs.CreateFile('/test/aa/cc.txt', contents='def')
self.fs.CreateFile('/test/aa/aa/aa.txt', contents='ghi')
self.fs.CreateFile('/test/aa/aa/dd.txt', contents='t')
self.assertEqual(shell.get_directory_size('/test/aa'), 10)
class WhichTest(fake_filesystem_unittest.TestCase):
"""Tests for which (shutil.which)."""
def setUp(self):
# FIXME: Add support for Windows.
if not environment.is_posix():
self.skipTest('Which test is only supported on posix platforms.')
def test(self):
self.assertEqual('/bin/ls', shell.which('ls'))
class ClearSystemTempDirectoryTest(fake_filesystem_unittest.TestCase):
"""Tests for clear_system_temp_directory."""
def setUp(self):
test_helpers.patch(self, [
'tempfile.gettempdir',
])
self.mock.gettempdir.return_value = '/tmp'
test_utils.set_up_pyfakefs(self)
def test(self):
"""Test clear_system_temp_directory works as expected."""
self.fs.CreateFile('/tmp/aa/bb.txt', contents='abc')
self.fs.CreateFile('/tmp/cc/dd/ee.txt', contents='def')
self.fs.CreateDirectory('/tmp/ff/gg')
self.fs.CreateDirectory('/tmp/hh')
self.fs.CreateDirectory('/unrelated')
self.fs.CreateFile('/unrelated/zz.txt', contents='zzz')
os.symlink('/unrelated/zz.txt', '/tmp/hh/gg.txt')
os.symlink('/unrelated', '/tmp/ii')
shell.clear_system_temp_directory()
self.assertTrue(os.path.exists('/tmp'))
self.assertTrue(os.path.exists('/unrelated'))
self.assertEqual(shell.get_directory_file_count('/tmp'), 0)
self.assertEqual(shell.get_directory_file_count('/unrelated'), 1)
self.assertFalse(os.path.exists('/tmp/aa/bb.txt'))
self.assertFalse(os.path.exists('/tmp/cc/dd/ee.txt'))
self.assertFalse(os.path.exists('/tmp/ff/gg'))
self.assertFalse(os.path.exists('/tmp/hh'))
class GetExecuteCommand(unittest.TestCase):
"""Test that the correct commands to run files are returned."""
def call_and_assert_helper(self, expected_command, file_to_execute):
"""Call get_execute_command on |file_to_execute| and assert result equal to
|expected_command|."""
self.assertEqual(expected_command,
shell.get_execute_command(file_to_execute))
def test_standard_script(self):
"""Test correct command returned for python script."""
script_name = 'script.py'
expected_command = 'python %s' % script_name
self.call_and_assert_helper(expected_command, script_name)
def test_java(self):
"""Test correct launch command returned for Java class."""
script_name = 'javaclassfile.class'
expected_command = 'java javaclassfile'
self.call_and_assert_helper(expected_command, script_name)
def test_binary(self):
"""Test correct launch command returned for a binary (executable) file."""
executable_name = 'executable'
self.call_and_assert_helper(executable_name, executable_name)
executable_name += '.exe'
self.call_and_assert_helper(executable_name, executable_name)
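# A sketch of the behavior the two classes above assert, as an assumed simplified
# re-implementation for illustration only (not ClusterFuzz's actual shell module):
def _sketch_get_interpreter(file_to_execute):
  if file_to_execute.endswith('.py'):
    return 'python'
  return None
def _sketch_get_execute_command(file_to_execute):
  interpreter = _sketch_get_interpreter(file_to_execute)
  if interpreter:
    return '%s %s' % (interpreter, file_to_execute)
  if file_to_execute.endswith('.class'):
    return 'java ' + file_to_execute[:-len('.class')]
  return file_to_execute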
class GetInterpreter(unittest.TestCase):
  """Test that the correct interpreters to execute a file are returned."""
  def test_get_interpreted_file(self):
    """Test correct interpreter is returned for a file that needs one."""
    self.assertEqual('python', shell.get_interpreter('run.py'))
  def test_get_non_interpreter_file(self):
    """Test that None is returned for a file that doesn't need one. None is
    easier to check for than an empty string."""
    self.assertIsNone(shell.get_interpreter('executable'))
| 2.109375 | 2 |
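# The onerror test above exercises a common cleanup pattern: when shutil.rmtree hits a
# permission error, chmod the offending path and retry the failing call. A standalone
# sketch of that handler (illustrative, not ClusterFuzz's implementation):
import os
import shutil
def _chmod_and_retry(func, path, exc_info):
  os.chmod(path, 0o750)
  func(path)
def remove_tree_with_retry(path):
  shutil.rmtree(path, onerror=_chmod_and_retry)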
Language Model/birnn/model.py | osamaqureshi/NLP-for-Urdu | 1 | 1699 | <filename>Language Model/birnn/model.py<gh_stars>1-10
import numpy as np
import tensorflow as tf
class Bidirectional(tf.keras.Model):
def __init__(self, units: int,
projection_units: int):
super(Bidirectional, self).__init__()
self.units = units
self.projection_units = projection_units
self.Layers = [tf.keras.layers.Bidirectional(tf.keras.layers.GRU(self.units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform',
name='birnn')),
tf.keras.layers.Dense(self.projection_units, name='projection')]
def call(self, inp):
out, _, _ = self.Layers[0](inp)
out = self.Layers[1](out)
return out
class BiRNN(tf.keras.Model):
def __init__(self, units: int,projection_units: int,max_seq_length: int,
vocab_size: int,embedding_dim: int,embedding_matrix = None):
super(BiRNN, self).__init__()
self.units = units
self.projection_units=projection_units
self.max_seq_length = max_seq_length
self.vocab_size = vocab_size
self.embedding_dim = embedding_dim
self.embeddings = tf.keras.layers.Embedding(self.vocab_size, self.embedding_dim,
weights = [embedding_matrix],
trainable=False, name='embeddings')
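        # Layer stack: a Bidirectional GRU block with a linear projection, a residual
        # Add of the (frozen) embeddings and that block's output, a second BiGRU block,
        # and a softmax over the vocabulary that call() applies only when predict=True.
        # Note the residual Add requires projection_units == embedding_dim.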
self.Layers = [Bidirectional(units=self.units, projection_units=self.projection_units),
tf.keras.layers.Add(),
Bidirectional(units=self.units, projection_units=self.projection_units),
tf.keras.layers.Dense(self.vocab_size, activation='softmax', name='softmax')]
def call(self, inp, predict=False):
inp = self.embeddings(inp)
out1 = self.Layers[0](inp)
out2 = self.Layers[1]([inp, out1])
out3 = self.Layers[2](out2)
if predict is False:
return out3
else:
out4 = self.Layers[3](out3)
return out4
def loss_function(real, pred, loss_object):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_mean(loss_)
def mask_sequences(seq, t):
mask = np.zeros(seq.shape)
mask[:,:t] = 1
inp = tf.math.multiply(seq, mask)
mask[:,:t+1] = 1
tar = tf.math.multiply(seq, mask)
return inp, tar | 2.34375 | 2 |
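# A minimal usage sketch under assumed shapes; the sizes and the random embedding
# matrix below are placeholders, not values from this repository. Note that
# projection_units must equal embedding_dim for the residual Add in BiRNN.call.
if __name__ == '__main__':
    vocab_size, embedding_dim, max_len = 1000, 64, 20
    embedding_matrix = np.random.uniform(-1, 1, (vocab_size, embedding_dim))
    model = BiRNN(units=128, projection_units=64, max_seq_length=max_len,
                  vocab_size=vocab_size, embedding_dim=embedding_dim,
                  embedding_matrix=embedding_matrix)
    # Token ids cast to float so they match the float mask built in mask_sequences.
    batch = np.random.randint(1, vocab_size, size=(8, max_len)).astype(np.float64)
    inp, tar = mask_sequences(batch, t=10)  # input keeps steps < t, target keeps steps < t+1
    probs = model(inp, predict=True)        # shape (8, max_len, vocab_size)
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy(reduction='none')
    print(loss_function(tar, probs, loss_object).numpy())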