| column | dtype | values |
| --- | --- | --- |
| hexsha | string | lengths 40 – 40 |
| size | int64 | 1 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3 – 239 |
| max_stars_repo_name | string | lengths 5 – 130 |
| max_stars_repo_head_hexsha | string | lengths 40 – 78 |
| max_stars_repo_licenses | sequence | lengths 1 – 10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 – 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 – 24 ⌀ |
| max_issues_repo_path | string | lengths 3 – 239 |
| max_issues_repo_name | string | lengths 5 – 130 |
| max_issues_repo_head_hexsha | string | lengths 40 – 78 |
| max_issues_repo_licenses | sequence | lengths 1 – 10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 – 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 – 24 ⌀ |
| max_forks_repo_path | string | lengths 3 – 239 |
| max_forks_repo_name | string | lengths 5 – 130 |
| max_forks_repo_head_hexsha | string | lengths 40 – 78 |
| max_forks_repo_licenses | sequence | lengths 1 – 10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 – 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 – 24 ⌀ |
| content | string | lengths 1 – 1.03M |
| avg_line_length | float64 | 1 – 958k |
| max_line_length | int64 | 1 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |

⌀ = column also contains null values.
4a1d55db0c3ec14d69b1a48cdfb666a9536bdece | 2,701 | py | Python | scripts/ensemble_eval.py | FragLegs/mrnet | a05eb8902463cbcf88126c616911bd9f69d019df | ["MIT"] | 1 | 2021-06-18T08:16:01.000Z | 2021-06-18T08:16:01.000Z | scripts/ensemble_eval.py | FragLegs/mrnet | a05eb8902463cbcf88126c616911bd9f69d019df | ["MIT"] | null | null | null | scripts/ensemble_eval.py | FragLegs/mrnet | a05eb8902463cbcf88126c616911bd9f69d019df | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import pickle
import pprint
import numpy as np
import pandas as pd
import sklearn.metrics as metrics
from ensemble_load_data import load_data
log = logging.getLogger(__name__)
def print_eval(preds, truth, threshold=0.5):
int_preds = (preds > threshold).astype(int)
int_truth = np.array(truth).astype(int)
conf = metrics.confusion_matrix(int_truth, int_preds)
TN = float(conf[0][0])
FN = float(conf[1][0])
TP = float(conf[1][1])
FP = float(conf[0][1])
specificity = TN / (TN + FP)
sensitivity = TP / (TP + FN)
accuracy = (TP + TN) / (TP + TN + FP + FN)
fpr, tpr, threshold = metrics.roc_curve(truth, preds)
auc = metrics.auc(fpr, tpr)
print(f'Sensitivity: {sensitivity}')
print(f'Specificity: {specificity}')
print(f'Accuracy: {accuracy}')
print(f'AUC: {auc}')
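# --- Illustrative sketch, not part of the original script: how print_eval behaves on a
# --- tiny, made-up set of predictions. The toy arrays below are assumptions, chosen so
# --- that three of the four thresholded predictions match the truth.
def _example_print_eval():
    toy_preds = np.array([0.9, 0.2, 0.7, 0.4])
    toy_truth = np.array([1, 0, 0, 0])
    # Expected output: sensitivity 1.0, specificity ~0.67, accuracy 0.75.
    print_eval(toy_preds, toy_truth, threshold=0.5)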
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('model_path')
parser.add_argument('output_path')
parser.add_argument('evals_path')
verbosity_help = 'Verbosity level (default: %(default)s)'
choices = [
logging.getLevelName(logging.DEBUG),
logging.getLevelName(logging.INFO),
logging.getLevelName(logging.WARN),
logging.getLevelName(logging.ERROR)
]
parser.add_argument(
'-v',
'--verbosity',
choices=choices,
help=verbosity_help,
default=logging.getLevelName(logging.INFO)
)
# Parse the command line arguments
args = parser.parse_args()
# Set the logging to console level
logging.basicConfig(level=args.verbosity)
return args
if __name__ == '__main__':
args = parse_args()
log.info(pprint.pformat(args.__dict__))
output_path = args.output_path
os.makedirs(output_path, exist_ok=True)
model_path = args.model_path
log.info(f'Loading ensembles from {model_path}')
with open(model_path, 'rb') as fin:
ensembles = pickle.load(fin)
model_name = ensembles['name']
log.info(f'Found {model_name} ensembles')
test_X, test_y = load_data(args.evals_path, model_name, split='test')
preds = {}
df = pd.DataFrame()
for diagnosis in ['abnormal', 'acl', 'meniscus']:
print(diagnosis)
model = ensembles[diagnosis]
preds = model.predict_proba(test_X[diagnosis])[:, 1].ravel()
print_eval(preds, test_y[diagnosis])
df[diagnosis] = preds
df[f'{diagnosis}_truth'] = test_y[diagnosis]
df_path = os.path.join(output_path, f'{model_name}_ensemble_preds.csv')
log.info(f'Saving predictions to {df_path}')
df.to_csv(df_path, index=False)
| 25.242991 | 75 | 0.656794 |
4a1d56ed6e8c8529f3ee021d75c4e5a58184cb9b | 1,072 | py | Python | examples/dot3k/advanced/automenu.py | axelsimon/displayotron | 42315c47eb50d2f95d1fcade389ff57ad73e1b83 | ["MIT"] | 49 | 2017-06-07T05:09:23.000Z | 2021-10-08T14:32:05.000Z | examples/dot3k/advanced/automenu.py | axelsimon/displayotron | 42315c47eb50d2f95d1fcade389ff57ad73e1b83 | ["MIT"] | 19 | 2017-08-07T21:17:00.000Z | 2022-01-07T09:02:42.000Z | examples/dot3k/advanced/automenu.py | axelsimon/displayotron | 42315c47eb50d2f95d1fcade389ff57ad73e1b83 | ["MIT"] | 22 | 2017-06-07T05:09:25.000Z | 2021-08-17T10:52:58.000Z |
#!/usr/bin/env python
import sys
import time
import dot3k.backlight as backlight
import dot3k.joystick as nav
import dot3k.lcd as lcd
from dot3k.menu import Menu, MenuOption
# Add the root examples dir so Python can find the plugins
sys.path.append('../../')
from plugins.clock import Clock
from plugins.graph import IPAddress, GraphTemp, GraphCPU, GraphNetSpeed
from plugins.wlan import Wlan
print("""
This example uses automation to advance through each menu item.
You should see each menu item appear in turn. However, user input will not be accepted.
Press CTRL+C to exit.
""")
menu = Menu({
'Clock': Clock(),
'IP': IPAddress(),
'CPU': GraphCPU(),
'Temp': GraphTemp()
},
lcd,
None,
30)
def millis():
return int(round(time.time() * 1000.0))
def advance():
global last
if millis() > last + (delay * 1000.0):
menu.cancel()
menu.down()
menu.right()
last = millis()
last = millis()
delay = 2 # In seconds
while 1:
advance()
menu.redraw()
time.sleep(0.05)
| 18.807018 | 85 | 0.645522 |
4a1d570a01131633213c098a3d1edc1371cf6775 | 5,468 | py | Python | P01_PacmanGame/src/search/search.py | Jed-Z/artificial-intelligence-lab | ca5335b13e164230aab4e4a950e930b4d0d94d21 | ["MIT"] | 6 | 2020-11-05T04:49:10.000Z | 2022-01-06T06:17:13.000Z | P01_PacmanGame/src/search/search.py | csJed/artificial-intelligence-lab | ca5335b13e164230aab4e4a950e930b4d0d94d21 | ["MIT"] | null | null | null | P01_PacmanGame/src/search/search.py | csJed/artificial-intelligence-lab | ca5335b13e164230aab4e4a950e930b4d0d94d21 | ["MIT"] | 2 | 2020-11-30T09:14:46.000Z | 2021-10-23T01:03:37.000Z |
# -*- coding: utf-8 -*-
# search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem.
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state.
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost' is
the incremental cost of expanding to that successor.
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions.
The sequence must be composed of legal moves.
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other maze, the
sequence of moves will be incorrect, so only use this for tinyMaze.
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s, s, w, s, w, w, s, w]
def graphSearchAlgorithm(problem, frontier):
"""
    A generic graph search algorithm. The second argument, frontier, is one of the
    data structures from the util module, such as a Stack, Queue or PriorityQueue.
    Each node is a (state, actions, cost) triple:
    * state: a search state;
    * actions: the list of moves taken along the path so far;
    * cost: the total cost of the path up to this node.
"""
start_state = problem.getStartState()
    start_node = (start_state, [], 0)  # the start node
    explored = set()  # stores the states that have already been expanded
    frontier.push(start_node)  # initially the frontier holds only the start node with an empty path
    while not frontier.isEmpty():
        current_state, current_actions, current_cost = frontier.pop()
        if problem.isGoalState(current_state):
            return current_actions  # return the list of actions
        if current_state not in explored:
            explored.add(current_state)  # cycle checking is done when a node is expanded, not when its successors are visited
            for succ in problem.getSuccessors(current_state):
                succ_state, succ_action, succ_stepcost = succ
                new_actions = current_actions + [succ_action]
                new_cost = current_cost + succ_stepcost
                frontier.push((succ_state, new_actions, new_cost))  # add the successor to the frontier
    return []  # no solution found
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first.
Your search algorithm needs to return a list of actions that reaches the
goal. Make sure to implement a graph search algorithm.
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print "Start:", problem.getStartState()
print "Is the start a goal?", problem.isGoalState(problem.getStartState())
print "Start's successors:", problem.getSuccessors(problem.getStartState())
"""
"*** MY CODE HERE ***"
return graphSearchAlgorithm(problem, util.Stack())
def breadthFirstSearch(problem):
"""Search the shallowest nodes in the search tree first."""
"*** MY CODE HERE ***"
return graphSearchAlgorithm(problem, util.Queue())
def uniformCostSearch(problem):
"""Search the node of least total cost first."""
"*** MY CODE HERE ***"
    # In uniform cost search, nodes are ordered by the total path cost so far,
    # i.e. the third element of the node triple.
return graphSearchAlgorithm(problem, util.PriorityQueueWithFunction(lambda node: node[2]))
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
"*** MY CODE HERE ***"
    # A* search is just one instance of the generic graph search algorithm, so I
    # implemented graphSearchAlgorithm (earlier in this file); passing different
    # frontier structures to it yields DFS, BFS, UCS, A* and so on, which unifies
    # the graph search algorithms nicely.
def f(node):
""" f(n) = g(n) + h(n). """
g_value = node[2]
h_value = heuristic(node[0], problem)
return g_value + h_value
return graphSearchAlgorithm(problem, util.PriorityQueueWithFunction(f))
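# --- Illustrative sketch, not part of the original assignment code: an example of an
# --- admissible heuristic that could be passed to aStarSearch when states are (x, y)
# --- grid positions. The problem.goal attribute is an assumption for demonstration.
def manhattanHeuristicExample(state, problem=None):
    """Manhattan distance from state to problem.goal; falls back to 0 (nullHeuristic)."""
    if problem is None or not hasattr(problem, 'goal'):
        return 0
    x1, y1 = state
    x2, y2 = problem.goal
    return abs(x1 - x2) + abs(y1 - y2)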
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
| 32.939759 | 94 | 0.673738 |
4a1d572f2e45f1d50dbe9e80722629dbda10b3e5 | 5,961 | py | Python | google/cloud/dialogflow/v2/dialogflow-v2-py/google/cloud/dialogflow_v2/services/knowledge_bases/pagers.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | ["Apache-2.0"] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/cloud/dialogflow/v2/dialogflow-v2-py/google/cloud/dialogflow_v2/services/knowledge_bases/pagers.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | ["Apache-2.0"] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/cloud/dialogflow/v2/dialogflow-v2-py/google/cloud/dialogflow_v2/services/knowledge_bases/pagers.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | ["Apache-2.0"] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator
from google.cloud.dialogflow_v2.types import knowledge_base
class ListKnowledgeBasesPager:
"""A pager for iterating through ``list_knowledge_bases`` requests.
This class thinly wraps an initial
:class:`google.cloud.dialogflow_v2.types.ListKnowledgeBasesResponse` object, and
provides an ``__iter__`` method to iterate through its
``knowledge_bases`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListKnowledgeBases`` requests and continue to iterate
through the ``knowledge_bases`` field on the
corresponding responses.
All the usual :class:`google.cloud.dialogflow_v2.types.ListKnowledgeBasesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., knowledge_base.ListKnowledgeBasesResponse],
request: knowledge_base.ListKnowledgeBasesRequest,
response: knowledge_base.ListKnowledgeBasesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.dialogflow_v2.types.ListKnowledgeBasesRequest):
The initial request object.
response (google.cloud.dialogflow_v2.types.ListKnowledgeBasesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = knowledge_base.ListKnowledgeBasesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[knowledge_base.ListKnowledgeBasesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[knowledge_base.KnowledgeBase]:
for page in self.pages:
yield from page.knowledge_bases
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
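# --- Illustrative sketch, not part of the generated client code: how a pager such as
# --- ListKnowledgeBasesPager is typically consumed. The client construction and the
# --- parent path below are assumptions for demonstration only.
def _example_iterate_knowledge_bases():
    from google.cloud import dialogflow_v2
    client = dialogflow_v2.KnowledgeBasesClient()
    # The returned pager iterates transparently across pages by following
    # next_page_token, exactly as the ``pages`` property above does.
    for kb in client.list_knowledge_bases(parent="projects/my-project"):
        print(kb.display_name)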
class ListKnowledgeBasesAsyncPager:
"""A pager for iterating through ``list_knowledge_bases`` requests.
This class thinly wraps an initial
:class:`google.cloud.dialogflow_v2.types.ListKnowledgeBasesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``knowledge_bases`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListKnowledgeBases`` requests and continue to iterate
through the ``knowledge_bases`` field on the
corresponding responses.
All the usual :class:`google.cloud.dialogflow_v2.types.ListKnowledgeBasesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[knowledge_base.ListKnowledgeBasesResponse]],
request: knowledge_base.ListKnowledgeBasesRequest,
response: knowledge_base.ListKnowledgeBasesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.dialogflow_v2.types.ListKnowledgeBasesRequest):
The initial request object.
response (google.cloud.dialogflow_v2.types.ListKnowledgeBasesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = knowledge_base.ListKnowledgeBasesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[knowledge_base.ListKnowledgeBasesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[knowledge_base.KnowledgeBase]:
async def async_generator():
async for page in self.pages:
for response in page.knowledge_bases:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
| 42.276596 | 95 | 0.688978 |
4a1d58fd9da1163bf2bab25eec15dc58398ce628 | 1,548 | py | Python | cursoemvideoPy/Mundo2/ex068.py | BrCarlini/exPython | 3bed986e5bfa5eae191b6b18306448926aed48fd | ["MIT"] | null | null | null | cursoemvideoPy/Mundo2/ex068.py | BrCarlini/exPython | 3bed986e5bfa5eae191b6b18306448926aed48fd | ["MIT"] | null | null | null | cursoemvideoPy/Mundo2/ex068.py | BrCarlini/exPython | 3bed986e5bfa5eae191b6b18306448926aed48fd | ["MIT"] | null | null | null |
from random import randint
print('-=' * 30)
print("LET'S PLAY EVEN OR ODD")
print('-=' * 30)
n = 0
c = 0
while True:
    n = int(input('Enter a number: '))
    aleat = randint(0, 10)
    # 'P' and 'I' come from the Portuguese words par (even) and ímpar (odd).
    opcao = str(input('Even or odd [P/I]? ')).upper()
    calc = n + aleat
    # print(f'You played {n} and the computer played {aleat}.')
    if opcao == 'P' or opcao == 'I':
        if opcao == 'P':
            if calc % 2 == 0:
                print(f'Total of {calc}, IT CAME OUT EVEN.')
                c += 1
                print('You WIN\n')
                print("Let's play again...")
                print('-\n' * 3)
            else:
                print(f'Total of {calc}, IT CAME OUT ODD')
                print('You LOSE\n')
                print('-=' * 30)
                print(f'GAME OVER! You won {c} times')
                break
        else:
            if opcao == 'I':
                if calc % 2 == 1:
                    print(f'You played {n} and the computer {aleat}. Total of {calc}, IT CAME OUT ODD.')
                    c += 1
                    print('You WIN\n')
                    print("Let's play again...")
                    print('-\n' * 3)
                else:
                    print(f'You played {n} and the computer {aleat}. Total of {calc}, IT CAME OUT EVEN.')
                    print('You LOSE\n')
                    print('-=' * 30)
                    print(f'GAME OVER! You won {c} times')
                    break
    else:
        print('Invalid option.')
        break
| 31.591837 | 96 | 0.422481 |
4a1d5a01243286b718dfc3b1b561e89a9ff2315f | 3,276 | py | Python | yolox/utils/model_utils.py | junhai0428/YOLOX-OBB | 9f0d745b89ba6559e692ff06ba09b433b2f4594c | ["Apache-2.0"] | null | null | null | yolox/utils/model_utils.py | junhai0428/YOLOX-OBB | 9f0d745b89ba6559e692ff06ba09b433b2f4594c | ["Apache-2.0"] | null | null | null | yolox/utils/model_utils.py | junhai0428/YOLOX-OBB | 9f0d745b89ba6559e692ff06ba09b433b2f4594c | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from copy import deepcopy
import torch
import torch.nn as nn
from thop import profile
__all__ = [
"fuse_conv_and_bn",
"fuse_model",
"get_model_info",
"replace_module",
]
def get_model_info(model, tsize):
stride = 64
img = torch.zeros((1, 3, stride, stride), device=next(model.parameters()).device)
flops, params = profile(deepcopy(model), inputs=(img,), verbose=False)
params /= 1e6
flops /= 1e9
flops *= tsize[0] * tsize[1] / stride / stride * 2 # Gflops
info = "Params: {:.2f}M, Gflops: {:.2f}".format(params, flops)
return info
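# --- Illustrative sketch, not part of the original module: get_model_info is normally
# --- called with the intended inference resolution; the tiny model below is a stand-in.
def _example_get_model_info():
    model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())
    # Gflops are scaled from the 64x64 probe image up to the requested test size.
    print(get_model_info(model, (640, 640)))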
def fuse_conv_and_bn(conv, bn):
# Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
fusedconv = (
nn.Conv2d(
conv.in_channels,
conv.out_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
groups=conv.groups,
bias=True,
)
.requires_grad_(False)
.to(conv.weight.device)
)
# prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
# prepare spatial bias
b_conv = (
torch.zeros(conv.weight.size(0), device=conv.weight.device)
if conv.bias is None
else conv.bias
)
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(
torch.sqrt(bn.running_var + bn.eps)
)
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
return fusedconv
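# --- Illustrative sketch, not part of the original module: fusing should be numerically
# --- equivalent to Conv2d followed by BatchNorm2d in eval mode. Layer sizes are arbitrary.
def _example_check_fusion():
    conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False).eval()
    bn = nn.BatchNorm2d(8).eval()
    fused = fuse_conv_and_bn(conv, bn)
    x = torch.randn(1, 3, 16, 16)
    with torch.no_grad():
        assert torch.allclose(bn(conv(x)), fused(x), atol=1e-5)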
def fuse_model(model):
from yolox.models.network_blocks import BaseConv
for m in model.modules():
if type(m) is BaseConv and hasattr(m, "bn"):
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
delattr(m, "bn") # remove batchnorm
m.forward = m.fuseforward # update forward
return model
def replace_module(module, replaced_module_type, new_module_type, replace_func=None):
"""
Replace given type in module to a new type. mostly used in deploy.
Args:
module (nn.Module): model to apply replace operation.
replaced_module_type (Type): module type to be replaced.
new_module_type (Type)
replace_func (function): python function to describe replace logic. Defalut value None.
Returns:
model (nn.Module): module that already been replaced.
"""
def default_replace_func(replaced_module_type, new_module_type):
return new_module_type()
if replace_func is None:
replace_func = default_replace_func
model = module
if isinstance(module, replaced_module_type):
model = replace_func(replaced_module_type, new_module_type)
    else: # recursively replace
for name, child in module.named_children():
new_child = replace_module(child, replaced_module_type, new_module_type)
if new_child is not child: # child is already replaced
model.add_module(name, new_child)
return model
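# --- Illustrative sketch, not part of the original module: a common deploy-time use of
# --- replace_module is swapping activation layers; the model below is a stand-in.
def _example_replace_silu_with_relu():
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.SiLU(), nn.Conv2d(8, 8, 3), nn.SiLU())
    deploy_model = replace_module(model, nn.SiLU, nn.ReLU)
    assert not any(isinstance(m, nn.SiLU) for m in deploy_model.modules())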
| 30.90566 | 96 | 0.649878 |
4a1d5a7630153c37211196adb419577be9afebe0 | 2,538 | py | Python | map_objects/minimap.py | delamorte/spiritquestRL | 158e510b82a3dc03bbcab5e6be08f94f7c9937b6 | ["MIT"] | 4 | 2019-08-13T04:56:13.000Z | 2021-01-23T21:13:50.000Z | map_objects/minimap.py | delamorte/spiritquestRL | 158e510b82a3dc03bbcab5e6be08f94f7c9937b6 | ["MIT"] | null | null | null | map_objects/minimap.py | delamorte/spiritquestRL | 158e510b82a3dc03bbcab5e6be08f94f7c9937b6 | ["MIT"] | null | null | null |
# coding=utf-8
from __future__ import division
from ctypes import c_uint32, addressof
from bearlibterminal import terminal as blt
import numpy as np
import variables
from collections import namedtuple
def test_dynamic_sprites(game_map, ui_elements):
x0 = y0 = 1
view_height, view_width = ui_elements.screen_borders.h-1, ui_elements.screen_borders.w-1
def make_minimap():
minimap = np.ones_like(game_map.tiles, dtype=int)
for x in range(game_map.width):
for y in range(game_map.height):
minimap[y][x] = blt.color_from_name("dark gray")
if len(game_map.tiles[x][y].entities_on_tile) > 0:
if game_map.tiles[x][y].entities_on_tile[-1].name == "tree":
minimap[y][x] = blt.color_from_name("dark green")
elif "wall" in game_map.tiles[x][y].entities_on_tile[-1].name:
minimap[y][x] = blt.color_from_name("dark amber")
elif game_map.tiles[x][y].entities_on_tile[-1].name == "player":
minimap[y][x] = blt.color_from_name(None)
elif game_map.tiles[x][y].entities_on_tile[-1].fighter \
and game_map.tiles[x][y].entities_on_tile[-1].name != "player":
minimap[y][x] = blt.color_from_name("light red")
else:
minimap[y][x] = blt.color_from_name("light gray")
elif game_map.tiles[x][y].blocked:
minimap[y][x] = blt.color_from_name("light gray")
# if not game_map.tiles[x][y].explored:
# minimap[y][x] = blt.color_from_name("#000000")
minimap = minimap.flatten()
minimap = (c_uint32 * len(minimap))(*minimap)
blt.set(
"U+F900: %d, raw-size=%dx%d, resize=%dx%d, resize-filter=nearest" % (
addressof(minimap),
game_map.width, game_map.height,
500, 500)
)
while True:
blt.clear()
make_minimap()
blt.color("white")
#blt.put_ext(view_width * 4 + 1, 0, margin, margin, 0xF900)
blt.put(x0 * variables.ui_offset_x + 3, y0 * variables.ui_offset_y + 3, 0xF900)
#blt.puts(1, view_height * 2 + 1, "[color=orange]Tip:[/color] use arrow keys to move viewport over the map")
blt.refresh()
key = blt.read()
if key in (blt.TK_CLOSE, blt.TK_ESCAPE, blt.TK_TAB):
blt.clear()
            break
| 40.935484 | 116 | 0.56383 |
4a1d5a7ddd59e65a31645d40385c1540cc91cbd9 | 21,266 | py | Python | canine/localization/file_handlers.py | broadinstitute/canine | 4e367aab6d692f30b8199011dcf72894c066c57a | ["BSD-3-Clause"] | 2 | 2019-08-20T19:20:48.000Z | 2019-08-28T22:24:25.000Z | canine/localization/file_handlers.py | broadinstitute/canine | 4e367aab6d692f30b8199011dcf72894c066c57a | ["BSD-3-Clause"] | 49 | 2019-08-21T18:51:42.000Z | 2020-05-01T16:22:27.000Z | canine/localization/file_handlers.py | broadinstitute/canine | 4e367aab6d692f30b8199011dcf72894c066c57a | ["BSD-3-Clause"] | 3 | 2019-10-02T18:24:08.000Z | 2020-02-21T20:48:16.000Z |
import abc
import google.cloud.storage
import glob, google_crc32c, json, hashlib, base64, binascii, os, re, requests, shlex, subprocess, threading
import pandas as pd
from ..utils import sha1_base32, canine_logging
class FileType(abc.ABC):
"""
Stores properties of and instructions for handling a given file type:
* localization command
* size
* hash
"""
localization_mode = None # to be overridden in child classes
def __init__(self, path, transport = None, **kwargs):
"""
path: path/URL to file
transport: Canine transport object for handling local/remote files (currently not used)
localization_mode: how this file will be handled in localization.job_setup_teardown
must be one of:
* url: path is a remote URL that must be handled with a special
download command
* stream: stream remote URL into a FIFO, rather than downloading
* ro_disk: path is a URL to mount a persistent disk read-only
* local: path is a local file
* string: path is a string literal
- None: path is a string literal (for backwards compatibility)
"""
self.path = path
self.localized_path = path # path where file got localized to. needs to be manually updated
self.transport = transport # currently not used
self.extra_args = kwargs
self._size = None
self._hash = None
@property
def size(self):
"""
Returns size of this file in bytes
"""
if self._size is None:
self._size = self._get_size()
return self._size
def _get_size(self):
pass
@property
def hash(self):
"""
Returns a hash for this file
"""
if self._hash is None:
self._hash = self._get_hash()
return self._hash
def _get_hash(self):
"""
Base class assume self.path is a string literal
"""
return sha1_base32(bytearray(self.path, "utf-8"), 4)
def localization_command(self, dest):
"""
Returns a command to localize this file
"""
pass
def __str__(self):
"""
Some functions (e.g. orchestrator.make_output_DF) may be passed FileType
objects, but expect strings corresponding to the file path.
"""
return self.path
class StringLiteral(FileType):
"""
Since the base FileType class also works for string literals, alias
the StringLiteral class for clarification
"""
localization_mode = "string"
def hash_set(x):
assert isinstance(x, set)
x = list(sorted(x))
return hashlib.md5(json.dumps(x).encode()).hexdigest()
#
# define file type handlers
## Google Cloud Storage {{{
STORAGE_CLIENT = None
storage_client_creation_lock = threading.Lock()
def gcloud_storage_client():
global STORAGE_CLIENT
with storage_client_creation_lock:
if STORAGE_CLIENT is None:
# this is the expensive operation
STORAGE_CLIENT = google.cloud.storage.Client()
return STORAGE_CLIENT
class GSFileNotExists(Exception):
pass
class HandleGSURL(FileType):
localization_mode = "url"
def get_requester_pays(self) -> bool:
"""
Returns True if the requested gs:// object or bucket resides in a
requester pays bucket
"""
bucket = re.match(r"gs://(.*?)/.*", self.path)[1]
ret = subprocess.run('gsutil requesterpays get gs://{}'.format(bucket), shell = True, capture_output = True)
if b'requester pays bucket but no user project provided' in ret.stderr:
return True
else:
# Try again ls-ing the object itself
# sometimes permissions can disallow bucket inspection
# but allow object inspection
ret = subprocess.run('gsutil ls {}'.format(self.path), shell = True, capture_output = True)
return b'requester pays bucket but no user project provided' in ret.stderr
if ret.returncode == 1 and b'BucketNotFoundException: 404' in ret.stderr:
canine_logging.error(ret.stderr.decode())
raise subprocess.CalledProcessError(ret.returncode, "")
def __init__(self, path, **kwargs):
super().__init__(path, **kwargs)
# remove any trailing slashes, in case path refers to a directory
self.path = path.strip("/")
# check if this bucket is requester pays
self.rp_string = ""
if self.get_requester_pays():
if "project" not in self.extra_args:
raise ValueError(f"File {self.path} resides in a requester-pays bucket but no user project provided")
self.rp_string = f' -u {self.extra_args["project"]}'
# is this URL a directory?
self.is_dir = False
def _get_size(self):
output = subprocess.check_output('gsutil {} du -s {}'.format(self.rp_string, shlex.quote(self.path.strip("/"))), shell=True).decode()
return int(output.split()[0])
def _get_hash(self):
assert self.path.startswith("gs://")
res = re.search("^gs://([^/]+)/(.*)$", self.path)
bucket = res[1]
obj_name = res[2]
gcs_cl = gcloud_storage_client()
bucket_obj = google.cloud.storage.Bucket(gcs_cl, bucket, user_project = self.extra_args["project"] if "project" in self.extra_args else None)
# check whether this path exists, and whether it's a directory
exists = False
blob_obj = None
# list_blobs is completely ignorant of "/" as a delimiter
# prefix = "dir/b" will list
# dir/b (may not even exist as a standalone "directory")
# dir/b/file1
# dir/b/file2
# dir/boy
for b in gcs_cl.list_blobs(bucket_obj, prefix = obj_name):
if b.name == obj_name:
exists = True
blob_obj = b
# a blob starting with <obj_name>/ is a directory
if b.name.startswith(obj_name + "/"):
exists = True
self.is_dir = True
blob_obj = b
break
if not exists:
raise GSFileNotExists("{} does not exist.".format(self.path))
# if it's a directory, hash the set of CRCs within
if self.is_dir:
canine_logging.info1(f"Hashing directory {self.path}. This may take some time.")
files = set()
for b in gcs_cl.list_blobs(bucket_obj, prefix = obj_name + "/"):
files.add(b.crc32c)
return hash_set(files)
# for backwards compatibility, if it's a file, return the file directly
# TODO: for cleaner code, we really should just always return a set and hash it
else:
return binascii.hexlify(base64.b64decode(blob_obj.crc32c)).decode().lower()
def localization_command(self, dest):
dest_dir = shlex.quote(os.path.dirname(dest))
dest_file = shlex.quote(os.path.basename(dest))
self.localized_path = os.path.join(dest_dir, dest_file)
return ("[ ! -d {dest_dir} ] && mkdir -p {dest_dir} || :; ".format(dest_dir = self.localized_path if self.is_dir else dest_dir)) + f'gsutil {self.rp_string} -o "GSUtil:state_dir={dest_dir}/.gsutil_state_dir" cp -r -n -L "{dest_dir}/.gsutil_manifest" {self.path} {dest_dir}/{dest_file if not self.is_dir else ""}'
class HandleGSURLStream(HandleGSURL):
localization_mode = "stream"
def localization_command(self, dest):
return "\n".join(['gsutil {} ls {} > /dev/null'.format(self.rp_string, shlex.quote(self.path)),
'if [[ -e {0} ]]; then rm {0}; fi'.format(dest),
'mkfifo {}'.format(dest),
"gsutil {} cat {} > {} &".format(
self.rp_string,
shlex.quote(self.path),
dest
)])
# }}}
## AWS S3 {{{
class HandleAWSURL(FileType):
localization_mode = "url"
# TODO: use boto3 API; overhead for calling out to aws shell command might be high
# this would also allow us to run on systems that don't have the aws tool installed
# TODO: support directories
def __init__(self, path, **kwargs):
"""
Optional arguments:
* aws_access_key_id
* aws_secret_access_key
* aws_endpoint_url
"""
super().__init__(path, **kwargs)
# remove any trailing slashes, in case path refers to a directory
self.path = path.strip("/")
# keys get passed via environment variable
self.command_env = {}
self.command_env["AWS_ACCESS_KEY_ID"] = self.extra_args["aws_access_key_id"] if "aws_access_key_id" in self.extra_args else None
self.command_env["AWS_SECRET_ACCESS_KEY"] = self.extra_args["aws_secret_access_key"] if "aws_secret_access_key" in self.extra_args else None
self.command_env_str = " ".join([f"{k}={v}" for k, v in self.command_env.items() if v is not None])
# compute extra arguments for s3 commands
# TODO: add requester pays check here
self.aws_endpoint_url = self.extra_args["aws_endpoint_url"] if "aws_endpoint_url" in self.extra_args else None
self.s3_extra_args = []
if self.command_env["AWS_ACCESS_KEY_ID"] is None and self.command_env["AWS_SECRET_ACCESS_KEY"] is None:
self.s3_extra_args += ["--no-sign-request" ]
if self.aws_endpoint_url is not None:
self.s3_extra_args += [f"--endpoint-url {self.aws_endpoint_url}"]
self.s3_extra_args_str = " ".join(self.s3_extra_args)
# get header for object
try:
res = re.search("^s3://([^/]+)/(.*)$", self.path)
bucket = res[1]
obj = res[2]
except:
raise ValueError(f"{self.path} is not a valid s3:// URL!")
head_resp = subprocess.run(
"{env} aws s3api {extra_args} head-object --bucket {bucket} --key {obj}".format(
env = self.command_env_str,
extra_args = self.s3_extra_args_str,
bucket = bucket,
obj = obj
),
shell = True,
capture_output = True
)
if head_resp.returncode == 254:
if b"(404)" in head_resp.stderr:
# check if it's truly a 404 or a directory; we do not yet support these
ls_resp = subprocess.run(
"{env} aws s3api {extra_args} list-objects-v2 --bucket {bucket} --prefix {obj} --max-items 2".format(
env = self.command_env_str,
extra_args = self.s3_extra_args_str,
bucket = bucket,
obj = obj
),
shell = True,
capture_output = True
)
if len(ls_resp.stdout) == 0:
raise ValueError(f"Object {self.path} does not exist in bucket!")
ls_resp_headers = json.loads(ls_resp.stdout)
if len(ls_resp_headers["Contents"]) > 1:
raise ValueError(f"Object {self.path} is a directory; we do not yet support localizing those from s3.")
elif b"(403)" in head_resp.stderr:
raise ValueError(f"You do not have permission to access {self.path}!")
else:
raise ValueError(f"Error accessing S3 file:\n{head_resp.stderr.decode()}")
elif head_resp.returncode != 0:
raise ValueError(f"Unknown AWS S3 error occurred:\n{head_resp.stderr.decode()}")
self.headers = json.loads(head_resp.stdout)
def _get_hash(self):
return self.headers["ETag"].replace('"', '')
def _get_size(self):
return self.headers["ContentLength"]
def localization_command(self, dest):
dest_dir = shlex.quote(os.path.dirname(dest))
dest_file = shlex.quote(os.path.basename(dest))
self.localized_path = os.path.join(dest_dir, dest_file)
return "\n".join([
f"[ ! -d {dest_dir} ] && mkdir -p {dest_dir} || :",
f"[ -f {self.localized_path} ] && SZ=$(stat --printf '%s' {self.localized_path}) || SZ=0",
f"if [ $SZ != {self.size} ]; then",
"{env} aws s3api {extra_args} get-object --bucket {bucket} --key {file} --range \"bytes=$SZ-\" >(cat >> {dest}) > /dev/null".format(
env = self.command_env_str,
extra_args = self.s3_extra_args_str,
bucket = self.path.split("/")[2],
file = "/".join(self.path.split("/")[3:]),
dest = self.localized_path
),
"fi"
])
class HandleAWSURLStream(HandleAWSURL):
localization_mode = "stream"
def localization_command(self, dest):
dest_dir = shlex.quote(os.path.dirname(dest))
dest_file = shlex.quote(os.path.basename(dest))
self.localized_path = os.path.join(dest_dir, dest_file)
return "\n".join([
f"if [[ -e {0} ]]; then rm {0}; fi".format(dest),
f"[ ! -d {dest_dir} ] && mkdir -p {dest_dir} || :",
'mkfifo {}'.format(dest),
"{env} aws s3 {extra_args} cp {url} {path} &".format(
env = self.command_env_str,
extra_args = self.s3_extra_args_str,
url = self.path,
path = dest
)
])
# }}}
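# --- Illustrative sketch, not part of the original module: anonymous access to a public
# --- S3 object. The bucket and key are assumptions for demonstration; with no keys
# --- supplied, the handler appends --no-sign-request to the generated aws commands.
# --- Running this requires the aws CLI and network access.
def _example_public_s3_handler():
    handler = HandleAWSURL("s3://some-public-bucket/path/to/object")
    # The returned string is a small bash snippet that resumes a partial download by
    # requesting a byte range starting at the number of bytes already on disk.
    return handler.localization_command("/mnt/nfs/inputs/object")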
## GDC HTTPS URLs {{{
class HandleGDCHTTPURL(FileType):
localization_mode = "url"
def __init__(self, path, **kwargs):
super().__init__(path, **kwargs)
self.token = self.extra_args["token"] if "token" in self.extra_args else None
self.token_flag = f'--header "X-Auth-Token: {self.token}"' if self.token is not None else ''
self.check_md5 = self.extra_args["check_md5"] if "check_md5" in self.extra_args else False
# parse URL
self.url = self.path
url_parse = re.match(r"^(https://api\.(?:awg\.)?gdc\.cancer\.gov)/(?:files|data)/([0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12})", self.url)
if url_parse is None:
raise ValueError("Invalid GDC ID '{}'".format(self.url))
self.prefix = url_parse[1]
self.uuid = url_parse[2]
# the actual filename is encoded in the content-disposition header;
# save this to self.path
# since the filesize and hashes are also encoded in the header, populate
# these fields now
resp_headers = subprocess.run(
'curl -s -D - -o /dev/full {token_flag} {file}'.format(
token_flag = self.token_flag,
file = self.path
),
shell = True,
capture_output = True
)
try:
headers = pd.DataFrame(
[x.split(": ") for x in resp_headers.stdout.decode().split("\r\n")[1:]],
columns=["header", "value"],
).set_index("header")["value"]
self.path = re.match(".*filename=(.*)$", headers["Content-Disposition"])[1]
self._size = int(headers["Content-Length"])
self._hash = headers["Content-MD5"]
except:
canine_logging.error("Error parsing GDC filename; see stack trace for details")
raise
self.localized_path = self.path
def localization_command(self, dest):
dest_dir = shlex.quote(os.path.dirname(dest))
dest_file = shlex.quote(os.path.basename(dest))
self.localized_path = os.path.join(dest_dir, dest_file)
cmd = []
if self.token is not None:
cmd += ["[ ! -d {dest_dir} ] && mkdir -p {dest_dir} || :; curl -C - -o {path} {token} '{url}'".format(dest_dir = dest_dir, path = self.localized_path, token = self.token_flag, url = self.url)]
else:
cmd += ["[ ! -d {dest_dir} ] && mkdir -p {dest_dir} || :; curl -C - -o {path} '{url}'".format(dest_dir = dest_dir, path = self.localized_path, url = self.url)]
# ensure that file downloaded properly
if self.check_md5:
cmd += [f"[ $(md5sum {self.localized_path} | sed -r 's/ .*$//') == {self.hash} ]"]
return "\n".join(cmd)
class HandleGDCHTTPURLStream(HandleGDCHTTPURL):
localization_mode="stream"
def localization_command(self, dest):
dest_dir = shlex.quote(os.path.dirname(dest))
dest_file = shlex.quote(os.path.basename(dest))
self.localized_path = os.path.join(dest_dir, dest_file)
cmd = []
        #clean existing file if it exists
cmd += ['if [[ -e {0} ]]; then rm {0}; fi'.format(dest)]
#create dir if it doesnt exist
cmd += ["[ ! -d {dest_dir} ] && mkdir -p {dest_dir} || :;".format(dest_dir = dest_dir)]
#create fifo object
cmd += ['mkfifo {}'.format(dest)]
#stream into fifo object
if self.token is not None:
cmd += ["curl -C - -o {path} {token} '{url}' &".format(path = self.localized_path, token = self.token_flag, url = self.url)]
else:
cmd += ["curl -C - -o {path} '{url}' &".format(dest_dir = dest_dir, path = self.localized_path, url = self.url)]
return "\n".join(cmd)
# }}}
## Regular files {{{
class HandleRegularFile(FileType):
localization_mode = "local"
def _get_size(self):
return os.path.getsize(self.path)
def _get_hash(self):
# if Canine-generated checksum exists, use it
k9_crc = os.path.join(os.path.dirname(self.path), "." + os.path.basename(self.path) + ".crc32c")
if os.path.exists(k9_crc):
with open(k9_crc, "r") as f:
return f.read().rstrip()
# otherwise, compute it
hash_alg = google_crc32c.Checksum()
buffer_size = 8 * 1024
# check if it's a directory
isdir = False
if os.path.isdir(self.path):
files = glob.iglob(self.path + "/**", recursive = True)
isdir = True
else:
files = [self.path]
for f in files:
if os.path.isdir(f):
continue
file_size_MiB = int(os.path.getsize(self.path)/1024**2)
# if we are hashing a whole directory, output a message for each file
if isdir:
canine_logging.info1(f"Hashing file {f} ({file_size_MiB} MiB)")
ct = 0
with open(f, "rb") as fp:
while True:
# output message every 100 MiB
if ct > 0 and not ct % int(100*1024**2/buffer_size):
canine_logging.info1(f"Hashing file {self.path}; {int(buffer_size*ct/1024**2)}/{file_size_MiB} MiB completed")
data = fp.read(buffer_size)
if not data:
break
hash_alg.update(data)
ct += 1
return hash_alg.hexdigest().decode().lower()
# }}}
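# --- Illustrative sketch, not part of the original module: the ".<name>.crc32c" sidecar
# --- convention honored by HandleRegularFile._get_hash can be produced with
# --- google_crc32c directly. The path argument is an assumption for demonstration.
def _example_write_crc32c_sidecar(path):
    checksum = google_crc32c.Checksum()
    with open(path, "rb") as fp:
        for chunk in iter(lambda: fp.read(8 * 1024), b""):
            checksum.update(chunk)
    sidecar = os.path.join(os.path.dirname(path), "." + os.path.basename(path) + ".crc32c")
    with open(sidecar, "w") as fp:
        fp.write(checksum.hexdigest().decode().lower())
    return sidecar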
## Read-only disks {{{
class HandleRODISKURL(FileType):
localization_mode = "ro_disk"
# file size is unnknowable
# hash is be based on disk hash URL (if present) and/or filename
# * for single file RODISKS, hash will be disk name
# * for batch RODISKS, hash will be disk name + filename
def _get_hash(self):
roURL = re.match(r"rodisk://([^/]+)/(.*)", self.path)
if roURL is None or roURL[2] == "":
raise ValueError("Invalid RODISK URL specified ({})!".format(self.path))
# we can only compare RODISK URLs based on the URL string, since
# actually hashing the contents would entail mounting them.
# most RODISK URLs will contain a hash of their contents, but
# if they don't, then we warn the user that we may be inadvertently
# avoiding
if not roURL[1].startswith("canine-"):
canine_logging.warning("RODISK input {} cannot be hashed; this job may be inadvertently avoided.".format(self.path))
# single file/directory RODISKs will contain the CRC32C of the file(s)
if roURL[1].startswith("canine-crc32c-"):
return roURL[1][14:]
# for BatchLocalDisk multifile RODISKs (or non-hashed URLs), the whole URL
# serves as a hash for the file
return self.path
# handler will be command to attach/mount the RODISK
# (currently implemented in base.py)
# }}}
def get_file_handler(path, url_map = None, **kwargs):
url_map = {
r"^gs://" : HandleGSURL,
r"^s3://" : HandleAWSURL,
r"^https://api.gdc.cancer.gov" : HandleGDCHTTPURL,
r"^https://api.awg.gdc.cancer.gov" : HandleGDCHTTPURL,
r"^rodisk://" : HandleRODISKURL,
} if url_map is None else url_map
# zerothly, if path is already a FileType object, return as-is
if isinstance(path, FileType):
return path
# assume path is a string-like object from here on out
path = str(path)
# firstly, check if the path is a regular file
if os.path.exists(path):
return HandleRegularFile(path, **kwargs)
# next, consult the mapping of path URL -> handler
for pat, handler in url_map.items():
if re.match(pat, path) is not None:
return handler(path, **kwargs)
# otherwise, assume it's a string literal; use the base class
return StringLiteral(path, **kwargs)
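# --- Illustrative sketch, not part of the original module: how get_file_handler routes
# --- different inputs. Only handlers that need no cloud credentials are instantiated;
# --- the temporary path below is an assumption for demonstration.
def _example_get_file_handler_dispatch(tmp_path="/tmp/canine_example.txt"):
    # A string that is not an existing path and matches no URL pattern is a literal.
    assert isinstance(get_file_handler("some literal value"), StringLiteral)
    # An existing local path is wrapped in HandleRegularFile.
    with open(tmp_path, "w") as fp:
        fp.write("hello")
    assert isinstance(get_file_handler(tmp_path), HandleRegularFile)
    # gs://, s3://, rodisk:// and GDC https:// paths would be routed to the Handle*
    # classes in url_map, but those constructors contact cloud services, so they are
    # not exercised here.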
| 37.839858 | 320 | 0.589344 |
4a1d5aeedf7eb9a368374e35d4842206fc2999cc | 8,531 | py | Python | setup.py | lmmx/sphinx | 10dba797f204bc00d1e170cf024b22bffe241f25 | ["BSD-2-Clause"] | 1 | 2022-02-08T23:30:27.000Z | 2022-02-08T23:30:27.000Z | setup.py | lmmx/sphinx | 10dba797f204bc00d1e170cf024b22bffe241f25 | ["BSD-2-Clause"] | 1 | 2021-10-16T06:34:21.000Z | 2021-10-16T06:34:21.000Z | setup.py | mrmeyagi5/sphinx | f59026865a8c658acbc6d5664bfaa430e9ad7e7a | ["BSD-2-Clause"] | null | null | null |
import os
import sys
from distutils import log
from io import StringIO
from setuptools import find_packages, setup
import sphinx
with open('README.rst') as f:
long_desc = f.read()
if sys.version_info < (3, 6):
print('ERROR: Sphinx requires at least Python 3.6 to run.')
sys.exit(1)
install_requires = [
'sphinxcontrib-applehelp',
'sphinxcontrib-devhelp',
'sphinxcontrib-jsmath',
'sphinxcontrib-htmlhelp>=2.0.0',
'sphinxcontrib-serializinghtml>=1.1.5',
'sphinxcontrib-qthelp',
'Jinja2>=2.3',
'Pygments>=2.0',
'docutils>=0.14,<0.18',
'snowballstemmer>=1.1',
'babel>=1.3',
'alabaster>=0.7,<0.8',
'imagesize',
'requests>=2.5.0',
'setuptools',
'packaging',
]
extras_require = {
# Environment Marker works for wheel 0.24 or later
':sys_platform=="win32"': [
'colorama>=0.3.5',
],
'docs': [
'sphinxcontrib-websupport',
],
'lint': [
'flake8>=3.5.0',
'isort',
'mypy>=0.900',
'docutils-stubs',
"types-typed-ast",
"types-pkg_resources",
"types-requests",
],
'test': [
'pytest',
'pytest-cov',
'html5lib',
"typed_ast; python_version < '3.8'",
'cython',
],
}
# Provide a "compile_catalog" command that also creates the translated
# JavaScript files if Babel is available.
cmdclass = {}
class Tee:
def __init__(self, stream):
self.stream = stream
self.buffer = StringIO()
def write(self, s):
self.stream.write(s)
self.buffer.write(s)
def flush(self):
self.stream.flush()
try:
from json import dump
from babel.messages.frontend import compile_catalog
from babel.messages.pofile import read_po
except ImportError:
pass
else:
class compile_catalog_plusjs(compile_catalog):
"""
An extended command that writes all message strings that occur in
JavaScript files to a JavaScript file along with the .mo file.
Unfortunately, babel's setup command isn't built very extensible, so
most of the run() code is duplicated here.
"""
def run(self):
try:
sys.stderr = Tee(sys.stderr)
compile_catalog.run(self)
finally:
if sys.stderr.buffer.getvalue():
print("Compiling failed.")
sys.exit(1)
if isinstance(self.domain, list):
for domain in self.domain:
self._run_domain_js(domain)
else:
self._run_domain_js(self.domain)
def _run_domain_js(self, domain):
po_files = []
js_files = []
if not self.input_file:
if self.locale:
po_files.append((self.locale,
os.path.join(self.directory, self.locale,
'LC_MESSAGES',
domain + '.po')))
js_files.append(os.path.join(self.directory, self.locale,
'LC_MESSAGES',
domain + '.js'))
else:
for locale in os.listdir(self.directory):
po_file = os.path.join(self.directory, locale,
'LC_MESSAGES',
domain + '.po')
if os.path.exists(po_file):
po_files.append((locale, po_file))
js_files.append(os.path.join(self.directory, locale,
'LC_MESSAGES',
domain + '.js'))
else:
po_files.append((self.locale, self.input_file))
if self.output_file:
js_files.append(self.output_file)
else:
js_files.append(os.path.join(self.directory, self.locale,
'LC_MESSAGES',
domain + '.js'))
for js_file, (locale, po_file) in zip(js_files, po_files):
with open(po_file, encoding='utf8') as infile:
catalog = read_po(infile, locale)
if catalog.fuzzy and not self.use_fuzzy:
continue
log.info('writing JavaScript strings in catalog %r to %r',
po_file, js_file)
jscatalog = {}
for message in catalog:
if any(x[0].endswith(('.js', '.js_t', '.html'))
for x in message.locations):
msgid = message.id
if isinstance(msgid, (list, tuple)):
msgid = msgid[0]
jscatalog[msgid] = message.string
with open(js_file, 'wt', encoding='utf8') as outfile:
outfile.write('Documentation.addTranslations(')
dump({
'messages': jscatalog,
'plural_expr': catalog.plural_expr,
'locale': str(catalog.locale)
}, outfile, sort_keys=True, indent=4)
outfile.write(');')
cmdclass['compile_catalog'] = compile_catalog_plusjs
setup(
name='Sphinx',
version=sphinx.__version__,
url='https://sphinx-doc.org/',
download_url='https://pypi.org/project/Sphinx/',
license='BSD',
author='Georg Brandl',
author_email='[email protected]',
description='Python documentation generator',
long_description=long_desc,
long_description_content_type='text/x-rst',
project_urls={
"Code": "https://github.com/sphinx-doc/sphinx",
"Issue tracker": "https://github.com/sphinx-doc/sphinx/issues",
},
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Framework :: Setuptools Plugin',
'Framework :: Sphinx',
'Framework :: Sphinx :: Extension',
'Framework :: Sphinx :: Theme',
'Topic :: Documentation',
'Topic :: Documentation :: Sphinx',
'Topic :: Internet :: WWW/HTTP :: Site Management',
'Topic :: Printing',
'Topic :: Software Development',
'Topic :: Software Development :: Documentation',
'Topic :: Text Processing',
'Topic :: Text Processing :: General',
'Topic :: Text Processing :: Indexing',
'Topic :: Text Processing :: Markup',
'Topic :: Text Processing :: Markup :: HTML',
'Topic :: Text Processing :: Markup :: LaTeX',
'Topic :: Utilities',
],
platforms='any',
packages=find_packages(exclude=['tests', 'utils']),
package_data = {
'sphinx': ['py.typed'],
},
include_package_data=True,
entry_points={
'console_scripts': [
'sphinx-build = sphinx.cmd.build:main',
'sphinx-quickstart = sphinx.cmd.quickstart:main',
'sphinx-apidoc = sphinx.ext.apidoc:main',
'sphinx-autogen = sphinx.ext.autosummary.generate:main',
],
'distutils.commands': [
'build_sphinx = sphinx.setup_command:BuildDoc',
],
},
python_requires=">=3.6",
install_requires=install_requires,
extras_require=extras_require,
cmdclass=cmdclass,
)
| 33.853175 | 80 | 0.522213 |
4a1d5ba9bb0f37533d512e973006706bb676f0d1 | 1,578 | py | Python | src/test_samples.py | cmingjian/notes_antlr4 | 92c993f76e64c140167cdc1de942844e4d1b254c | ["Apache-2.0"] | null | null | null | src/test_samples.py | cmingjian/notes_antlr4 | 92c993f76e64c140167cdc1de942844e4d1b254c | ["Apache-2.0"] | null | null | null | src/test_samples.py | cmingjian/notes_antlr4 | 92c993f76e64c140167cdc1de942844e4d1b254c | ["Apache-2.0"] | null | null | null |
import os
import re
import sys
import glob
if len(sys.argv) == 1:
print "missing dir arg"
exit()
os.chdir(sys.argv[1])
grammars = glob.glob('*.g4')
rigs = glob.glob('Test*.java')
inputs = glob.glob('*-input')
outputs = glob.glob('*-output')
for g in grammars:
print "processing", g
os.system("java -Xmx500M -cp .:/usr/local/lib/antlr4-complete.jar:$CLASSPATH org.antlr.v4.Tool " + g)
for rig in rigs:
match = re.search("Test([A-Za-z]+)(_[A-Za-z0-9]+)?\.java", rig)
g = match.group(1)
testName = match.group(1)
if match.group(2) is not None:
testName += match.group(2)
# print rig,':',g, testName
input = testName + '-input'
output = testName + '-output'
if not os.path.exists(input):
continue
print "# TEST", testName
cmd = "java -cp .:/usr/local/lib/antlr4-complete.jar:$CLASSPATH Test" + testName + \
' < ' + input + \
' &> /tmp/stdout'
# print cmd
os.system(cmd)
expected = file(output).read()
results = file('/tmp/stdout').read()
if results.strip() != expected.strip():
print "$ " + cmd
print "### unexpected output:"
for l in results.strip().split('\n'):
print '> ' + l
print 'expected:'
for l in expected.strip().split('\n'):
print '> ' + l
print "from input "
for l in file(input).read().strip().split('\n'):
print '< ' + l
print '---------'
# for input in inputs:
# os.system("java -cp /usr/local/lib/antlr4-complete.jar:$CLASSPATH Test" + g + input)
| 30.346154 | 105 | 0.56147 |
4a1d5bc721da25d7a7708bb9829696f3e6f67966 | 456 | py | Python | Lib/site-packages/PyInstaller/hooks/hook-lxml.etree.py | fhqjgd/python2.7 | 6533019b8b2fbe113aa552e44247c054bdd8a75b | ["bzip2-1.0.6"] | null | null | null | Lib/site-packages/PyInstaller/hooks/hook-lxml.etree.py | fhqjgd/python2.7 | 6533019b8b2fbe113aa552e44247c054bdd8a75b | ["bzip2-1.0.6"] | null | null | null | Lib/site-packages/PyInstaller/hooks/hook-lxml.etree.py | fhqjgd/python2.7 | 6533019b8b2fbe113aa552e44247c054bdd8a75b | ["bzip2-1.0.6"] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
hiddenimports = ['lxml._elementpath', 'gzip']
| 38 | 78 | 0.502193 |
4a1d5e5e313655d20d99c1a46d28a9d671b8398e | 3,913 | py | Python | src/druidry/intervals.py | bhumikadalal22/druidry | d2dc6398d4f91e3095f79e876e837791c354dd86 | ["MIT"] | null | null | null | src/druidry/intervals.py | bhumikadalal22/druidry | d2dc6398d4f91e3095f79e876e837791c354dd86 | ["MIT"] | null | null | null | src/druidry/intervals.py | bhumikadalal22/druidry | d2dc6398d4f91e3095f79e876e837791c354dd86 | ["MIT"] | null | null | null |
"""Creates ISO-8601 dates from a variety of inputs."""
import datetime
import isodate
import sys
from . import durations
if sys.version_info >= (3, 0):
basestring = str
def _datetime_now():
return datetime.datetime.now()
class Interval(str):
"""Interval subclasses str to allow flexibility but not complicate JSON encoding."""
def __new__(cls, interval=None, start=None, end=None, duration=None, **kwargs):
"""
Create a string interval either from a handful of possible formulations.
Like ISO-8601 intervals, this allows specifying start and end, start
and duration, duration and end, just duration or just interval.
"""
# First, check to see if duration kwargs (eg weeks=3, days=2) are specified
# without a duration string and if so, create a duration string.
if durations.has_duration_kwarg(**kwargs) and not duration:
duration = durations.duration_kwargs_to_isoformat(**kwargs)
# Now we have a duration, whether specified via kwargs or explicitly.
# Check the remaining args to figure out how to proceed.
# If start or end is specified, we have three possibilities:
# 1. start/end
# 2. start/duration
# 3. duration/end
# All are handled in `_create_two_part_interval`. Otherwise, we just
# want to make sure that if duration or interval are passed as dates,
# we coerce them to ISO-8601 strings.
interval_str = None
if start or duration:
interval_str = cls._create_two_part_interval(cls, start, end, duration)
elif interval:
interval_str = cls._create_date_str(interval)
else:
raise ValueError(
'Invalid interval arguments: '
'interval={interval}, start={start}, '
'end={end}, duration={duration}'.format(
interval=interval, start=start, end=end, duration=duration))
return str.__new__(cls, interval_str)
@staticmethod
def _create_two_part_interval(cls, start, end, duration):
if start and end:
parts = (cls._create_date_str(start), cls._create_date_str(end))
elif start and duration:
parts = (cls._create_date_str(start), cls._create_date_str(duration))
elif end and duration:
parts = (cls._create_date_str(duration), cls._create_date_str(end))
elif start:
parts = (cls._create_date_str(start), cls._create_date_str(_datetime_now()))
elif duration:
parts = (cls._create_date_str(duration), cls._create_date_str(_datetime_now()))
return '/'.join(parts)
@staticmethod
def _create_date_str(date_or_str):
"""Turn a datetime or timedelta into a date str."""
if isinstance(date_or_str, basestring):
return date_or_str
if type(date_or_str) in (datetime.datetime, datetime.date):
return date_or_str.isoformat()
if type(date_or_str) == datetime.timedelta:
return isodate.duration_isoformat(date_or_str)
raise ValueError('Invalid value for interval: {}'.format(date_or_str))
@staticmethod
def pad_interval_by_timedelta(interval, td):
"""
Given an interval and a timedelta, pad the interval by the timedelta.
For example, if the timedelta's largest unit is hours, we floor the
start to the nearest hour and ceil the end to the nearest hour.
"""
start, end, _ = durations.parse_interval(interval)
start_floor = durations.floor_datetime(start, td)
end_ceil = durations.ceil_datetime(end, td)
return Interval(start=start_floor, end=end_ceil)
def pad_by_timedelta(self, td):
"""Given a timedelta, pad this interval by the timedelta."""
return Interval.pad_interval_by_timedelta(self, td)
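# --- Illustrative sketch, not part of the original module: the same Interval class
# --- accepts several argument combinations, mirroring ISO-8601 interval forms.
def _example_intervals():
    start = datetime.datetime(2020, 1, 1)
    end = datetime.datetime(2020, 1, 8)
    # start/end and start/duration both collapse to ISO-8601 interval strings.
    assert Interval(start=start, end=end) == "2020-01-01T00:00:00/2020-01-08T00:00:00"
    assert Interval(start=start, duration="P7D") == "2020-01-01T00:00:00/P7D"
    # Duration keyword arguments (e.g. weeks=1) are converted to an ISO duration via
    # the durations helpers before being combined in the same way.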
| 39.928571 | 91 | 0.655252 |
4a1d60479528d06fd050c8207981f9b9177399ff | 546 | py | Python | myproject/myapp/migrations/0002_auto_20170718_1150.py | Bossabossy/Rental | c3bbcd62a749f9591f6e2ae4ae837b787ec6b9ac | ["BSD-2-Clause"] | 1 | 2017-07-18T09:32:53.000Z | 2017-07-18T09:32:53.000Z | myproject/myapp/migrations/0002_auto_20170718_1150.py | Bossabossy/Rental | c3bbcd62a749f9591f6e2ae4ae837b787ec6b9ac | ["BSD-2-Clause"] | null | null | null | myproject/myapp/migrations/0002_auto_20170718_1150.py | Bossabossy/Rental | c3bbcd62a749f9591f6e2ae4ae837b787ec6b9ac | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-07-18 04:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('myapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='rent',
name='customer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='myapp.Customer'),
),
]
| 24.818182 | 126 | 0.644689 |
4a1d6265dc0171a463e56d9bd5a940bc07f35759 | 8,434 | py | Python | test/functional/assumevalid.py | tylerbuchea/schezuancoin | 85f14fc2aa74d40ed4b2fc85c656a2fb258412ce | ["MIT"] | null | null | null | test/functional/assumevalid.py | tylerbuchea/schezuancoin | 85f14fc2aa74d40ed4b2fc85c656a2fb258412ce | ["MIT"] | null | null | null | test/functional/assumevalid.py | tylerbuchea/schezuancoin | 85f14fc2aa74d40ed4b2fc85c656a2fb258412ce | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Schezuancoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for skipping signature validation on old blocks.
Test logic for skipping signature validation on blocks which we've assumed
valid (https://github.com/schezuancoin/schezuancoin/pull/9484)
We build a chain that includes an invalid signature for one of the
transactions:
0: genesis block
1: block 1 with coinbase transaction output.
2-101: bury that block with 100 blocks so the coinbase transaction
output can be spent
102: a block containing a transaction spending the coinbase
transaction output. The transaction has an invalid signature.
103-2202: bury the bad block with just over two weeks' worth of blocks
(2100 blocks)
Start three nodes:
- node0 has no -assumevalid parameter. Try to sync to block 2202. It will
reject block 102 and only sync as far as block 101
- node1 has -assumevalid set to the hash of block 102. Try to sync to
block 2202. node1 will sync all the way to block 2202.
- node2 has -assumevalid set to the hash of block 102. Try to sync to
block 200. node2 will reject block 102 since it's assumed valid, but it
isn't buried by at least two weeks' work.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.key import CECKey
from test_framework.mininode import (CBlockHeader,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
NetworkThread,
NodeConn,
NodeConnCB,
msg_block,
msg_headers)
from test_framework.script import (CScript, OP_TRUE)
from test_framework.test_framework import SchezuancoinTestFramework
from test_framework.util import (p2p_port, assert_equal)
class BaseNode(NodeConnCB):
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
class AssumeValidTest(SchezuancoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.add_nodes(3)
# Start node0. We don't start the other nodes yet since
# we need to pre-mine a block with an invalid transaction
# signature so we can pass in the block hash as assumevalid.
self.start_node(0)
def send_blocks_until_disconnected(self, node):
"""Keep sending blocks to the node until we're disconnected."""
for i in range(len(self.blocks)):
if not node.connection:
break
try:
node.send_message(msg_block(self.blocks[i]))
except IOError as e:
assert str(e) == 'Not connected, no pushbuf'
break
def assert_blockchain_height(self, node, height):
"""Wait until the blockchain is no longer advancing and verify it's reached the expected height."""
last_height = node.getblock(node.getbestblockhash())['height']
timeout = 10
while True:
time.sleep(0.25)
current_height = node.getblock(node.getbestblockhash())['height']
if current_height != last_height:
last_height = current_height
if timeout < 0:
assert False, "blockchain too short after timeout: %d" % current_height
                timeout -= 0.25
continue
elif current_height > height:
assert False, "blockchain too long: %d" % current_height
elif current_height == height:
break
def run_test(self):
# Connect to node0
node0 = BaseNode()
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
node0.add_connection(connections[0])
NetworkThread().start() # Start up network handling in another thread
node0.wait_for_verack()
# Build the blockchain
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
self.blocks = []
# Get a pubkey for the coinbase TXO
coinbase_key = CECKey()
coinbase_key.set_secretbytes(b"horsebattery")
coinbase_pubkey = coinbase_key.get_pubkey()
# Create the first block with a coinbase output to our key
height = 1
block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)
self.blocks.append(block)
self.block_time += 1
block.solve()
# Save the coinbase for later
self.block1 = block
self.tip = block.sha256
height += 1
# Bury the block 100 deep so the coinbase output is spendable
for i in range(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.blocks.append(block)
self.tip = block.sha256
self.block_time += 1
height += 1
# Create a transaction spending the coinbase output with an invalid (null) signature
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
tx.calc_sha256()
block102 = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block102.vtx.extend([tx])
block102.hashMerkleRoot = block102.calc_merkle_root()
block102.rehash()
block102.solve()
self.blocks.append(block102)
self.tip = block102.sha256
self.block_time += 1
height += 1
# Bury the assumed valid block 2100 deep
for i in range(2100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.nVersion = 4
block.solve()
self.blocks.append(block)
self.tip = block.sha256
self.block_time += 1
height += 1
# Start node1 and node2 with assumevalid so they accept a block with a bad signature.
self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)])
node1 = BaseNode() # connects to node1
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))
node1.add_connection(connections[1])
node1.wait_for_verack()
self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)])
node2 = BaseNode() # connects to node2
connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
node2.add_connection(connections[2])
node2.wait_for_verack()
# send header lists to all three nodes
node0.send_header_for_blocks(self.blocks[0:2000])
node0.send_header_for_blocks(self.blocks[2000:])
node1.send_header_for_blocks(self.blocks[0:2000])
node1.send_header_for_blocks(self.blocks[2000:])
node2.send_header_for_blocks(self.blocks[0:200])
# Send blocks to node0. Block 102 will be rejected.
self.send_blocks_until_disconnected(node0)
self.assert_blockchain_height(self.nodes[0], 101)
# Send all blocks to node1. All blocks will be accepted.
for i in range(2202):
node1.send_message(msg_block(self.blocks[i]))
# Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync.
node1.sync_with_ping(120)
assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)
# Send blocks to node2. Block 102 will be rejected.
self.send_blocks_until_disconnected(node2)
self.assert_blockchain_height(self.nodes[2], 101)
if __name__ == '__main__':
AssumeValidTest().main()
| 41.546798 | 107 | 0.62912 |
4a1d63146f2d8c6151e472c4aff0bd6027fde863 | 12,530 | py | Python | baselines/logger.py | anonymous-supplement/asf | e1a6f6c8e83c49a6e44d01d4bcdf4b79a80248cf | [
"MIT"
] | 2 | 2020-07-06T11:24:03.000Z | 2021-02-10T04:32:43.000Z | baselines/logger.py | NeteaseFuxiRL/asf | e1a6f6c8e83c49a6e44d01d4bcdf4b79a80248cf | [
"MIT"
] | null | null | null | baselines/logger.py | NeteaseFuxiRL/asf | e1a6f6c8e83c49a6e44d01d4bcdf4b79a80248cf | [
"MIT"
] | null | null | null | import datetime
import json
import os
import os.path as osp
import shutil
import sys
import tempfile
import time
LOG_OUTPUT_FORMATS = ['stdout', 'log', 'csv']
# Also valid: json, tensorboard
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, 'wt')
self.own_file = True
else:
assert hasattr(filename_or_file, 'read'), 'expected file or str, got %s' % filename_or_file
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if isinstance(val, float):
valstr = '%-8.3g' % (val,)
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print('WARNING: tried to write empty key-value dict')
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = '-' * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items()):
lines.append('| %s%s | %s%s |' % (
key,
' ' * (keywidth - len(key)),
val,
' ' * (valwidth - len(val)),
))
lines.append(dashes)
self.file.write('\n'.join(lines) + '\n')
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
return s[:20] + '...' if len(s) > 23 else s
def writeseq(self, seq):
for arg in seq:
self.file.write(arg)
self.file.write('\n')
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'wt')
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, 'dtype'):
v = v.tolist()
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + '\n')
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'w+t')
self.keys = []
self.sep = ','
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = kvs.keys() - self.keys
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
self.file.write(k)
self.file.write('\n')
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write('\n')
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
v = kvs.get(k)
            if v is not None:  # write zero values too; only skip missing keys
self.file.write(str(v))
self.file.write('\n')
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = 'events'
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {'tag': k, 'simple_value': float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = self.step # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir):
# from mpi4py import MPI
os.makedirs(ev_dir, exist_ok=True)
# rank = MPI.COMM_WORLD.Get_rank()
rank=0
if format == 'stdout':
return HumanOutputFormat(sys.stdout)
elif format == 'log':
suffix = "" if rank == 0 else ("-mpi%03i" % rank)
return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % suffix))
elif format == 'json':
assert rank == 0
return JSONOutputFormat(osp.join(ev_dir, 'progress.json'))
elif format == 'csv':
assert rank == 0
return CSVOutputFormat(osp.join(ev_dir, 'progress.csv'))
elif format == 'tensorboard':
assert rank == 0
return TensorBoardOutputFormat(osp.join(ev_dir, 'tb'))
else:
raise ValueError('Unknown format specified: %s' % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
"""
Logger.CURRENT.logkv(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
level: int. (see logger.py docs) If the global logger level is higher than
the level argument here, don't print to stdout.
"""
Logger.CURRENT.dumpkvs()
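# Minimal usage sketch (hypothetical values): record a few diagnostics each
# iteration with logkv(), then flush them to every configured output format.
#
#   logkv("epoch", 3)
#   logkv("loss", 0.25)
#   dumpkvs()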
def getkvs():
return Logger.CURRENT.name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
Logger.CURRENT.log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
Logger.CURRENT.set_level(level)
def get_dir():
"""
Get directory that log files are being written to.
    will be None if there is no output directory (i.e., if you didn't call configure)
"""
return Logger.CURRENT.get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
# ================================================================
# Backend
# ================================================================
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats):
self.name2val = {} # values this iteration
self.level = INFO
self.dir = dir
self.output_formats = output_formats
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def dumpkvs(self):
if self.level == DISABLED: return
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(self.name2val)
self.name2val.clear()
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
Logger.DEFAULT = Logger.CURRENT = Logger(dir=None, output_formats=[HumanOutputFormat(sys.stdout)])
def configure(dir=None, format_strs=None):
print("dir is ", dir)
if dir is None:
dir = os.getenv('OPENAI_LOGDIR')
if dir is None:
dir = osp.join(tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
assert isinstance(dir, str)
os.makedirs(dir, exist_ok=True)
if format_strs is None:
strs = os.getenv('OPENAI_LOG_FORMAT')
format_strs = strs.split(',') if strs else LOG_OUTPUT_FORMATS
output_formats = [make_output_format(f, dir) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats)
log('Logging to %s' % dir)
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
class scoped_configure(object):
def __init__(self, dir=None, format_strs=None):
self.dir = dir
self.format_strs = format_strs
self.prevlogger = None
def __enter__(self):
self.prevlogger = Logger.CURRENT
configure(dir=self.dir, format_strs=self.format_strs)
def __exit__(self, *args):
Logger.CURRENT.close()
Logger.CURRENT = self.prevlogger
# ================================================================
def _demo():
info("hi")
debug("shouldn't appear")
set_level(DEBUG)
debug("should appear")
dir = "/tmp/testlogging"
if os.path.exists(dir):
shutil.rmtree(dir)
configure(dir=dir)
logkv("a", 3)
logkv("b", 2.5)
dumpkvs()
logkv("b", -2.5)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see a = 5.5")
logkv("b", -2.5)
dumpkvs()
logkv("a", "longasslongasslongasslongasslongasslongassvalue")
dumpkvs()
# ================================================================
# Readers
# ================================================================
def read_json(fname):
import pandas
ds = []
with open(fname, 'rt') as fh:
for line in fh:
ds.append(json.loads(line))
return pandas.DataFrame(ds)
def read_csv(fname):
import pandas
return pandas.read_csv(fname, index_col=None, comment='#')
def read_tb(path):
"""
path : a tensorboard file OR a directory, where we will find all TB files
of the form events.*
"""
import pandas
import numpy as np
from glob import glob
from collections import defaultdict
import tensorflow as tf
if osp.isdir(path):
fnames = glob(osp.join(path, "events.*"))
elif osp.basename(path).startswith("events."):
fnames = [path]
else:
raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s" % path)
tag2pairs = defaultdict(list)
maxstep = 0
for fname in fnames:
for summary in tf.train.summary_iterator(fname):
if summary.step > 0:
for v in summary.summary.value:
pair = (summary.step, v.simple_value)
tag2pairs[v.tag].append(pair)
maxstep = max(summary.step, maxstep)
data = np.empty((maxstep, len(tag2pairs)))
data[:] = np.nan
tags = sorted(tag2pairs.keys())
for (colidx, tag) in enumerate(tags):
pairs = tag2pairs[tag]
for (step, value) in pairs:
data[step - 1, colidx] = value
return pandas.DataFrame(data, columns=tags)
if __name__ == "__main__":
_demo()
| 27.599119 | 122 | 0.559537 |
4a1d63909ecde2f82c44ce064e438905a4ea6bab | 2,080 | py | Python | pyjswidgets/pyjamas/DOM.pyqt4.py | takipsizad/pyjs | 54db0ba6747aca744f9f3c3e985a17e913dfb951 | [
"ECL-2.0",
"Apache-2.0"
] | 739 | 2015-01-01T02:05:11.000Z | 2022-03-30T15:26:16.000Z | pyjswidgets/pyjamas/DOM.pyqt4.py | takipsizad/pyjs | 54db0ba6747aca744f9f3c3e985a17e913dfb951 | [
"ECL-2.0",
"Apache-2.0"
] | 33 | 2015-03-25T23:17:04.000Z | 2021-08-19T08:25:22.000Z | pyjswidgets/pyjamas/DOM.pyqt4.py | takipsizad/pyjs | 54db0ba6747aca744f9f3c3e985a17e913dfb951 | [
"ECL-2.0",
"Apache-2.0"
] | 167 | 2015-01-01T22:27:47.000Z | 2022-03-17T13:29:19.000Z | def setStyleAttribute(element, name, value):
element.setStyleAttribute(name, value)
def insertChild(element, insert_element, beforeIndex):
element.insertChild(insert_element, beforeIndex)
def getParent(element):
return element.getParent()
def setAttribute(element, key, value):
element.setAttribute(key, value)
def getAttribute(element, key):
return element.getAttribute(key)
def setElemAttribute(element, key, value):
element.setAttribute(key, value)
def setInnerHTML(element, html):
element.setInnerHTML(html)
def setInnerText(element, text):
element.setInnerText(text)
def getInnerHTML(element):
return element.getInnerHTML()
def sinkEvents(element, bits):
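    # Only event bits that differ from what was previously sunk for this element
    # are processed below, so listeners are not re-attached for unchanged bits.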
mask = getEventsSunk(element) ^ bits
eventbitsmap[element] = bits
if not mask:
return
bits = mask
if not bits:
return
events = []
if bits & ONBLUR:
events += ["onblur"]
if bits & ONCHANGE:
events += ["onchange"]
if bits & ONCLICK:
events += ["onclick"]
if bits & ONCONTEXTMENU:
events += ["oncontextmenu"]
if bits & ONDBLCLICK:
events += ["ondblclick"]
if bits & ONERROR:
events += ["onerror"]
if bits & ONFOCUS:
events += ["onfocus"]
if bits & ONKEYDOWN:
events += ["onkeydown"]
if bits & ONKEYPRESS:
events += ["onkeypress"]
if bits & ONKEYUP:
events += ["onkeyup"]
if bits & ONLOAD:
events += ["onload"]
if bits & ONLOSECAPTURE:
events += ["onclosecapture"]
if bits & ONMOUSEDOWN:
events += ["onmousedown"]
if bits & ONMOUSEMOVE:
events += ["onmousemove"]
if bits & ONMOUSEOUT:
events += ["onmouseout"]
if bits & ONMOUSEOVER:
events += ["onmouseover"]
if bits & ONMOUSEUP:
events += ["onmouseup"]
if bits & ONSCROLL:
events += ["onscroll"]
mf = get_main_frame()
for e in events:
dispatch = lambda elem, ev: _dispatchEvent(elem, ev, None)
mf.addEventListener(element, e, dispatch)
| 23.370787 | 66 | 0.616827 |
4a1d643be16e11b8ae8602d672f9dd475d94ff6d | 1,521 | py | Python | integration-test/704-exclude-null-values-for-buildings.py | rinnyB/vector-datasource | 024909ed8245a4ad4a25c908413ba3602de6c335 | [
"MIT"
] | null | null | null | integration-test/704-exclude-null-values-for-buildings.py | rinnyB/vector-datasource | 024909ed8245a4ad4a25c908413ba3602de6c335 | [
"MIT"
] | 2 | 2021-03-31T20:22:37.000Z | 2021-12-13T20:50:11.000Z | integration-test/704-exclude-null-values-for-buildings.py | rinnyB/vector-datasource | 024909ed8245a4ad4a25c908413ba3602de6c335 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
from shapely.wkt import loads as wkt_loads
import dsl
from . import FixtureTest
class ExcludeNullValuesForBuildings(FixtureTest):
def test_alcatraz(self):
# way 128245373 - alcatraz prison main building
self.generate_fixtures(dsl.way(128245373, wkt_loads('POLYGON ((-122.423257444719 37.82664991474569, -122.423191867704 37.82669851905227, -122.423154767282 37.82672675920409, -122.422982739905 37.82686079334499, -122.422942405549 37.82689236831889, -122.422807029436 37.82678245896928, -122.422727977691 37.82684617661089, -122.42232652059 37.82653000033398, -122.422618742552 37.82630507159251, -122.422648297125 37.8262838558809, -122.42277495958 37.82639369501761, -122.422865689424 37.82632579060928, -122.423257444719 37.82664991474569))'), {u'building': u'yes', u'source': u'openstreetmap.org', u'way_area': u'4693.31', u'name': u'Main Prison', u'alt_name': u'Cellhouse'})) # noqa
self.assert_has_feature(
16, 10481, 25319, 'buildings',
{'kind': 'building'})
# but that same building should not have any "null" values in it
with self.features_in_tile_layer(
16, 10481, 25319, 'buildings') as features:
for f in features:
for k, v in f['properties'].items():
self.assertFalse(
v is None,
'%r is null, but there should be no null values in '
'feature %r' % (k, f['properties']))
| 58.5 | 694 | 0.661407 |
4a1d6489ab456eee6a6305e893ae83ec9a6ec9bd | 4,523 | py | Python | pythonGA/tsp_ga.py | shiftrepo/OperationBreakingDawn | 94a730ba67fa72c4305ec854e22ee6b028068254 | [
"Unlicense"
] | 1 | 2022-02-04T09:46:04.000Z | 2022-02-04T09:46:04.000Z | pythonGA/tsp_ga.py | shiftrepo/OperationBreakingDawn | 94a730ba67fa72c4305ec854e22ee6b028068254 | [
"Unlicense"
] | 42 | 2019-01-03T04:50:01.000Z | 2021-09-23T23:23:39.000Z | pythonGA/tsp_ga.py | shiftrepo/OperationBreakingDawn | 94a730ba67fa72c4305ec854e22ee6b028068254 | [
"Unlicense"
] | null | null | null | #!python3.6.8
from time import sleep
import sys
import random
import math
import copy
import tkinter
SCREEN_WIDTH = 150
SCREEN_HEIGHT = 150
POINTS_SIZE = 50
POPULATION_SIZE = 20
GENERATION = 2000
MUTATE = 0.3
SELECT_RATE = 0.5
def calc_distance(points, route):
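    # Total tour length: sum of the euclidean legs between consecutive cities on
    # the route, including the closing leg from the last city back to the first.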
distance = 0
for i in range(POINTS_SIZE):
(x0, y0) = points[route[i]]
if i == POINTS_SIZE - 1:
(x1, y1) = points[route[0]]
else:
(x1, y1) = points[route[i+1]]
distance = distance + math.sqrt((x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1))
return distance
def sort_fitness(points, population):
fp = []
for individual in population:
fitness = calc_distance(points, individual)
fp.append((fitness, individual))
fp.sort()
sorted_population = []
for fitness, individual in fp:
sorted_population.append(individual)
return sorted_population
def selection(points, population):
sorted_population = sort_fitness(points, population)
n = int(POPULATION_SIZE * SELECT_RATE)
return sorted_population[0:n]
def crossover(ind1, ind2):
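    # Order-style crossover: copy the slice [r1, r2) of cities from ind2, then fill
    # the remaining positions with ind1's cities, skipping any city already taken,
    # so the child remains a valid permutation of all cities.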
r1 = random.randint(0, POINTS_SIZE - 1)
r2 = random.randint(r1 + 1, POINTS_SIZE)
flag = [0] * POINTS_SIZE
ind = [-1] * POINTS_SIZE
for i in range(r1, r2):
city = ind2[i]
ind[i] = city
flag[city] = 1
for i in list(range(0, r1)) + list(range(r2, POINTS_SIZE)):
city = ind1[i]
if flag[city] == 0:
ind[i] = city
flag[city] = 1
for i in range(0, POINTS_SIZE):
if ind[i] == -1:
for j in range(0, POINTS_SIZE):
city = ind1[j]
if flag[city] == 0:
ind[i] = city
flag[city] = 1
break
return ind
def mutation(ind1):
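    # Inversion mutation: with probability MUTATE, reverse a randomly chosen
    # sub-route of the copied individual; otherwise the copy is returned unchanged.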
ind2 = copy.deepcopy(ind1)
if random.random() < MUTATE:
city1 = random.randint(0, POINTS_SIZE - 1)
city2 = random.randint(0, POINTS_SIZE - 1)
if city1 > city2:
city1, city2 = city2, city1
ind2[city1:city2+1] = reversed(ind1[city1:city2+1])
return ind2
root = tkinter.Tk()
root.title(u"TSP GA")
width_size = 5
height_size = math.ceil(POPULATION_SIZE / width_size)
window_width = SCREEN_WIDTH * width_size
window_height = SCREEN_HEIGHT * height_size
root.geometry(str(window_width) + "x" + str(window_height))
canvas_list = []
for i in range(POPULATION_SIZE):
canvas = tkinter.Canvas(root, width=SCREEN_WIDTH, height=SCREEN_HEIGHT)
cx = i % width_size * SCREEN_WIDTH
cy = int(i / width_size) * SCREEN_HEIGHT
canvas.place(x=cx, y=cy)
canvas_list.append(canvas)
points = []
for i in range(POINTS_SIZE):
points.append((random.random(), random.random()))
population = []
for i in range(POPULATION_SIZE):
individual = list(range(POINTS_SIZE))
random.shuffle(individual)
population.append(individual)
for generation in range(GENERATION):
root.title(u"TSP GA (" + str(generation + 1) + u") generation")
population = selection(points, population)
n = POPULATION_SIZE - len(population)
for i in range(n):
r1 = random.randint(0, len(population) - 1)
r2 = random.randint(0, len(population) - 1)
individual = crossover(population[r1], population[r2])
individual = mutation(individual)
population.append(individual)
    population = sort_fitness(points, population)  # keep best-first order for the display below
for ind in range(POPULATION_SIZE):
canvas = canvas_list[ind]
route = population[ind]
dist = calc_distance(points, route)
canvas.delete('all')
for i in range(POINTS_SIZE):
(x0, y0) = points[route[i]]
if i == POINTS_SIZE - 1:
(x1, y1) = points[route[0]]
else:
(x1, y1) = points[route[i + 1]]
canvas.create_line(x0 * SCREEN_WIDTH, \
y0 * SCREEN_HEIGHT, \
x1 * SCREEN_WIDTH, \
y1 * SCREEN_HEIGHT, \
fill="black", width=1)
canvas.create_oval(x0 * SCREEN_WIDTH - 2, \
y0 * SCREEN_HEIGHT - 2, \
x0 * SCREEN_WIDTH + 2, \
y0 * SCREEN_HEIGHT + 2, \
fill="blue")
canvas.create_rectangle(0, 0, SCREEN_WIDTH - 1, SCREEN_HEIGHT - 1, outline="gray", width=1)
canvas.create_text(5, 5, text = "{:.2f}".format(dist), anchor = "nw", fill = "red")
canvas.update()
root.mainloop() | 26.144509 | 99 | 0.586115 |
4a1d64b5e8b245c0ecf1b93fe5567cd4200b51df | 4,989 | py | Python | angr/exploration_techniques/spiller.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | [
"BSD-2-Clause"
] | 2 | 2018-05-02T17:41:36.000Z | 2020-05-18T02:49:16.000Z | angr/exploration_techniques/spiller.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | [
"BSD-2-Clause"
] | null | null | null | angr/exploration_techniques/spiller.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | [
"BSD-2-Clause"
] | 1 | 2022-02-10T02:29:38.000Z | 2022-02-10T02:29:38.000Z | import logging
l = logging.getLogger("angr.exploration_techniques.spiller")
import ana
from . import ExplorationTechnique
class SpilledState(ana.Storable):
def __init__(self, state):
self.state = state
def _ana_getstate(self):
return (self.state,)
def _ana_setstate(self, s):
self.state = s[0]
class Spiller(ExplorationTechnique):
"""
Automatically spill states out. It can spill out states to a different stash, spill
them out to ANA, or first do the former and then (after enough states) the latter.
"""
def __init__(
self,
src_stash="active", min=5, max=10, #pylint:disable=redefined-builtin
staging_stash="spill_stage", staging_min=10, staging_max=20,
pickle_callback=None, unpickle_callback=None, priority_key=None
):
"""
Initializes the spiller.
@param max: the number of states that are *not* spilled
@param src_stash: the stash from which to spill states (default: active)
@param staging_stash: the stash *to* which to spill states (default: "spill_stage")
@param staging_max: the number of states that can be in the staging stash before things get spilled to ANA (default: None. If staging_stash is set, then this means unlimited, and ANA will not be used).
@param priority_key: a function that takes a state and returns its numberical priority (MAX_INT is lowest priority). By default, self.state_priority will be used, which prioritizes by object ID.
"""
super(Spiller, self).__init__()
self.max = max
self.min = min
self.src_stash = src_stash
self.staging_stash = staging_stash
self.staging_max = staging_max
self.staging_min = staging_min
# various callbacks
self.priority_key = priority_key
self.unpickle_callback = unpickle_callback
self.pickle_callback = pickle_callback
# tracking of pickled stuff
self._pickled_states = [ ]
self._ever_pickled = 0
self._ever_unpickled = 0
def _unpickle(self, n):
self._pickled_states.sort()
unpickled = [ SpilledState.ana_load(pid).state for _,pid in self._pickled_states[:n] ]
self._pickled_states[:n] = [ ]
self._ever_unpickled += len(unpickled)
if self.unpickle_callback:
            # call the callback for each restored state; a bare map() would be lazy on Python 3
            for u in unpickled:
                self.unpickle_callback(u)
return unpickled
def _get_priority(self, state):
return (self.priority_key or self.state_priority)(state)
def _pickle(self, states):
if self.pickle_callback:
            # call the callback for each state about to be pickled (eager, unlike map())
            for s in states:
                self.pickle_callback(s)
wrappers = [ SpilledState(state) for state in states ]
self._ever_pickled += len(states)
for w in wrappers:
w.make_uuid()
self._pickled_states += [ (self._get_priority(w.state), w.ana_store()) for w in wrappers ]
def step(self, simgr, stash=None, **kwargs):
simgr = simgr.step(stash=stash, **kwargs)
l.debug("STASH STATUS: active: %d, staging: %d", len(simgr.stashes[self.src_stash]), len(simgr.stashes[self.staging_stash]))
states = simgr.stashes[self.src_stash]
staged_states = simgr.stashes.setdefault(self.staging_stash, [ ]) if self.staging_stash else [ ]
if len(states) < self.min:
missing = (self.max + self.min) / 2 - len(states)
l.debug("Too few states (%d/%d) in stash %s.", len(states), self.min, self.src_stash)
if self.staging_stash:
l.debug("... retrieving states from staging stash (%s)", self.staging_stash)
staged_states.sort(key=self.priority_key or self.state_priority)
states += staged_states[:missing]
staged_states[:missing] = [ ]
else:
l.debug("... staging stash disabled; unpickling states")
states += self._unpickle(missing)
if len(states) > self.max:
l.debug("Too many states (%d/%d) in stash %s", len(states), self.max, self.src_stash)
states.sort(key=self.priority_key or self.state_priority)
staged_states += states[self.max:]
states[self.max:] = [ ]
# if we have too few staged states, unpickle up to halfway between max and min
if len(staged_states) < self.staging_min:
l.debug("Too few states in staging stash (%s)", self.staging_stash)
staged_states += self._unpickle((self.staging_min + self.staging_max) / 2 - len(staged_states))
if len(staged_states) > self.staging_max:
l.debug("Too many states in staging stash (%s)", self.staging_stash)
self._pickle(staged_states[self.staging_max:])
staged_states[self.staging_max:] = [ ]
simgr.stashes[self.src_stash] = states
simgr.stashes[self.staging_stash] = staged_states
return simgr
@staticmethod
def state_priority(state):
return id(state)
| 41.231405 | 209 | 0.644217 |
4a1d65dfd82074b1a8bbe95dbc21e9e390c68675 | 528 | py | Python | Algo and DSA/LeetCode-Solutions-master/Python/minimum-score-triangulation-of-polygon.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | Algo and DSA/LeetCode-Solutions-master/Python/minimum-score-triangulation-of-polygon.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | Algo and DSA/LeetCode-Solutions-master/Python/minimum-score-triangulation-of-polygon.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | # Time: O(n^3)
# Space: O(n^2)
class Solution(object):
def minScoreTriangulation(self, A):
"""
:type A: List[int]
:rtype: int
"""
dp = [[0 for _ in xrange(len(A))] for _ in xrange(len(A))]
for p in xrange(3, len(A)+1):
for i in xrange(len(A)-p+1):
j = i+p-1;
dp[i][j] = float("inf")
for k in xrange(i+1, j):
dp[i][j] = min(dp[i][j], dp[i][k]+dp[k][j] + A[i]*A[j]*A[k])
return dp[0][-1]
| 29.333333 | 80 | 0.416667 |
4a1d6689ba9ac5871f9e1d98f0ea06fd69e1af09 | 1,633 | py | Python | python-scripts/contours/Find-contours-of-depressions.py | JiriVales/orienteering-tools | 3bac9e75c2116be35d3a59ffa203e8e736a2b66c | [
"BSD-3-Clause"
] | 5 | 2018-05-31T07:41:51.000Z | 2020-04-16T09:20:34.000Z | python-scripts/contours/Find-contours-of-depressions.py | JiriVales/automatic-creation-orienteering-map | 3bac9e75c2116be35d3a59ffa203e8e736a2b66c | [
"BSD-3-Clause"
] | null | null | null | python-scripts/contours/Find-contours-of-depressions.py | JiriVales/automatic-creation-orienteering-map | 3bac9e75c2116be35d3a59ffa203e8e736a2b66c | [
"BSD-3-Clause"
] | null | null | null | ##Find contours of depressions=name
##numbermetersofenlargethedepressionarea=number9.0
##contours=vector
##dem=raster
##contoursofdepressions=output vector
outputs_MODELERFIND-DEPRESSION-AREA_1=processing.runalg('modeler:find-depression-area', dem,None)
outputs_QGISFIELDCALCULATOR_2=processing.runalg('qgis:fieldcalculator', contours,'depress',1,1.0,0.0,True,'0',None)
outputs_QGISPOLYGONSTOLINES_1=processing.runalg('qgis:polygonstolines', outputs_MODELERFIND-DEPRESSION-AREA_1['OUTPUT_ALGQGISEXTRACTBYATTRIBUTE_1'],None)
outputs_QGISFIXEDDISTANCEBUFFER_1=processing.runalg('qgis:fixeddistancebuffer', outputs_QGISPOLYGONSTOLINES_1['OUTPUT'],numbermetersofenlargethedepressionarea,5.0,False,None)
outputs_QGISMERGEVECTORLAYERS_1=processing.runalg('qgis:mergevectorlayers', [outputs_MODELERFIND-DEPRESSION-AREA_1['OUTPUT_ALGQGISEXTRACTBYATTRIBUTE_1'],outputs_QGISFIXEDDISTANCEBUFFER_1['OUTPUT']],None)
outputs_QGISDISSOLVE_1=processing.runalg('qgis:dissolve', outputs_QGISMERGEVECTORLAYERS_1['OUTPUT'],True,None,None)
outputs_QGISSELECTBYLOCATION_1=processing.runalg('qgis:selectbylocation', contours,outputs_QGISDISSOLVE_1['OUTPUT'],['within'],0.0,0)
outputs_QGISFIELDCALCULATOR_1=processing.runalg('qgis:fieldcalculator', outputs_QGISSELECTBYLOCATION_1['OUTPUT'],'depress',1,1.0,0.0,True,'1',None)
outputs_QGISMERGEVECTORLAYERS_2=processing.runalg('qgis:mergevectorlayers', [outputs_QGISFIELDCALCULATOR_1['OUTPUT_LAYER'],outputs_QGISFIELDCALCULATOR_2['OUTPUT_LAYER']],None)
outputs_QGISDELETEDUPLICATEGEOMETRIES_1=processing.runalg('qgis:deleteduplicategeometries', outputs_QGISMERGEVECTORLAYERS_2['OUTPUT'],contoursofdepressions) | 108.866667 | 203 | 0.861604 |
4a1d66a3502cb5bf125e8129cbcca624eac74cd2 | 5,121 | py | Python | src/backuper/diskremotestorage.py | EduardoLemos567/PyBackuper | c339ffbb25690597e64bcfce5347d8d6e9dc27f0 | [
"MIT"
] | null | null | null | src/backuper/diskremotestorage.py | EduardoLemos567/PyBackuper | c339ffbb25690597e64bcfce5347d8d6e9dc27f0 | [
"MIT"
] | null | null | null | src/backuper/diskremotestorage.py | EduardoLemos567/PyBackuper | c339ffbb25690597e64bcfce5347d8d6e9dc27f0 | [
"MIT"
] | null | null | null | """
:license:
license is described in the LICENSE file provided.
A copy can be accessed in: https://github.com/EduardoLemos567/PyBackuper/blob/master/LICENSE
:author:
Eduardo Lemos de Moraes
"""
from . import remotestorage
class DiskRemoteStorage(remotestorage.RemoteStorage):
FILES_FOLDERNAME = "files"
def __init__(self, app):
super().__init__(app)
def setup(self):
files_folder = self.app.config.remote_folder_path / "files"
if not files_folder.exists():
files_folder.mkdir()
return True
def has_files(self):
files_folder = self.app.config.remote_folder_path / self.FILES_FOLDERNAME
for node in files_folder.iterdir():
if node.is_file():
return True
return False
def has_manifest(self):
return (
self.app.config.remote_folder_path / self.app.MANIFEST_FILENAME
).exists()
def set_manifest(self, manifest_instance):
self.app.local_storage.pickle_pack(
manifest_instance,
self.app.config.remote_folder_path / self.app.MANIFEST_FILENAME,
)
return True
def get_manifest(self):
if (self.app.config.remote_folder_path / self.app.MANIFEST_FILENAME).exists():
return self.app.local_storage.unpack_unpickle(
self.app.config.remote_folder_path / self.app.MANIFEST_FILENAME
)
else:
return None
def delete_manifest(self):
manifest_path = self.app.config.remote_folder_path / self.app.MANIFEST_FILENAME
if manifest_path.exists():
manifest_path.unlink()
return True
return False
def has_salt(self):
return (self.app.config.remote_folder_path / self.app.SALT_FILENAME).exists()
def get_salt(self):
salt_path = self.app.config.remote_folder_path / self.app.SALT_FILENAME
if salt_path.exists():
with salt_path.open("rb") as file:
return file.read()
def set_salt(self, salt):
salt_path = self.app.config.remote_folder_path / self.app.SALT_FILENAME
with salt_path.open("wb") as file:
file.write(salt)
return True
def delete_salt(self):
salt_path = self.app.config.remote_folder_path / self.app.SALT_FILENAME
if salt_path.exists():
salt_path.unlink()
return True
return False
def get_file(self, signature, dest_abs_path):
files_folder = self.app.config.remote_folder_path / self.FILES_FOLDERNAME
for node_path in files_folder.iterdir():
if node_path.is_file() and node_path.stem == signature:
self.app.local_storage.unpack(node_path, dest_abs_path)
return True
self.app.logger.log(9, "file get, signature: {}".format(signature))
return False
def set_file(self, src_abs_path, signature):
if self.file_exists(signature):
self.app.logger.warn(
"set_file: Cant set a file that already exists, ignoring..."
)
return False
self.app.local_storage.pack(
src_abs_path,
self.app.config.remote_folder_path
/ self.FILES_FOLDERNAME
/ (signature + self.app.PACKED_FILE_EXTENSION),
)
self.app.logger.log(9, "file set, signature: {}".format(signature))
return True
def delete_file(self, signature):
if not self.file_exists(signature):
self.app.logger.debug(
"delete_file_node: Trying to delete a remote file that doesnt exist, ignoring..."
)
return False
(
self.app.config.remote_folder_path
/ self.FILES_FOLDERNAME
/ (signature + self.app.PACKED_FILE_EXTENSION)
).unlink()
return True
def file_exists(self, signature):
return (
self.app.config.remote_folder_path
/ self.FILES_FOLDERNAME
/ (signature + self.app.PACKED_FILE_EXTENSION)
).exists()
def clear_all_files(self):
files_folder = self.app.config.remote_folder_path / self.FILES_FOLDERNAME
for node_path in files_folder.iterdir():
if node_path.is_file():
node_path.unlink()
return True
def check_manifest_consistency(self, manifest_instance):
result = True
for file_node in manifest_instance.iterate(include_folders=False):
if not self.file_exists(file_node.signature):
self.app.logger.warn(
"File content not found on remote: {}".format(
file_node.get_path_str()
)
)
result = False
return result
def get_remote_signatures_set(self):
s = set()
files_folder = self.app.config.remote_folder_path / self.FILES_FOLDERNAME
for node_path in files_folder.iterdir():
if node_path.is_file():
s.add(node_path.name)
return s
| 34.369128 | 97 | 0.614724 |
4a1d677a429e18afaed2304a8942a2c0d7951777 | 748 | py | Python | tradearn/products/migrations/0001_initial.py | didils/tradmarkearn | a2a49314639a419db1b7b414319745c81fcc26e1 | [
"MIT"
] | null | null | null | tradearn/products/migrations/0001_initial.py | didils/tradmarkearn | a2a49314639a419db1b7b414319745c81fcc26e1 | [
"MIT"
] | null | null | null | tradearn/products/migrations/0001_initial.py | didils/tradmarkearn | a2a49314639a419db1b7b414319745c81fcc26e1 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.13 on 2019-04-06 07:16
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product', models.CharField(blank=True, max_length=255, null=True)),
('category', models.IntegerField(null=True)),
('code', models.CharField(blank=True, max_length=255, null=True)),
('product_en', models.CharField(blank=True, max_length=255, null=True)),
],
),
]
| 29.92 | 114 | 0.586898 |
4a1d67f513c45cdd3a0949addbe7cc2e63ac3c2c | 804 | py | Python | seafood_project/urls.py | masonrybits/django_intro | de9046a0c27ffa442c0c3fb2e08c70062ce130c4 | [
"MIT"
] | null | null | null | seafood_project/urls.py | masonrybits/django_intro | de9046a0c27ffa442c0c3fb2e08c70062ce130c4 | [
"MIT"
] | 6 | 2020-06-05T20:48:31.000Z | 2021-09-22T18:30:51.000Z | seafood_project/urls.py | masonrybits/django_intro | de9046a0c27ffa442c0c3fb2e08c70062ce130c4 | [
"MIT"
] | null | null | null | """seafood_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('oyster.urls')),
]
| 34.956522 | 77 | 0.705224 |
4a1d689e5b16c45b8a0026c5602a9caa10c960c0 | 85 | py | Python | tests/roots/test-ext-viewcode-find/not_a_package/__init__.py | zhsj/sphinx | 169297d0b76bf0b503033dadeb14f9a2b735e422 | [
"BSD-2-Clause"
] | 1 | 2021-06-17T13:38:42.000Z | 2021-06-17T13:38:42.000Z | tests/roots/test-ext-viewcode-find/not_a_package/__init__.py | zhsj/sphinx | 169297d0b76bf0b503033dadeb14f9a2b735e422 | [
"BSD-2-Clause"
] | null | null | null | tests/roots/test-ext-viewcode-find/not_a_package/__init__.py | zhsj/sphinx | 169297d0b76bf0b503033dadeb14f9a2b735e422 | [
"BSD-2-Clause"
] | 1 | 2021-07-23T12:09:13.000Z | 2021-07-23T12:09:13.000Z | from __future__ import absolute_import
from .submodule import func1, Class1 # NOQA
| 21.25 | 44 | 0.811765 |
4a1d697c97712437a89bbc2636a9b76f121ba532 | 643 | py | Python | tests/io/test_folders.py | midpipps/pyTenable | 06aabe929ae482080dc2a63f56234b9a873f78ca | [
"MIT"
] | null | null | null | tests/io/test_folders.py | midpipps/pyTenable | 06aabe929ae482080dc2a63f56234b9a873f78ca | [
"MIT"
] | null | null | null | tests/io/test_folders.py | midpipps/pyTenable | 06aabe929ae482080dc2a63f56234b9a873f78ca | [
"MIT"
] | null | null | null | from .fixtures import *
from tenable.errors import *
import uuid
def test_folder_name_typeerror(api):
with pytest.raises(TypeError):
api.folders.create(1)
def test_create(api, folder):
assert isinstance(folder, int)
def test_delete(api, folder):
api.folders.delete(folder)
assert folder not in [f['id'] for f in api.folders.list()]
def test_edit_name_typeerror(api, folder):
with pytest.raises(TypeError):
api.folders.edit(folder, 1)
def test_edit(api, folder):
api.folders.edit(folder, str(uuid.uuid4())[:20])
def test_list(api, folder):
assert folder in [f['id'] for f in api.folders.list()] | 26.791667 | 62 | 0.70451 |
4a1d69b36d6df8e0cf8b0cf154612c37ba79678b | 231 | py | Python | ramda/once_test.py | jakobkolb/ramda.py | 982b2172f4bb95b9a5b09eff8077362d6f2f0920 | [
"MIT"
] | 56 | 2018-08-06T08:44:58.000Z | 2022-03-17T09:49:03.000Z | ramda/once_test.py | jakobkolb/ramda.py | 982b2172f4bb95b9a5b09eff8077362d6f2f0920 | [
"MIT"
] | 28 | 2019-06-17T11:09:52.000Z | 2022-02-18T16:59:21.000Z | ramda/once_test.py | jakobkolb/ramda.py | 982b2172f4bb95b9a5b09eff8077362d6f2f0920 | [
"MIT"
] | 5 | 2019-09-18T09:24:38.000Z | 2021-07-21T08:40:23.000Z | from ramda.private.asserts import *
from ramda.once import once
from ramda.add import add
add_one_once = once(add(1))
def once_test():
assert_equal(add_one_once(10), 11)
assert_equal(add_one_once(add_one_once(50)), 11)
| 19.25 | 52 | 0.748918 |
4a1d69d199ba1240498427e9cce65ae7b4683958 | 2,931 | py | Python | neopixel.py | MrYsLab/pseudo-microbit | ed797ca7679935a646794a5a0d387076c5544957 | [
"MIT"
] | 10 | 2017-12-30T23:44:52.000Z | 2021-02-23T12:30:25.000Z | neopixel.py | MrYsLab/pseudo-microbit | ed797ca7679935a646794a5a0d387076c5544957 | [
"MIT"
] | 7 | 2020-03-08T22:58:03.000Z | 2021-11-28T14:56:12.000Z | neopixel.py | MrYsLab/pseudo-microbit | ed797ca7679935a646794a5a0d387076c5544957 | [
"MIT"
] | 4 | 2018-06-14T15:03:56.000Z | 2020-11-24T18:49:29.000Z | """
The neopixel module lets you use Neopixel (WS2812) individually
addressable RGB LED strips with the Microbit.
Note to use the neopixel module, you need to import it separately with:
import neopixel
Note
From our tests, the Microbit Neopixel module can drive up to around 256
Neopixels. Anything above that and you may experience weird bugs and issues.
NeoPixels are fun strips of multi-coloured programmable LEDs.
This module contains everything to plug them into a micro:bit
and create funky displays, art and games
Warning
Do not use the 3v connector on the Microbit to power any more than
8 Neopixels at a time.
If you wish to use more than 8 Neopixels, you must use a separate
3v-5v power supply for the Neopixel power pin.
Operations
Writing the colour doesn’t update the display (use show() for that).
np[0] = (255, 0, 128) # first element
np[-1] = (0, 255, 0) # last element
np.show() # only now will the updated value be shown
To read the colour of a specific pixel just reference it.
print(np[0])
Using Neopixels
Interact with Neopixels as if they were a list of tuples.
Each tuple represents the RGB (red, green and blue) mix of colours
for a specific pixel. The RGB values can range between 0 to 255.
For example, initialise a strip of 8 neopixels on a strip connected
to pin0 like this:
import neopixel
np = neopixel.NeoPixel(pin0, 8)
Set pixels by indexing them (like with a Python list). For instance,
to set the first pixel to full brightness red, you would use:
np[0] = (255, 0, 0)
Or the final pixel to purple:
np[-1] = (255, 0, 255)
Get the current colour value of a pixel by indexing it. For example,
to print the first pixel’s RGB value use:
print(np[0])
Finally, to push the new colour data to your Neopixel strip, use the .show() function:
np.show()
If nothing is happening, it’s probably because you’ve forgotten this final step..!
Note
If you’re not seeing anything change on your Neopixel strip,
make sure you have show() at least somewhere otherwise your updates won’t be shown.
"""
from typing import Tuple, List, Union
from microbit import MicroBitDigitalPin
class NeoPixel:
def __init__(self, pin: MicroBitDigitalPin, n: int):
"""
Initialise a new strip of n number of neopixel LEDs controlled via pin pin.
Each pixel is addressed by a position (starting from 0).
Neopixels are given RGB (red, green, blue) values between 0-255 as a tuple.
For example, (255,255,255) is white.
"""
def clear(self) -> None:
"""
Clear all the pixels.
"""
def show(self) -> None:
"""
Show the pixels. Must be called for any updates to become visible.
"""
def __len__(self) -> int:
pass
def __getitem__(self, key) -> Tuple[int, int, int]:
pass
def __setitem__(self, key: int, value: Union[Tuple[int, int, int], List[int]]):
pass
| 25.710526 | 86 | 0.706585 |
4a1d6c05cbcb03ac1c42da511f42f2ef3558a33d | 2,169 | py | Python | statsmodels/stats/tests/test_effectsize.py | madhushree14/statsmodels | 04f00006a7aeb1c93d6894caa420698400da6c33 | [
"BSD-3-Clause"
] | 6,931 | 2015-01-01T11:41:55.000Z | 2022-03-31T17:03:24.000Z | statsmodels/stats/tests/test_effectsize.py | madhushree14/statsmodels | 04f00006a7aeb1c93d6894caa420698400da6c33 | [
"BSD-3-Clause"
] | 6,137 | 2015-01-01T00:33:45.000Z | 2022-03-31T22:53:17.000Z | statsmodels/stats/tests/test_effectsize.py | madhushree14/statsmodels | 04f00006a7aeb1c93d6894caa420698400da6c33 | [
"BSD-3-Clause"
] | 2,608 | 2015-01-02T21:32:31.000Z | 2022-03-31T07:38:30.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 5 13:13:59 2020
Author: Josef Perktold
License: BSD-3
"""
from scipy import stats
from numpy.testing import assert_allclose
from statsmodels.stats.effect_size import (
_noncentrality_chisquare, _noncentrality_f, _noncentrality_t)
def test_noncent_chi2():
# > lochi(7.5,2,.95)
# [1] 0.03349255 0.97499458
# > hichi(7.5,2,.95)
# [1] 20.76049805 0.02500663
chi2_stat, df = 7.5, 2
ci_nc = [0.03349255, 20.76049805]
res = _noncentrality_chisquare(chi2_stat, df, alpha=0.05)
assert_allclose(res.confint, ci_nc, rtol=0.005)
# verify umvue unbiased
mean = stats.ncx2.mean(df, res.nc)
assert_allclose(chi2_stat, mean, rtol=1e-8)
assert_allclose(stats.ncx2.cdf(chi2_stat, df, res.confint), [0.975, 0.025],
rtol=1e-8)
def test_noncent_f():
# F(4, 75) = 3.5, confidence level = .95, two-sided CI:
# > lof(3.5,4,75,.95)
# [1] 0.7781436 0.9750039
# > hif(3.5,4,75,.95)
# [1] 29.72949219 0.02499965
f_stat, df1, df2 = 3.5, 4, 75
ci_nc = [0.7781436, 29.72949219]
res = _noncentrality_f(f_stat, df1, df2, alpha=0.05)
assert_allclose(res.confint, ci_nc, rtol=0.005)
# verify umvue unbiased
mean = stats.ncf.mean(df1, df2, res.nc)
assert_allclose(f_stat, mean, rtol=1e-8)
assert_allclose(stats.ncf.cdf(f_stat, df1, df2, res.confint),
[0.975, 0.025], rtol=1e-10)
def test_noncent_t():
# t(98) = 1.5, confidence level = .95, two-sided CI:
# > lot(1.5,98,.95)
# [1] -0.4749756 0.9750024
# > hit(1.5,98,.95)
# [1] 3.467285 0.025005
# > conf.limits.nct(1.5,98,.95)
# Lower.Limit Prob.Low.Limit Upper.Limit Prob.Up.Limit
# Values -0.474934 0.975 3.467371 0.02499999
t_stat, df = 1.5, 98
ci_nc = [-0.474934, 3.467371]
res = _noncentrality_t(t_stat, df, alpha=0.05)
assert_allclose(res.confint, ci_nc, rtol=0.005)
# verify umvue unbiased
mean = stats.nct.mean(df, res.nc)
assert_allclose(t_stat, mean, rtol=1e-8)
assert_allclose(stats.nct.cdf(t_stat, df, res.confint), [0.975, 0.025],
rtol=1e-10)
| 28.539474 | 79 | 0.625173 |
4a1d6c384cd52a3c30d75fba19f923f82c795bb1 | 14,167 | py | Python | tests/trainer/optimization/test_parity_automatic_optimization.py | nightlessbaron/pytorch-lightning | 239bea5c29cef0d1a0cfb319de5dbc9227aa2a53 | [
"Apache-2.0"
] | 3 | 2021-01-28T14:04:17.000Z | 2021-09-08T12:00:11.000Z | tests/trainer/optimization/test_parity_automatic_optimization.py | nightlessbaron/pytorch-lightning | 239bea5c29cef0d1a0cfb319de5dbc9227aa2a53 | [
"Apache-2.0"
] | null | null | null | tests/trainer/optimization/test_parity_automatic_optimization.py | nightlessbaron/pytorch-lightning | 239bea5c29cef0d1a0cfb319de5dbc9227aa2a53 | [
"Apache-2.0"
] | 1 | 2021-03-10T23:53:49.000Z | 2021-03-10T23:53:49.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Callable
from copy import deepcopy
from typing import Optional
from unittest.mock import patch
import numpy as np
import pytest
import torch
from torch.optim import Optimizer
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.core.optimizer import LightningOptimizer
from tests.base.boring_model import BoringModel
# TODO:
# For both automatic / manual optimization
# - Test dp, ddp, ddp2
# - Apex
# - Random accumulated_grad_batches (bug)
# - Multiple optimizers
class BaseParityAutomaticOptimizationModel(BoringModel):
def __init__(self, optimizer_cls, optimizer_is_mocked=False, accumulate_grad_batches=None):
super().__init__()
self.optimizer_cls = optimizer_cls
self.losses = []
self.grads = []
self.on_before_zero_grad_count = 0
self.optimizer_is_mocked = optimizer_is_mocked
self.grad_checked = False
self.accumulate_grad_batches = accumulate_grad_batches
def on_before_zero_grad(self, optimizer):
self.on_before_zero_grad_count += 1
if self.layer.weight.grad is not None:
self.grads.append(self.layer.weight.grad.clone())
def configure_optimizers(self):
optimizer = self.optimizer_cls(self.layer.parameters(), lr=0.1)
assert isinstance(optimizer, Optimizer)
return optimizer
def training_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.losses.append(loss.detach().item())
return {"loss": loss}
class AutomaticOptimizationPurePytorchOptimizerModel(BaseParityAutomaticOptimizationModel):
def training_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.losses.append(loss.detach().item())
loss /= float(self.accumulate_grad_batches)
return {"loss": loss}
def optimizer_step(
self,
epoch: int = None,
batch_idx: int = None,
optimizer: Optimizer = None,
optimizer_idx: int = None,
optimizer_closure: Optional[Callable] = None,
on_tpu: bool = None,
using_native_amp: bool = None,
using_lbfgs: bool = None,
) -> None:
"""
Override the optimizer step to define manual optimizer steps, as we use LightningOptimizer wrapper as standard
"""
# Get the unwrapped optimizer
optimizer = optimizer._optimizer
assert not isinstance(optimizer, LightningOptimizer)
optimizer_closure()
assert self.trainer.accumulate_grad_batches == 1
if should_accumulate(self.trainer, self.accumulate_grad_batches):
return
self.grad_checked = True
assert torch.abs(self.layer.weight.grad).sum() > 0
optimizer.step()
self.on_before_zero_grad_count += 1
optimizer.zero_grad()
if not self.optimizer_is_mocked:
assert torch.abs(self.layer.weight.grad).sum() == 0
class AutomaticOptimizationPurePytorchAMPOptimizerModel(BaseParityAutomaticOptimizationModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.scaler = torch.cuda.amp.GradScaler()
def training_step(self, batch, batch_idx):
with torch.cuda.amp.autocast():
output = self.layer(batch)
loss = self.loss(batch, output)
self.losses.append(loss.detach().item())
loss /= float(self.accumulate_grad_batches)
loss = self.scaler.scale(loss)
return {"loss": loss}
def optimizer_step(
self,
epoch: int = None,
batch_idx: int = None,
optimizer: Optimizer = None,
optimizer_idx: int = None,
optimizer_closure: Optional[Callable] = None,
on_tpu: bool = None,
using_native_amp: bool = None,
using_lbfgs: bool = None,
) -> None:
"""
Override the optimizer step to define manual optimizer steps, as we use LightningOptimizer wrapper as standard
"""
# Get the unwrapped optimizer
optimizer = optimizer._optimizer
assert not isinstance(optimizer, LightningOptimizer)
optimizer_closure()
assert self.trainer.accumulate_grad_batches == 1
if should_accumulate(self.trainer, self.accumulate_grad_batches):
return
self.scaler.unscale_(optimizer)
self.grad_checked = True
assert torch.abs(self.layer.weight.grad).sum() > 0
self.scaler.step(optimizer)
self.scaler.update()
self.on_before_zero_grad_count += 1
optimizer.zero_grad()
if not self.optimizer_is_mocked:
assert torch.abs(self.layer.weight.grad).sum() == 0
def should_accumulate(trainer, accumulate_grad_batches):
accumulation_done = (trainer.batch_idx + 1) == trainer.num_training_batches
is_final_batch = (trainer.batch_idx + 1) % accumulate_grad_batches == 0
return not (accumulation_done or is_final_batch)
@pytest.mark.parametrize(["precision", "amp_backend", "gpus"], [
pytest.param(32, "native", 0),
pytest.param(16, "native", 1, marks=pytest.mark.skipif(not torch.cuda.is_available(), reason='Requires GPU')),
])
@pytest.mark.parametrize('accumulate_grad_batches', [1, 7])
def test_lightning_optimizer_and_no_lightning_optimizer_equality(
tmpdir,
precision,
amp_backend,
gpus,
accumulate_grad_batches,
):
if accumulate_grad_batches > 1:
accumulate_grad_batches = np.random.randint(1, accumulate_grad_batches)
vanilla_model_cls = AutomaticOptimizationPurePytorchAMPOptimizerModel if precision == 16 \
else AutomaticOptimizationPurePytorchOptimizerModel
run_lightning_optimizer_equality(
BaseParityAutomaticOptimizationModel,
vanilla_model_cls,
precision=precision,
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=5,
accumulate_grad_batches=accumulate_grad_batches,
amp_backend=amp_backend,
gpus=gpus
)
@pytest.mark.parametrize(["precision", "amp_backend", "gpus"], [
pytest.param(32, "native", 0),
])
@pytest.mark.parametrize('accumulate_grad_batches', [1])
def test_lightning_optimizer_and_no_lightning_optimizer_equality_check_optim_calls(
tmpdir,
precision,
amp_backend,
gpus,
accumulate_grad_batches,
):
vanilla_model_cls = AutomaticOptimizationPurePytorchAMPOptimizerModel if precision == 16 \
else AutomaticOptimizationPurePytorchOptimizerModel
with patch("torch.optim.SGD.step") as mock_sgd_step, \
patch("torch.optim.Adam.step") as mock_adam_step, \
patch("torch.optim.AdamW.step") as mock_adamw_step, \
patch("torch.optim.SGD.zero_grad") as mock_sgd_zero_grad, \
patch("torch.optim.Adam.zero_grad") as mock_adam_zero_grad, \
patch("torch.optim.AdamW.zero_grad") as mock_adamw_zero_grad:
max_epochs = 2
limit_train_batches = 10
# Run equality test using Lightning Optimizer
run_lightning_optimizer_equality(
BaseParityAutomaticOptimizationModel,
vanilla_model_cls,
default_root_dir=tmpdir,
optimizer_is_mocked=True,
accumulate_grad_batches=accumulate_grad_batches,
max_epochs=max_epochs,
limit_train_batches=limit_train_batches,
amp_backend=amp_backend,
precision=precision,
gpus=gpus
)
expected_num_batches = max_epochs * limit_train_batches
assert mock_sgd_step.call_count == (expected_num_batches // accumulate_grad_batches)
assert mock_sgd_zero_grad.call_count == (expected_num_batches // accumulate_grad_batches)
assert mock_sgd_step.call_count == mock_adam_step.call_count
        assert mock_sgd_step.call_count == mock_adamw_step.call_count
assert mock_sgd_zero_grad.call_count == mock_adam_zero_grad.call_count
assert mock_sgd_zero_grad.call_count == mock_adamw_zero_grad.call_count
def run_lightning_optimizer_equality(
lightning_model_cls,
vanilla_model_cls,
optimizer_is_mocked=False,
**trainer_kwargs,
):
trainer_kwargs = {
"limit_val_batches": 0,
**trainer_kwargs
}
expected_num_batches = trainer_kwargs["max_epochs"] * trainer_kwargs["limit_train_batches"]
accumulate_grad_batches = trainer_kwargs["accumulate_grad_batches"]
pl_optimizer_initial_model_weights, pl_optimizer_model = train_specific_optimizer_model(
lightning_model_cls,
torch.optim.SGD,
expected_num_batches=expected_num_batches,
optimizer_is_mocked=optimizer_is_mocked,
enable_pl_optimizer=True,
**trainer_kwargs,
)
no_pl_optimizer_initial_model_weights, no_pl_optimizer_model = train_specific_optimizer_model(
lightning_model_cls,
torch.optim.Adam if optimizer_is_mocked else torch.optim.SGD,
expected_num_batches=expected_num_batches,
optimizer_is_mocked=optimizer_is_mocked,
enable_pl_optimizer=False, # Disable pl optimizer
**trainer_kwargs,
)
pure_pytorch_optimizer_initial_model_weights, pure_pytorch_optimizer_model = train_specific_optimizer_model(
vanilla_model_cls,
torch.optim.AdamW if optimizer_is_mocked else torch.optim.SGD,
expected_num_batches=expected_num_batches,
optimizer_is_mocked=optimizer_is_mocked,
replace_optimizer_step_with_pure_pytorch=True,
**trainer_kwargs,
)
if not optimizer_is_mocked:
assert_model_equality(
pl_optimizer_initial_model_weights=pl_optimizer_initial_model_weights,
pl_optimizer_model=pl_optimizer_model,
no_pl_optimizer_initial_model_weights=no_pl_optimizer_initial_model_weights,
no_pl_optimizer_model=no_pl_optimizer_model,
pure_pytorch_optimizer_initial_model_weights=pure_pytorch_optimizer_initial_model_weights,
pure_pytorch_optimizer_model=pure_pytorch_optimizer_model,
expected_num_batches=expected_num_batches,
precision=trainer_kwargs["precision"]
)
def assert_model_equality(
pl_optimizer_initial_model_weights,
pl_optimizer_model,
no_pl_optimizer_initial_model_weights,
no_pl_optimizer_model,
pure_pytorch_optimizer_initial_model_weights,
pure_pytorch_optimizer_model,
expected_num_batches,
precision,
):
assert torch.equal(pl_optimizer_initial_model_weights, no_pl_optimizer_initial_model_weights)
assert torch.equal(pl_optimizer_initial_model_weights, pure_pytorch_optimizer_initial_model_weights)
assert len(pl_optimizer_model.losses) == expected_num_batches
assert pure_pytorch_optimizer_model.grad_checked
assert pure_pytorch_optimizer_model.losses == no_pl_optimizer_model.losses
assert not torch.isnan(torch.FloatTensor(no_pl_optimizer_model.losses)).any()
assert torch.equal(torch.FloatTensor(no_pl_optimizer_model.losses), torch.FloatTensor(pl_optimizer_model.losses))
assert no_pl_optimizer_model.on_before_zero_grad_count == pl_optimizer_model.on_before_zero_grad_count
for pytorch_grad, no_pl_optim_grad, pl_optim_grad in zip(pure_pytorch_optimizer_model.grads,
no_pl_optimizer_model.grads,
pl_optimizer_model.grads):
assert torch.equal(no_pl_optim_grad, pl_optim_grad), 'Grad parameters are different'
assert torch.equal(pytorch_grad, no_pl_optim_grad), 'Grad parameters are different'
for pytorch_weight, no_pl_optim_weight, pl_optim_weight in zip(pure_pytorch_optimizer_model.parameters(),
no_pl_optimizer_model.parameters(),
pl_optimizer_model.parameters()):
assert torch.equal(no_pl_optim_weight, pl_optim_weight), 'Model parameters are different'
assert torch.equal(pytorch_weight, no_pl_optim_weight), 'Model parameters are different'
# train function
def train_specific_optimizer_model(
model_cls,
optimizer_cls,
expected_num_batches,
enable_pl_optimizer=False,
optimizer_is_mocked=False,
replace_optimizer_step_with_pure_pytorch=False,
**trainer_kwargs,
):
seed_everything(42)
trainer_kwargs = deepcopy(trainer_kwargs)
model = model_cls(
optimizer_cls=optimizer_cls,
optimizer_is_mocked=optimizer_is_mocked,
accumulate_grad_batches=trainer_kwargs["accumulate_grad_batches"],
)
if replace_optimizer_step_with_pure_pytorch:
# When running pure vanilla training, accumulate_grad_batches should be 1.
trainer_kwargs["accumulate_grad_batches"] = 1
trainer_kwargs["precision"] = 32
expected_global_step = expected_num_batches // trainer_kwargs["accumulate_grad_batches"]
initial_weights = model.layer.weight.clone()
model.training_epoch_end = None
trainer = Trainer(
enable_pl_optimizer=enable_pl_optimizer,
**trainer_kwargs
)
trainer.fit(model)
assert np.abs(trainer.global_step - expected_global_step) <= 2
return initial_weights, model
| 38.083333 | 118 | 0.69796 |
4a1d6d258bc96bf6e5e6ba103a578d0dd19d4e12 | 2,362 | py | Python | setup.py | WladimirSidorenko/DASA | 618d9060a5fd6f567628c8dec5e26943c8c49ad4 | [
"MIT"
] | 7 | 2018-05-26T15:47:41.000Z | 2021-02-23T08:08:56.000Z | setup.py | WladimirSidorenko/DASA | 618d9060a5fd6f567628c8dec5e26943c8c49ad4 | [
"MIT"
] | null | null | null | setup.py | WladimirSidorenko/DASA | 618d9060a5fd6f567628c8dec5e26943c8c49ad4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
##################################################################
# Libraries
from setuptools import setup
from glob import glob
from os import path
import codecs
##################################################################
# Variables and Constants
PWD = path.abspath(path.dirname(__file__))
ENCODING = "utf-8"
with codecs.open(path.join(PWD, "README.rst"), encoding="utf-8") as ifile:
long_description = ifile.read()
INSTALL_REQUIRES = []
with codecs.open(path.join(PWD, "requirements.txt"),
encoding=ENCODING) as ifile:
for iline in ifile:
iline = iline.strip()
if iline:
INSTALL_REQUIRES.append(iline)
TEST_REQUIRES = []
with codecs.open(path.join(PWD, "test-requirements.txt"),
encoding=ENCODING) as ifile:
for iline in ifile:
iline = iline.strip()
if iline:
TEST_REQUIRES.append(iline)
##################################################################
# setup()
setup(
name="dasa",
version="0.1.0a0",
description=("Discourse-aware sentiment analysis methods."),
long_description=long_description,
author="Wladimir Sidorenko (Uladzimir Sidarenka)",
author_email="[email protected]",
license="MIT",
url="https://github.com/WladimirSidorenko/DASA",
include_package_data=True,
packages=["dasa"],
package_data={},
install_requires=INSTALL_REQUIRES,
dependency_links=[
],
setup_requires=["pytest-runner"],
tests_require=TEST_REQUIRES,
provides=["dasa (0.1.0a0)"],
scripts=glob(path.join("scripts", "dasa*")),
classifiers=["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: German",
"Operating System :: Unix",
"Operating System :: MacOS",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Text Processing :: Linguistic"],
keywords="sentiment-analysis discourse NLP linguistics")
| 33.742857 | 74 | 0.560119 |
4a1d6d55b85801b5388c7088cbdfb56030b0d7dc | 6,959 | py | Python | reframe/frontend/autodetect.py | rngoodner/reframe | 4b0cb19f72af8f4b9f954133a54240011bd510e8 | [
"BSD-3-Clause"
] | null | null | null | reframe/frontend/autodetect.py | rngoodner/reframe | 4b0cb19f72af8f4b9f954133a54240011bd510e8 | [
"BSD-3-Clause"
] | null | null | null | reframe/frontend/autodetect.py | rngoodner/reframe | 4b0cb19f72af8f4b9f954133a54240011bd510e8 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import json
import jsonschema
import os
import shutil
import tempfile
import reframe as rfm
import reframe.utility.osext as osext
from reframe.core.exceptions import ConfigError
from reframe.core.logging import getlogger
from reframe.core.runtime import runtime
from reframe.core.schedulers import Job
from reframe.utility.cpuinfo import cpuinfo
def _contents(filename):
'''Return the contents of a file.'''
with open(filename) as fp:
return fp.read()
def _log_contents(filename):
filename = os.path.abspath(filename)
getlogger().debug(f'--- {filename} ---\n'
f'{_contents(filename)}\n'
f'--- {filename} ---')
class _copy_reframe:
def __init__(self, prefix):
        self._prefix = prefix
self._workdir = None
def __enter__(self):
self._workdir = os.path.abspath(
tempfile.mkdtemp(prefix='rfm.', dir=self._prefix)
)
paths = ['bin/', 'reframe/', 'bootstrap.sh', 'requirements.txt']
for p in paths:
src = os.path.join(rfm.INSTALL_PREFIX, p)
if os.path.isdir(src):
dst = os.path.join(self._workdir, p)
osext.copytree(src, dst, dirs_exist_ok=True)
else:
shutil.copy2(src, self._workdir)
return self._workdir
def __exit__(self, exc_type, exc_val, exc_tb):
osext.rmtree(self._workdir)
def _subschema(fragment):
'''Create a configuration subschema.'''
full_schema = runtime().site_config.schema
return {
'$schema': full_schema['$schema'],
'defs': full_schema['defs'],
'$ref': fragment
}
def _validate_info(info, schema):
if schema is None:
return info
jsonschema.validate(info, schema)
return info
def _load_info(filename, schema=None):
try:
with open(filename) as fp:
return _validate_info(json.load(fp), schema)
except OSError as e:
getlogger().warning(
f'could not load file: {filename!r}: {e}'
)
return {}
except jsonschema.ValidationError as e:
raise ConfigError(
f'could not validate meta-config file {filename!r}'
) from e
def _save_info(filename, topo_info):
if not topo_info:
return
os.makedirs(os.path.dirname(filename), exist_ok=True)
try:
with open(filename, 'w') as fp:
json.dump(topo_info, fp, indent=2)
except OSError as e:
getlogger().warning(
f'could not save topology file: {filename!r}: {e}'
)
def _is_part_local(part):
return (part.scheduler.registered_name == 'local' and
part.launcher_type.registered_name == 'local')
def _remote_detect(part):
def _emit_script(job):
launcher_cmd = job.launcher.run_command(job)
commands = [
f'./bootstrap.sh',
f'{launcher_cmd} ./bin/reframe --detect-host-topology=topo.json'
]
job.prepare(commands, trap_errors=True)
getlogger().info(
f'Detecting topology of remote partition {part.fullname!r}: '
f'this may take some time...'
)
topo_info = {}
try:
prefix = runtime().get_option('general/0/remote_workdir')
with _copy_reframe(prefix) as dirname:
with osext.change_dir(dirname):
job = Job.create(part.scheduler,
part.launcher_type(),
name='rfm-detect-job',
sched_access=part.access)
_emit_script(job)
getlogger().debug('submitting detection script')
_log_contents(job.script_filename)
job.submit()
job.wait()
getlogger().debug('job finished')
_log_contents(job.stdout)
_log_contents(job.stderr)
topo_info = json.loads(_contents('topo.json'))
except Exception as e:
getlogger().warning(f'failed to retrieve remote processor info: {e}')
return topo_info
def detect_topology():
rt = runtime()
detect_remote_systems = rt.get_option('general/0/remote_detect')
topo_prefix = os.path.join(os.getenv('HOME'), '.reframe/topology')
for part in rt.system.partitions:
getlogger().debug(f'detecting topology info for {part.fullname}')
found_procinfo = False
found_devinfo = False
if part.processor.info != {}:
# Processor info set up already in the configuration
getlogger().debug(
f'> topology found in configuration file; skipping...'
)
found_procinfo = True
if part.devices:
# Devices set up already in the configuration
getlogger().debug(
f'> devices found in configuration file; skipping...'
)
found_devinfo = True
if found_procinfo and found_devinfo:
continue
topo_file = os.path.join(
topo_prefix, f'{rt.system.name}-{part.name}', 'processor.json'
)
dev_file = os.path.join(
topo_prefix, f'{rt.system.name}-{part.name}', 'devices.json'
)
if not found_procinfo and os.path.exists(topo_file):
getlogger().debug(
f'> found topology file {topo_file!r}; loading...'
)
part.processor._info = _load_info(
topo_file, _subschema('#/defs/processor_info')
)
found_procinfo = True
if not found_devinfo and os.path.exists(dev_file):
getlogger().debug(
f'> found devices file {dev_file!r}; loading...'
)
part._devices = _load_info(dev_file, _subschema('#/defs/devices'))
found_devinfo = True
if found_procinfo and found_devinfo:
continue
if not found_procinfo:
# No topology found, try to auto-detect it
getlogger().debug(f'> no topology file found; auto-detecting...')
if _is_part_local(part):
# Unconditionally detect the system for fully local partitions
part.processor._info = cpuinfo()
_save_info(topo_file, part.processor.info)
elif detect_remote_systems:
part.processor._info = _remote_detect(part)
if part.processor.info:
_save_info(topo_file, part.processor.info)
getlogger().debug(f'> saved topology in {topo_file!r}')
if not found_devinfo:
getlogger().debug(f'> device auto-detection is not supported')
| 32.069124 | 78 | 0.593045 |
4a1d6d801eba735d7c2bed47eb3787fce7fb8cb0 | 6,525 | py | Python | test/functional/bipdersig-p2p.py | aixinwang/Gfc | 4a7fdac234f5f51055e471e77aaff62cfa4c6eab | [
"MIT"
] | null | null | null | test/functional/bipdersig-p2p.py | aixinwang/Gfc | 4a7fdac234f5f51055e471e77aaff62cfa4c6eab | [
"MIT"
] | null | null | null | test/functional/bipdersig-p2p.py | aixinwang/Gfc | 4a7fdac234f5f51055e471e77aaff62cfa4c6eab | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The GFC coin bt developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP66 (DER SIG).
Connect to a single node.
Mine 2 (version 2) blocks (save the coinbases for later).
Generate 298 more version 2 blocks, verify the node accepts.
Mine 749 version 3 blocks, verify the node accepts.
Check that the new DERSIG rules are not enforced on the 750th version 3 block.
Check that the new DERSIG rules are enforced on the 751st version 3 block.
Mine 199 new version blocks.
Mine 1 old-version block.
Mine 1 new version block.
Mine 1 old version block, see that the node rejects.
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript
from io import BytesIO
import time
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
def unDERify(tx):
"""
Make the signature in vin 0 of a tx non-DER-compliant,
by adding padding after the S-value.
"""
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
newscript.append(i[0:-1] + b'\0' + i[-1:])
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
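# Illustrative sketch (added for clarity; the byte values are hypothetical):
# unDERify pads only the first scriptSig element, i.e. the signature push.
# A blob ending in "... 02 20 <S bytes> 01" (S value followed by the SIGHASH
# byte) becomes "... 02 20 <S bytes> 00 01"; the extra 0x00 makes the encoding
# non-canonical DER, which is exactly what a BIP66-enforcing node must reject.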
class BIP66Test(ComparisonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def get_tests(self):
self.coinbase_blocks = self.nodes[0].generate(2)
height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
''' 298 more version 2 blocks '''
test_blocks = []
for i in range(298):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 749 version 3 blocks '''
test_blocks = []
for i in range(749):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
'''
Check that the new DERSIG rules are not enforced in the 750th
version 3 block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
''' Mine 199 new version blocks on last valid tip '''
test_blocks = []
for i in range(199):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 1 old version block '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
''' Mine 1 new version block '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
'''
Check that the new DERSIG rules are enforced in the 951st version 3
block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
''' Mine 1 old version block, should be invalid '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
if __name__ == '__main__':
BIP66Test().main()
| 35.655738 | 93 | 0.621916 |
4a1d6deb2b2758dd998538f36a7026a712d79289 | 4,598 | py | Python | jishaku/repl/compilation.py | Sengolda/jishkucord | 2aa401ff635b6e0550fbd1bcf3aaf3890a9eb1d8 | [
"MIT"
] | 1 | 2021-11-11T14:06:57.000Z | 2021-11-11T14:06:57.000Z | jishaku/repl/compilation.py | Sengolda/jishkucord | 2aa401ff635b6e0550fbd1bcf3aaf3890a9eb1d8 | [
"MIT"
] | 1 | 2021-11-12T01:27:07.000Z | 2021-11-12T01:27:07.000Z | jishaku/repl/compilation.py | Sengolda/jishkucord | 2aa401ff635b6e0550fbd1bcf3aaf3890a9eb1d8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
jishaku.repl.compilation
~~~~~~~~~~~~~~~~~~~~~~~~
Constants, functions and classes related to classifying, compiling and executing Python code.
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
import ast
import asyncio
import inspect
import linecache
import import_expression
from jishaku.functools import AsyncSender
from jishaku.repl.scope import Scope
from jishaku.repl.walkers import KeywordTransformer
CORO_CODE = f"""
async def _repl_coroutine({{0}}):
import asyncio
from importlib import import_module as {import_expression.constants.IMPORTER}
import aiohttp
import discord
from discord.ext import commands
try:
import jishaku
except ImportError:
jishaku = None # keep working even if in panic recovery mode
try:
pass
finally:
_async_executor.scope.globals.update(locals())
"""
def wrap_code(code: str, args: str = "") -> ast.Module:
"""
Compiles Python code into an async function or generator,
and automatically adds return if the function body is a single evaluation.
Also adds inline import expression support.
"""
user_code = import_expression.parse(code, mode="exec")
mod = import_expression.parse(CORO_CODE.format(args), mode="exec")
definition = mod.body[-1] # async def ...:
assert isinstance(definition, ast.AsyncFunctionDef)
try_block = definition.body[-1] # try:
assert isinstance(try_block, ast.Try)
try_block.body.extend(user_code.body)
ast.fix_missing_locations(mod)
KeywordTransformer().generic_visit(try_block)
last_expr = try_block.body[-1]
# if the last part isn't an expression, ignore it
if not isinstance(last_expr, ast.Expr):
return mod
# if the last expression is not a yield
if not isinstance(last_expr.value, ast.Yield):
# copy the value of the expression into a yield
yield_stmt = ast.Yield(last_expr.value)
ast.copy_location(yield_stmt, last_expr)
# place the yield into its own expression
yield_expr = ast.Expr(yield_stmt)
ast.copy_location(yield_expr, last_expr)
# place the yield where the original expression was
try_block.body[-1] = yield_expr
return mod
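# Rough sketch of the transformation (for illustration only; names abridged):
# wrap_code('1 + 1') returns a module whose final definition looks roughly like
#
#     async def _repl_coroutine(_async_executor):
#         ...
#         try:
#             yield 1 + 1   # the trailing expression is rewritten into a yield
#         finally:
#             _async_executor.scope.globals.update(locals())
#
# so iterating the compiled coroutine produces the value 2.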
class AsyncCodeExecutor: # pylint: disable=too-few-public-methods
"""
Executes/evaluates Python code inside of an async function or generator.
Example
-------
.. code:: python3
total = 0
# prints 1, 2 and 3
async for x in AsyncCodeExecutor('yield 1; yield 2; yield 3'):
total += x
print(x)
# prints 6
print(total)
"""
__slots__ = ("args", "arg_names", "code", "loop", "scope", "source")
def __init__(
self,
code: str,
scope: Scope = None,
arg_dict: dict = None,
loop: asyncio.BaseEventLoop = None,
):
self.args = [self]
self.arg_names = ["_async_executor"]
if arg_dict:
for key, value in arg_dict.items():
self.arg_names.append(key)
self.args.append(value)
self.source = code
self.code = wrap_code(code, args=", ".join(self.arg_names))
self.scope = scope or Scope()
self.loop = loop or asyncio.get_event_loop()
def __aiter__(self):
exec(compile(self.code, "<repl>", "exec"), self.scope.globals, self.scope.locals) # pylint: disable=exec-used
func_def = self.scope.locals.get("_repl_coroutine") or self.scope.globals["_repl_coroutine"]
return self.traverse(func_def)
async def traverse(self, func):
"""
Traverses an async function or generator, yielding each result.
This function is private. The class should be used as an iterator instead of using this method.
"""
try:
if inspect.isasyncgenfunction(func):
async for send, result in AsyncSender(func(*self.args)):
send((yield result))
else:
yield await func(*self.args)
except Exception: # pylint: disable=broad-except
# Falsely populate the linecache to make the REPL line appear in tracebacks
linecache.cache["<repl>"] = (
len(self.source), # Source length
None, # Time modified (None bypasses expunge)
[line + "\n" for line in self.source.splitlines()], # Line list
"<repl>", # 'True' filename
)
raise
| 28.7375 | 118 | 0.629187 |
4a1d6e5e24d670d2765b06868c30cd779fd48691 | 716 | py | Python | tests/app/test_cloudfoundry_config.py | alphagov/notify-notifications-api | e604385e0cf4c2ab8c6451b7120ceb196cce21b5 | [
"MIT"
] | null | null | null | tests/app/test_cloudfoundry_config.py | alphagov/notify-notifications-api | e604385e0cf4c2ab8c6451b7120ceb196cce21b5 | [
"MIT"
] | null | null | null | tests/app/test_cloudfoundry_config.py | alphagov/notify-notifications-api | e604385e0cf4c2ab8c6451b7120ceb196cce21b5 | [
"MIT"
] | null | null | null | import json
import os
import pytest
from app.cloudfoundry_config import extract_cloudfoundry_config
@pytest.fixture
def vcap_services():
return {
'postgres': [{
'credentials': {
'uri': 'postgres uri'
}
}],
'redis': [{
'credentials': {
'uri': 'redis uri'
}
}],
'user-provided': []
}
def test_extract_cloudfoundry_config_populates_other_vars(os_environ, vcap_services):
os.environ['VCAP_SERVICES'] = json.dumps(vcap_services)
extract_cloudfoundry_config()
assert os.environ['SQLALCHEMY_DATABASE_URI'] == 'postgresql uri'
assert os.environ['REDIS_URL'] == 'redis uri'
| 22.375 | 85 | 0.599162 |
4a1d701435395e9c4a6e831b3fe874c3a6ce28a8 | 996 | py | Python | main.py | Guilhermeasper/motorola-challenge | 43d187a952a7f18c7de54db90e0ab34e5887673c | [
"MIT"
] | null | null | null | main.py | Guilhermeasper/motorola-challenge | 43d187a952a7f18c7de54db90e0ab34e5887673c | [
"MIT"
] | null | null | null | main.py | Guilhermeasper/motorola-challenge | 43d187a952a7f18c7de54db90e0ab34e5887673c | [
"MIT"
] | null | null | null | from screen.screen import Screen
import sys
USAGE = f'Usage: python {sys.argv[0]} -input="<path/to/input/file>" -output="<path/to/output/file>"'
def argparse():
"""Parse the command line arguments and returns input and output paths"""
input_path = ""
output_path = ""
for arg in sys.argv[1:]:
if arg.startswith("-input"):
input_path = arg.split("=")[1]
elif arg.startswith("-output"):
output_path = arg.split("=")[1]
else:
raise SystemExit(USAGE)
return input_path, output_path
def main():
screen = Screen()
input_path, output_path = argparse()
try:
input_file = open(input_path, "r")
output_file = open(output_path, "w+")
except FileNotFoundError:
raise SystemExit(USAGE)
for item in input_file.read().split("\n"):
output_file.write(screen.change_state(item) + "\n")
input_file.close()
output_file.close()
if __name__ == "__main__":
main()
| 24.292683 | 100 | 0.611446 |
4a1d712c63366d7f1ac5a2179dbbd9672794dd8a | 678 | py | Python | tests/test_graph_builder.py | zspatter/network-simulation | 1a07acbe7b039e04d40cceb790a95fe0421dfea5 | [
"MIT"
] | 1 | 2019-02-11T19:42:28.000Z | 2019-02-11T19:42:28.000Z | tests/test_graph_builder.py | zspatter/Network-Simulation | 1a07acbe7b039e04d40cceb790a95fe0421dfea5 | [
"MIT"
] | 5 | 2019-05-08T18:08:03.000Z | 2019-05-17T14:24:04.000Z | tests/test_graph_builder.py | zspatter/Network-Simulation | 1a07acbe7b039e04d40cceb790a95fe0421dfea5 | [
"MIT"
] | 1 | 2019-02-11T19:42:31.000Z | 2019-02-11T19:42:31.000Z | from network_simulator.GraphBuilder import GraphBuilder
def test_generate_random_adjacency_dict():
test_dict = GraphBuilder.generate_random_adjacency_dict(node_id=1,
total_nodes=50, max_weight=50)
assert len(test_dict) < 50
for node in test_dict:
assert 0 < test_dict[node]['weight'] < 51
assert test_dict[node]['status']
def test_generate_random_network():
n = 5
graph = GraphBuilder.graph_builder(n)
    assert len(graph.network_dict) == n
    assert len(graph.nodes()) == n
for node in graph.network_dict:
assert len(graph.network_dict[node].adjacency_dict) < n
| 32.285714 | 90 | 0.663717 |
4a1d712d12092ac1bab27cbd53eac671c4a83421 | 762 | py | Python | endpoints/tests/outputs_test.py | stefvra/energy_app | 2091273e4f6c083a8531667885a38d36716220b0 | [
"MIT"
] | 1 | 2022-03-21T20:35:58.000Z | 2022-03-21T20:35:58.000Z | endpoints/tests/outputs_test.py | stefvra/energy_app | 2091273e4f6c083a8531667885a38d36716220b0 | [
"MIT"
] | null | null | null | endpoints/tests/outputs_test.py | stefvra/energy_app | 2091273e4f6c083a8531667885a38d36716220b0 | [
"MIT"
] | null | null | null |
from endpoints.outputs import Output_Factory
from tools import tools
output_factory = Output_Factory()
config_store = tools.Config_Store(filename=tools.get_config_file())
mock_GPIO_Output = output_factory.create_from_config(config_store, 'GPIO_output')
def test_GPIO_Output_init():
assert mock_GPIO_Output.is_enabled() == False
def test_GPIO_Output_enable():
mock_GPIO_Output.enable()
assert mock_GPIO_Output.is_enabled() == True
def test_GPIO_Output_disable():
mock_GPIO_Output.disable()
assert mock_GPIO_Output.is_enabled() == False
def test_GPIO_Output_toggle():
initial_state = mock_GPIO_Output.is_enabled()
mock_GPIO_Output.toggle()
final_state = mock_GPIO_Output.is_enabled()
assert initial_state != final_state | 28.222222 | 81 | 0.790026 |
4a1d71c347671f3df6c3de726d3c222209e6ceca | 1,956 | py | Python | day038/main.py | avholloway/100DaysOfCode | c7eeb8c47731ea7f47144d22fd90e4cc965d34c9 | [
"MIT"
] | null | null | null | day038/main.py | avholloway/100DaysOfCode | c7eeb8c47731ea7f47144d22fd90e4cc965d34c9 | [
"MIT"
] | null | null | null | day038/main.py | avholloway/100DaysOfCode | c7eeb8c47731ea7f47144d22fd90e4cc965d34c9 | [
"MIT"
] | null | null | null | import os
import sys
import json
import requests
from datetime import datetime as dt
# === User Interaction ========================================================
activity = input("Input: ")
# === NutritionIX API Info ====================================================
NUTRX_API_HOST = "https://trackapi.nutritionix.com"
NUTRX_API_VERSION = "v2"
NUTRX_APP_ID = os.getenv('NUTRX_APP_ID')
NUTRX_APP_KEY = os.getenv('NUTRX_APP_KEY')
NUTRX_HEADERS = {
"x-app-id": NUTRX_APP_ID,
"x-app-key": NUTRX_APP_KEY,
"x-remote-user-id": "avholloway"
}
NUTRX_ENDPOINT = f"{NUTRX_API_HOST}/{NUTRX_API_VERSION}/natural/exercise"
NUTRX_DATA = {
"query": activity,
"gender": "male",
"weight_kg": 102,
"height_cm": 188,
"age": 38
}
response = requests.post(url=NUTRX_ENDPOINT, headers=NUTRX_HEADERS, json=NUTRX_DATA)
response.raise_for_status()
data = response.json()
exercise = data["exercises"][0]
name = exercise["name"].title()
duration = int(exercise["duration_min"])
calories = int(exercise["nf_calories"])
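# For reference, an illustrative (made-up) response shape that the parsing
# above assumes; only the fields actually read here are shown:
# {"exercises": [{"name": "running", "duration_min": 30.0, "nf_calories": 350.0}]}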
# === Sheetly API Info ========================================================
SHEETLY_API_HOST = "https://api.sheety.co"
SHEETLY_UID = os.getenv('SHEETLY_UID')
SHEETLY_API_KEY = os.getenv('SHEETLY_API_KEY')
SHEETLY_PROJECT = "day38MyWorkouts"
SHEETLY_ENDPOINT = f"{SHEETLY_API_HOST}/{SHEETLY_UID}/{SHEETLY_PROJECT}/workouts"
SHEETLY_HEADERS = {
"Authorization": f"Bearer {SHEETLY_API_KEY}"
}
activity_date = dt.now().strftime("%d/%m/%Y")
activity_time = dt.now().strftime("%H:%M:%S")
SHEETLY_DATA = {
"workout": {
"date": activity_date,
"time": activity_time,
"exercise": name,
"duration": duration,
"calories": calories
}
}
response = requests.post(url=SHEETLY_ENDPOINT, headers=SHEETLY_HEADERS, json=SHEETLY_DATA)
response.raise_for_status()
if response.status_code == 200:
print(f"I have logged your {name} for today! You burned {calories} calories! Nice!") | 30.092308 | 90 | 0.645706 |
4a1d7296cbc6ce1d84e1b260c280dc4a7e304232 | 921 | py | Python | examples/microsoft_bot.py | shaikmoeed/ChatterBot | 41c8987a624d751e8935e5ac34da6d7b2b623e1f | [
"BSD-3-Clause"
] | 1 | 2020-08-13T02:46:51.000Z | 2020-08-13T02:46:51.000Z | examples/microsoft_bot.py | Cloudxtreme/ChatterBot | da138ac707769793cb6dc935975aa0287ac4e3b8 | [
"BSD-3-Clause"
] | null | null | null | examples/microsoft_bot.py | Cloudxtreme/ChatterBot | da138ac707769793cb6dc935975aa0287ac4e3b8 | [
"BSD-3-Clause"
] | 1 | 2019-04-20T09:54:28.000Z | 2019-04-20T09:54:28.000Z | from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
from settings import Microsoft
'''
See the Microsoft DirectLine api documentation for how to get a user access token.
https://docs.botframework.com/en-us/restapi/directline/
'''
chatbot = ChatBot(
'MicrosoftBot',
directline_host=Microsoft['directline_host'],
direct_line_token_or_secret=Microsoft['direct_line_token_or_secret'],
conversation_id=Microsoft['conversation_id'],
input_adapter='chatterbot.input.Microsoft',
output_adapter='chatterbot.output.Microsoft'
)
trainer = ChatterBotCorpusTrainer(chatbot)
trainer.train('chatterbot.corpus.english')
# The following loop will execute each time the user enters input
while True:
try:
response = chatbot.get_response('')
# Press ctrl-c or ctrl-d on the keyboard to exit
except (KeyboardInterrupt, EOFError, SystemExit):
break
| 29.709677 | 82 | 0.767644 |
4a1d7338a0849ee952ffd0e27930625b19586c52 | 640 | py | Python | LC/75.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | 2 | 2018-02-24T17:20:02.000Z | 2018-02-24T17:25:43.000Z | LC/75.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | null | null | null | LC/75.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | null | null | null | class Solution(object):
def sortColors(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
i=0
j=len(nums)-1
k=0
while k<=j and i<=j:
if nums[k]==0:
self.swap(nums,k,i)
i+=1
k+=1
continue
if nums[k]==1:
k+=1
continue
if nums[k]==2:
self.swap(nums,k,j)
j-=1
def swap(self, nums, i, j):
nums[i], nums[j] = nums[j], nums[i] | 26.666667 | 74 | 0.382813 |
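# Example (added for illustration): for nums = [2, 0, 2, 1, 1, 0],
# Solution().sortColors(nums) rearranges the list in place to [0, 0, 1, 1, 2, 2].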
4a1d7386904d0d3789f2b52760e30480de007438 | 1,870 | py | Python | parsl/tests/manual_tests/test_log_filter.py | cylondata/parsl | 00ff9372bd841dafef8a0b3566c79ffe68f0e367 | [
"Apache-2.0"
] | 323 | 2017-07-28T21:31:27.000Z | 2022-03-05T13:06:05.000Z | parsl/tests/manual_tests/test_log_filter.py | cylondata/parsl | 00ff9372bd841dafef8a0b3566c79ffe68f0e367 | [
"Apache-2.0"
] | 1,286 | 2017-06-01T16:50:00.000Z | 2022-03-31T16:45:14.000Z | parsl/tests/manual_tests/test_log_filter.py | cylondata/parsl | 00ff9372bd841dafef8a0b3566c79ffe68f0e367 | [
"Apache-2.0"
] | 113 | 2017-06-03T11:38:40.000Z | 2022-03-26T16:43:05.000Z | import argparse
import parsl
import logging
parsl.load()
from parsl import python_app
@python_app
def platform(sleep=10, stdout=None):
import platform
import time
time.sleep(sleep)
return platform.uname()
class SkipTasksFilter(logging.Filter):
def __init__(self, avoid_string):
self.avoid = avoid_string
def filter(self, record):
return self.avoid not in record.getMessage()
def test_platform(n=2):
# sync
logger = logging.getLogger("parsl.dataflow.dflow")
skip_tags = ['Task', 'dependencies']
for skip in skip_tags:
skip_filter = SkipTasksFilter(skip)
logger.addFilter(skip_filter)
x = platform(sleep=0)
print(x.result())
d = []
for i in range(0, n):
x = platform(sleep=5)
d.append(x)
    print(set([i.result() for i in d]))
dfk = parsl.dfk()
dfk.cleanup()
with open("{}/parsl.log".format(dfk.run_dir)) as f:
for line in f.readlines():
if any(skip in line for skip in skip_tags):
raise Exception("Logline {} contains a skip tag".format(line))
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--sitespec", default=None)
parser.add_argument("-c", "--count", default="10",
help="Count of apps to launch")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
if args.sitespec:
c = None
try:
exec("import parsl; from {} import config".format(args.sitespec))
parsl.load(c)
except Exception:
print("Failed to load the requested config : ", args.sitespec)
exit(0)
if args.debug:
parsl.set_stream_logger()
x = test_platform()
| 23.974359 | 78 | 0.605882 |
4a1d73d6f7a7b7ec709be45e3c4a5dce7aadb4d0 | 4,816 | py | Python | mmocr/models/textrecog/recognizer/seg_recognizer.py | Whatsetsthisend/mmocr | 6444b3226a10162378b5ed3109991cc618e89fa4 | [
"Apache-2.0"
] | null | null | null | mmocr/models/textrecog/recognizer/seg_recognizer.py | Whatsetsthisend/mmocr | 6444b3226a10162378b5ed3109991cc618e89fa4 | [
"Apache-2.0"
] | null | null | null | mmocr/models/textrecog/recognizer/seg_recognizer.py | Whatsetsthisend/mmocr | 6444b3226a10162378b5ed3109991cc618e89fa4 | [
"Apache-2.0"
] | null | null | null | import numpy
import torch
from mmdet.models.builder import (DETECTORS, build_backbone, build_head,
build_loss, build_neck)
from mmocr.models.builder import build_convertor, build_preprocessor
from .base import BaseRecognizer
@DETECTORS.register_module()
class SegRecognizer(BaseRecognizer):
"""Base class for segmentation based recognizer."""
def __init__(self,
preprocessor=None,
backbone=None,
neck=None,
head=None,
loss=None,
label_convertor=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super().__init__()
# Label_convertor
assert label_convertor is not None
self.label_convertor = build_convertor(label_convertor)
# Preprocessor module, e.g., TPS
self.preprocessor = None
if preprocessor is not None:
self.preprocessor = build_preprocessor(preprocessor)
# Backbone
assert backbone is not None
self.backbone = build_backbone(backbone)
# Neck
assert neck is not None
self.neck = build_neck(neck)
# Head
assert head is not None
head.update(num_classes=self.label_convertor.num_classes())
self.head = build_head(head)
# Loss
assert loss is not None
self.loss = build_loss(loss)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
def init_weights(self, pretrained=None):
"""Initialize the weights of recognizer."""
super().init_weights(pretrained)
if self.preprocessor is not None:
self.preprocessor.init_weights()
self.backbone.init_weights(pretrained=pretrained)
if self.neck is not None:
self.neck.init_weights()
self.head.init_weights()
def extract_feat(self, img):
"""Directly extract features from the backbone."""
if self.preprocessor is not None:
img = self.preprocessor(img)
x = self.backbone(img)
return x
def forward_train(self, img, gt_kernels=None):
"""
Args:
img (tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
            gt_kernels (tensor): Ground-truth segmentation kernels used by the
                segmentation loss.
Returns:
dict[str, tensor]: A dictionary of loss components.
"""
feats = self.extract_feat(img)
out_neck = self.neck(feats)
out_head = self.head(out_neck)
loss_inputs = (out_neck, out_head, gt_kernels)
losses = self.loss(*loss_inputs)
return losses
def simple_test(self, img, **kwargs):
"""Test function without test time augmentation.
Args:
            img (torch.Tensor): Image input tensor.
        Returns:
            tensor: Raw prediction map from the recognition head.
"""
feat = self.extract_feat(img)
out_neck = self.neck(feat)
out_head = self.head(out_neck)
# texts, scores = self.label_convertor.tensor2str(out_head)
# scores = self.label_convertor.tensor2str(out_head)
# scores = torch.from_numpy(numpy.array(scores)).cuda()
# flatten batch results
# results = []
# for text, score in zip(texts, scores):
# results.append(dict(text=text, score=score))
return out_head
def merge_aug_results(self, aug_results):
out_text, out_score = '', -1
for result in aug_results:
text = result[0]['text']
score = sum(result[0]['score']) / max(1, len(text))
if score > out_score:
out_text = text
out_score = score
out_results = [dict(text=out_text, score=out_score)]
return out_results
    def aug_test(self, imgs, img_metas, **kwargs):
"""Test function with test time augmentation.
Args:
imgs (list[tensor]): Tensor should have shape NxCxHxW,
which contains all images in the batch.
img_metas (list[list[dict]]): The metadata of images.
"""
aug_results = []
for img, img_meta in zip(imgs, img_metas):
result = self.simple_test(img, **kwargs)
aug_results.append(result)
return self.merge_aug_results(aug_results)
| 30.481013 | 77 | 0.591985 |
4a1d7491ffd2ff892e716f213d2d700c912a41a3 | 5,159 | py | Python | dhcp_lease_db.py | OpenSwitchNOS/openswitch-ops-dhcp-tftp | 5d49e0d63296d9886912a84f031a811f413ba7fd | [
"Apache-2.0"
] | null | null | null | dhcp_lease_db.py | OpenSwitchNOS/openswitch-ops-dhcp-tftp | 5d49e0d63296d9886912a84f031a811f413ba7fd | [
"Apache-2.0"
] | null | null | null | dhcp_lease_db.py | OpenSwitchNOS/openswitch-ops-dhcp-tftp | 5d49e0d63296d9886912a84f031a811f413ba7fd | [
"Apache-2.0"
] | 1 | 2021-09-10T08:19:13.000Z | 2021-09-10T08:19:13.000Z | #!/usr/bin/python
# (C) Copyright 2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License..
import os
import sys
from time import sleep
import ovs.dirs
import ovs.db.idl
import ovs.vlog
vlog = ovs.vlog.Vlog("dhcp_lease_db")
# ovs definitions
idl = None
# OPS_TODO: Need to pull this from the build env
def_db = 'unix:/var/run/openvswitch/db.sock'
# OPS_TODO: Need to pull this from the build env
dhcp_lease_db_schema = '/usr/share/openvswitch/dhcp_leases.ovsschema'
# DHCP lease tabe names
DHCP_LEASES_TABLE = "DHCP_Lease"
# DHCP lease db column names
EXPIRY_TIME = "expiry_time"
MAC_ADDR = "mac_address"
IP_ADDR = "ip_address"
CLIENT_HOSTNAME = "client_hostname"
CLIENT_ID = "client_id"
class DHCPLeaseDB(object):
def __init__(self, location=None):
'''
Create a IDL connection to the DHCP lease DB and register all the
columns with schema helper.
'''
self.idl = None
self.txn = None
self.schema_helper = ovs.db.idl.SchemaHelper(
location=dhcp_lease_db_schema)
self.schema_helper.register_table(DHCP_LEASES_TABLE)
self.idl = ovs.db.idl.Idl(def_db, self.schema_helper)
self.expiry_time = None
self.mac_address = None
self.ip_address = None
self.client_hostname = None
self.client_id = None
while not self.idl.run():
sleep(.1)
def find_row_by_mac_addr(self, mac_addr):
'''
Walk through the rows in the dhcp lease table (if any)
looking for a row with mac addr passed in argument
If row is found, set variable tbl_found to True and return
the row object to caller function
'''
tbl_found = False
ovs_rec = None
for ovs_rec in self.idl.tables[DHCP_LEASES_TABLE].rows.itervalues():
if ovs_rec.mac_address == mac_addr:
tbl_found = True
break
return ovs_rec, tbl_found
def __set_column_value(self, row, entry):
if entry[EXPIRY_TIME] != None:
setattr(row, EXPIRY_TIME, entry[EXPIRY_TIME])
if entry[MAC_ADDR] != None:
setattr(row, MAC_ADDR, entry[MAC_ADDR])
if entry[IP_ADDR] != None:
setattr(row, IP_ADDR, entry[IP_ADDR])
if entry[CLIENT_HOSTNAME] != None:
setattr(row, CLIENT_HOSTNAME, entry[CLIENT_HOSTNAME])
if entry[CLIENT_ID] != None:
setattr(row, CLIENT_ID, entry[CLIENT_ID])
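    # Illustrative shape of the `entry` argument expected by the insert/update
    # helpers below (values here are hypothetical); the keys correspond to the
    # column-name constants defined at module level:
    #
    #     entry = {
    #         "expiry_time": "1617181920",
    #         "mac_address": "00:11:22:33:44:55",
    #         "ip_address": "10.0.0.5",
    #         "client_hostname": "host-1",
    #         "client_id": "client-1",
    #     }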
def insert_row(self, entry):
'''
Insert a new row in dhcp_lease_db and update the columns with
user configured values. Default values are used if user hasn't
configured any parameter.
'''
self.txn = ovs.db.idl.Transaction(self.idl)
row = self.txn.insert(self.idl.tables[DHCP_LEASES_TABLE])
self.__set_column_value(row, entry)
status = self.txn.commit_block()
return row, status
def update_row(self, mac_addr, entry):
'''
Update a DHCP row with latest modified values.
'''
self.txn = ovs.db.idl.Transaction(self.idl)
row, row_found = self.find_row_by_mac_addr(mac_addr)
if row_found:
self.__set_column_value(row, entry)
status = self.txn.commit_block()
else:
row, status = self.insert_row(entry)
return row, status
def delete_row(self, mac_addr):
'''
Delete a specific row from dhcp_lease_db based on
mac addr passed as argument.
If specified row is found, variable row_found
is updated to True and delete status is returned.
'''
self.txn = ovs.db.idl.Transaction(self.idl)
row, row_found = self.find_row_by_mac_addr(mac_addr)
status = ovs.db.idl.Transaction.UNCHANGED
if row_found:
row.delete()
status = self.txn.commit_block()
return row_found, status
def clear_db(self):
'''
Delete a all rows from dhcp_lease_db
'''
ovs_rec = None
row_deleted = False
self.txn = ovs.db.idl.Transaction(self.idl)
status = ovs.db.idl.Transaction.UNCHANGED
while True:
for ovs_rec in self.idl.tables[DHCP_LEASES_TABLE].rows.itervalues():
ovs_rec.delete()
row_deleted = True
break
else:
break
if row_deleted == True:
status = self.txn.commit_block()
return status
def close(self):
self.idl.close()
| 28.983146 | 80 | 0.631712 |
4a1d74bc7e385280d68887c4c24f7ce7de1504b0 | 9,989 | py | Python | ban_appeals/ban_appeals.py | python-discord/modmail-plugins | eba5a329fae3ee16f0efab991fbd5ad1854186dc | [
"MIT"
] | 1 | 2021-08-03T04:46:07.000Z | 2021-08-03T04:46:07.000Z | ban_appeals/ban_appeals.py | python-discord/modmail-plugins | eba5a329fae3ee16f0efab991fbd5ad1854186dc | [
"MIT"
] | 7 | 2021-09-23T09:26:31.000Z | 2022-03-24T08:58:06.000Z | ban_appeals/ban_appeals.py | python-discord/modmail-plugins | eba5a329fae3ee16f0efab991fbd5ad1854186dc | [
"MIT"
] | 2 | 2021-08-09T03:39:19.000Z | 2022-03-10T06:06:53.000Z | import typing as t
import discord
from discord.ext import commands
from bot import ModmailBot
from core import checks
from core.models import PermissionLevel, getLogger
from core.thread import Thread
from .utils import async_tasks, get_or_fetch
PYDIS_NO_KICK_ROLE_IDS = (
267627879762755584, # Owners in PyDis
409416496733880320, # DevOps in PyDis
)
APPEAL_NO_KICK_ROLE_ID = 890270873813139507 # Staff in appeals server
APPEAL_GUILD_ID = 890261951979061298
BAN_APPEAL_MESSAGE = (
"Please be patient, it may take a while for us to respond to ban appeals.\n\n"
"To ensure we can respond to your appeal, make sure you keep your DMs "
"open and do not block the bot."
)
log = getLogger(__name__)
class BanAppeals(commands.Cog):
"""A plugin to manage threads from a separate ban appeal server."""
def __init__(self, bot: ModmailBot):
self.bot = bot
self.pydis_guild: t.Optional[discord.Guild] = None
self.appeal_categories: list[discord.CategoryChannel] = []
self.appeals_guild: t.Optional[discord.Guild] = None
self.logs_channel: t.Optional[discord.TextChannel] = None
self.db = self.bot.plugin_db.get_partition(self)
self.init_task = async_tasks.create_task(self.init_plugin(), self.bot.loop)
async def init_plugin(self) -> None:
"""Initialise the plugin's configuration."""
self.pydis_guild = self.bot.guild
self.appeals_guild = self.bot.get_guild(APPEAL_GUILD_ID)
db_categories = await self.db.find_one({"_id": "ban-appeal-categories"})
db_categories = db_categories or {}
self.appeal_categories = db_categories.get("categories", [])
log.info("Loaded %s appeal categories", len(self.appeal_categories))
self.logs_channel = discord.utils.get(self.appeals_guild.channels, name="logs")
log.info("Plugin loaded, checking if there are people to kick.")
await self._sync_kicks()
async def _sync_kicks(self) -> None:
"""Iter through all members in appeals guild, kick them if they meet criteria."""
for member in self.appeals_guild.members:
await self._maybe_kick_user(member)
async def _maybe_kick_user(self, member: discord.Member) -> None:
"""Kick members joining appeals if they are not banned, and not part of the bypass list."""
if member.bot:
return
if not await self._is_banned_pydis(member):
pydis_member = await get_or_fetch.get_or_fetch_member(self.pydis_guild, member.id)
if pydis_member and (
any(role.id in PYDIS_NO_KICK_ROLE_IDS for role in pydis_member.roles)
or APPEAL_NO_KICK_ROLE_ID in (role.id for role in member.roles)
):
log.info("Not kicking %s (%d) as they have a bypass role", member, member.id)
return
try:
await member.kick(reason="Not banned in main server")
except discord.Forbidden:
log.error("Failed to kick %s (%d) due to insufficient permissions.", member, member.id)
else:
await self.logs_channel.send(
f"Kicked {member} ({member.id}) on join as they're not banned in main server."
)
log.info("Kicked %s (%d).", member, member.id)
async def _is_banned_pydis(self, member: discord.Member) -> bool:
"""See if the given member is banned in PyDis."""
try:
await self.pydis_guild.fetch_ban(member)
except discord.errors.NotFound:
return False
return True
@commands.Cog.listener()
async def on_member_join(self, member: discord.Member) -> None:
"""
Kick members who cannot appeal and notify for rejoins.
Members who join the appeal server but are in the main server
are kicked.
If a member rejoins while appealing, it's notified in their
thread.
"""
await self.init_task
if member.guild == self.pydis_guild:
# Join event from PyDis
# Kick them from appeals guild now they're back in PyDis
appeals_member = await get_or_fetch.get_or_fetch_member(self.appeals_guild, member.id)
if appeals_member:
await appeals_member.kick(reason="Rejoined PyDis")
await self.logs_channel.send(f"Kicked {member} ({member.id}) as they rejoined PyDis.")
log.info("Kicked %s (%d) as they rejoined PyDis.", member, member.id)
elif member.guild == self.appeals_guild:
# Join event from the appeals server
# Kick them if they are not banned and not part of the bypass list
# otherwise notify that they rejoined while appealing.
await self._maybe_kick_user(member)
thread = await self.bot.threads.find(recipient=member)
if not thread:
return
embed = discord.Embed(description="The recipient has joined the appeals server.", color=self.bot.mod_color)
await thread.channel.send(embed=embed)
@checks.has_permissions(PermissionLevel.SUPPORTER)
@commands.group(invoke_without_command=True, aliases=("appeal_category",))
async def appeal_category_management(self, ctx: commands.Context) -> None:
"""Group of commands for managing appeal categories."""
await ctx.send_help(ctx.command)
@checks.has_permissions(PermissionLevel.SUPPORTER)
@appeal_category_management.command(name="get")
async def get_categories(self, ctx: commands.Context) -> None:
"""Get the list of appeal categories of commands for managing appeal categories."""
await self.init_task
category_str = ", ".join(map(str, self.appeal_categories)) if self.appeal_categories else "None"
await ctx.send(f"Currently configured appeal categories are: {category_str}")
@checks.has_permissions(PermissionLevel.OWNER)
@appeal_category_management.command(name="add")
async def add_category(self, ctx: commands.Context, appeal_category: discord.CategoryChannel) -> None:
"""Add a category to the list of ignored categories."""
await self.init_task
if appeal_category.id in self.appeal_categories:
await ctx.send(f":x: {appeal_category} already in the appeal category list.")
return
self.appeal_categories.append(appeal_category.id)
await self.db.find_one_and_update(
{"_id": "ban-appeal-categories"},
{"$addToSet": {"categories": appeal_category.id}},
upsert=True,
)
await ctx.send(f":+1: Added {appeal_category} to the available appeal categories.")
@checks.has_permissions(PermissionLevel.OWNER)
@appeal_category_management.command(name="delete", aliases=("remove", "del", "rem"))
async def del_category(self, ctx: commands.Context, category_to_remove: discord.CategoryChannel) -> None:
"""Remove a category from the list of appeal categories."""
await self.init_task
if category_to_remove.id not in self.appeal_categories:
await ctx.send(f":x: {category_to_remove} isn't in the appeal categories list.")
return
self.appeal_categories.remove(category_to_remove.id)
await self.db.find_one_and_update(
{"_id": "ping-delay-config"},
{"$pull": {"ignored_categories": category_to_remove.id}},
)
await ctx.send(f":+1: Removed {category_to_remove} from the appeal categories list.")
async def get_useable_appeal_category(self) -> t.Optional[discord.CategoryChannel]:
"""Get a useable (non-full) appeal category from the db, create a new one if needed."""
for category_id in self.appeal_categories:
category = await get_or_fetch.get_or_fetch_channel(self.pydis_guild, category_id)
if len(category.channels) < 50:
return category
return None
@commands.Cog.listener()
async def on_thread_ready(self, thread: Thread, *args) -> None:
"""If the new thread is for an appeal, move it to the appeals category."""
await self.init_task
if await self._is_banned_pydis(thread.recipient):
category = await self.get_useable_appeal_category()
if category:
await thread.channel.edit(category=category, sync_permissions=True)
else:
await thread.channel.send("ERROR! Could not move thread to an appeal category as they're all full!")
embed = discord.Embed(
colour=self.bot.mod_color,
description=BAN_APPEAL_MESSAGE,
timestamp=thread.channel.created_at,
)
recipient_thread_close = self.bot.config.get("recipient_thread_close")
if recipient_thread_close:
footer = self.bot.config["thread_self_closable_creation_footer"]
else:
footer = self.bot.config["thread_creation_footer"]
embed.set_footer(text=footer, icon_url=self.bot.guild.icon_url)
embed.title = "Ban appeal"
await thread.recipient.send(embed=embed)
@commands.Cog.listener()
async def on_member_remove(self, member: discord.Member) -> None:
"""
Notify if a member who is appealing leaves the appeals guild.
An embed is sent in the thread once they leave.
"""
await self.init_task
if not member.guild == self.appeals_guild:
return
thread = await self.bot.threads.find(recipient=member)
if not thread:
return
embed = discord.Embed(description="The recipient has left the appeals server.", color=self.bot.error_color)
await thread.channel.send(embed=embed)
def setup(bot: ModmailBot) -> None:
"""Add the BanAppeals cog."""
bot.add_cog(BanAppeals(bot))
| 41.620833 | 119 | 0.655421 |
4a1d763e4307f68f3a2afab1090f869102c91742 | 549 | py | Python | nipype/interfaces/tests/test_auto_AssertEqual.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/tests/test_auto_AssertEqual.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | 2 | 2017-10-05T21:08:38.000Z | 2018-10-09T23:01:23.000Z | nipype/interfaces/tests/test_auto_AssertEqual.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | 1 | 2016-10-11T19:18:53.000Z | 2016-10-11T19:18:53.000Z | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ...testing import assert_equal
from ..utility import AssertEqual
def test_AssertEqual_inputs():
input_map = dict(ignore_exception=dict(nohash=True,
usedefault=True,
),
volume1=dict(mandatory=True,
),
volume2=dict(mandatory=True,
),
)
inputs = AssertEqual.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
| 26.142857 | 77 | 0.68306 |
4a1d764a98da0ce15d3eaa5faa978cf1038eb79c | 15,836 | py | Python | templates/eman2/eman2_process.py | kmshin1397/ETSimulations | 7b69710cf519900c3ae34480ccf0ef3bba5db8b2 | [
"MIT"
] | null | null | null | templates/eman2/eman2_process.py | kmshin1397/ETSimulations | 7b69710cf519900c3ae34480ccf0ef3bba5db8b2 | [
"MIT"
] | 4 | 2020-02-07T23:23:44.000Z | 2021-07-17T19:56:33.000Z | templates/eman2/eman2_process.py | kmshin1397/ETSimulations | 7b69710cf519900c3ae34480ccf0ef3bba5db8b2 | [
"MIT"
] | null | null | null | """
This script runs a series of EMAN2 processing steps in the order specified in the steps_to_run
variable. The steps_to_run and other parameters for various steps will be filled in dynamically by
the eman2_processor.py module based on the configurations file provided to ets_process_data.py by
the user.
Note: Python3 is required to run this script.
"""
import os
import subprocess
import re
import shlex
import numpy as np
import json
from scipy.spatial.transform import Rotation as R
# ==================== Input parameters ====================
# General parameters
eman2_root = ""
raw_data_dir = ""
name = ""
steps_to_run = []
# Particle picking parameters
particle_coordinates_parameters = {}
e2import_parameters = {}
e2tomogram_parameters = {}
e2spt_tomoctf_parameters = {}
e2spt_extract_parameters = {}
e2spt_buildsets_parameters = {}
e2spt_sgd_parameters = {}
e2spt_refine_parameters = {}
# ==========================================================
def run_process_with_params(
base_command, params_dict, get_command_without_running=False, get_output=False
):
"""Helper function to run a given command line command, used to invoke various EMAN2 programs.
Command line arguments to the base command can be passed in as a dictionary of key, value pairs.
    Arguments that do not take a value (e.g. --help for many programs) should instead be passed in
    with the special value of 'enable' for that key.
    Args:
        base_command: The base command to run, e.g. e2tomogram.py
params_dict: A dictionary of input arguments to the command
get_command_without_running: Option to return the assembled full command without actually
running it
get_output: Return the first output line instead of the return code
"""
for arg, value in params_dict.items():
if value == "enable":
base_command += " --%s" % arg
else:
base_command += " --%s=%s" % (arg, str(value))
if get_command_without_running:
return base_command
else:
print("Running command: ")
print(base_command)
process = subprocess.Popen(shlex.split(base_command), stdout=subprocess.PIPE)
while True:
output = os.fsdecode(process.stdout.readline())
if output == "" and process.poll() is not None:
break
if output:
if get_output:
return output.strip()
print(output.strip())
rc = process.poll()
return rc
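# Illustrative use of run_process_with_params (not part of the original pipeline):
# assembling an e2tomogram.py command string from a parameter dictionary without
# executing it. The stack name and parameter values below are made up for the example.
#
#   cmd = run_process_with_params(
#       "e2tomogram.py tiltseries/stack_01.hdf",
#       {"npk": 20, "tltstep": 2.0, "correctrot": "enable"},
#       get_command_without_running=True,
#   )
#   # -> "e2tomogram.py tiltseries/stack_01.hdf --npk=20 --tltstep=2.0 --correctrot"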
def rotate_positions_around_z(positions, sign):
"""
Given a list of coordinates, rotate them all by 90 degrees around the z-axis. This is used to
convert particle coordinates from the raw tiltseries to the final reconstruction's
coordinate system for simulated data.
Args:
positions: A list of [x, y, z] coordinates
sign: -1 or 1, determining whether the rotation is -90 or 90 degrees
Returns: A list of [x, y, z] coordinates
"""
rot = R.from_euler("zxz", (sign * 90, 0, 0), degrees=True)
for i, point in enumerate(positions):
positions[i] = np.dot(rot.as_matrix(), np.array(point))
return positions
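# Quick sanity check (illustrative): with sign=+1 the rotation is +90 degrees about z,
# so the x unit vector maps onto the y axis (up to floating-point rounding):
#   rotate_positions_around_z([np.array([1.0, 0.0, 0.0])], 1)  ->  [~array([0., 1., 0.])]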
def invert_z_coordinates(positions):
"""
Given a list of coordinates, invert the z coordinates. This is used when
converting particle coordinates from the raw tiltseries to the final reconstruction's
coordinate system for simulated data.
Args:
positions: A list of [x, y, z] coordinates
Returns: A list of [x, y, z] coordinates
"""
for i, point in enumerate(positions):
point[2] = -1 * point[2]
positions[i] = point
return positions
def detect_eman_version():
command = "e2version.py"
result = run_process_with_params(command, {}, get_output=True)
    match = re.search(r"EMAN [0-9]\.[0-9]+", result)
if match is None:
print("Unable to parse EMAN version")
exit(1)
else:
return float(match.group(0).split(" ")[1])
# ==================== Processing steps ====================
def import_tiltseries(get_command_without_running=False):
"""Run the e2import.py program to import tilt stacks"""
results = ""
# Scan everything in the raw data folder
for dir_entry in os.scandir(raw_data_dir):
# For every directory found which begins with the proper project name, i.e. assumed to
# contain a raw stack
if dir_entry.is_dir() and dir_entry.name.startswith(name):
stack_basename = dir_entry.name
stack_to_import = dir_entry.path + "/%s.mrc" % stack_basename
base_command = "e2import.py %s" % stack_to_import
result = run_process_with_params(
base_command, e2import_parameters, get_command_without_running
)
if not get_command_without_running and result != 0:
print("Error with import tiltseries, exiting...")
exit(1)
else:
# If we're returning the commands, append with newline
if get_command_without_running:
results += result + "\n"
# Otherwise, we check for errors
else:
if result != 0:
results = result
else:
results = 0
return results
def reconstruct_tomograms(get_command_without_running=False):
"""Run the e2tomogram.py program to reconstruct tomograms"""
# If we haven't imported stacks yet, we don't know the exact reconstruction command
if get_command_without_running and not os.path.exists(
os.path.join(eman2_root, "tiltseries")
):
# Just get the first tiltseries name
for dir_entry in os.scandir(raw_data_dir):
# For every directory found which begins with the proper project name, i.e. assumed to
# contain a raw stack
if dir_entry.is_dir() and dir_entry.name.startswith(name):
stack_basename = dir_entry.name
break
command = "e2tomogram.py tiltseries/%s.hdf" % stack_basename
result = run_process_with_params(
command, e2tomogram_parameters, get_command_without_running
)
return result
else:
results = ""
# Iterate through each tiltseries
for tiltseries in os.scandir(os.path.join(eman2_root, "tiltseries")):
command = "e2tomogram.py %s" % ("tiltseries/" + tiltseries.name)
result = run_process_with_params(
command, e2tomogram_parameters, get_command_without_running
)
if not get_command_without_running and result != 0:
print("Error with reconstructing tomograms, exiting...")
exit(1)
else:
# If we're returning the commands, append with newline
if get_command_without_running:
results += result + "\n"
# Otherwise, we check for errors
else:
if result != 0:
results = result
else:
results = 0
return results
def estimate_ctf(get_command_without_running=False):
"""Run the e2spt_tomoctf.py program to estimate CTF for the tomograms"""
command = "e2spt_tomoctf.py"
result = run_process_with_params(
command, e2spt_tomoctf_parameters, get_command_without_running
)
if not get_command_without_running and result != 0:
print("Error with estimating CTF values, exiting...")
exit(1)
else:
return result
def record_eman2_particle(particles_array, info_file, particle_name, boxsize):
"""Write out particle coordinates to a EMAN2 tomogram info JSON file
Args:
particles_array: A list/numpy array of particle coordinates
info_file: The JSON file in the info directory of the EMAN2 project folder
corresponding to the tomogram in question
particle_name: The name to assign to the particle within the EMAN2 project
boxsize: The EMAN2 box size (as seen in the EMAN2 box picker) to use for the particles.
Returns: None
"""
# If there was only one model point
if particles_array.ndim == 1:
# Wrap in a new list to make it two-dimensional so next for loop will work
particles_array = [particles_array]
with open(info_file, "r") as f:
tomogram_info = json.load(f)
# Build up boxes
boxes = []
for particle in particles_array:
x, y, z = particle[0], particle[1], particle[2]
box = [x, y, z]
box.extend(["manual", 0.0, 0])
boxes.append(box)
tomogram_info["boxes_3d"] = boxes
tomogram_info["class_list"] = {"0": {"boxsize": boxsize, "name": particle_name}}
with open(info_file, "w") as f:
json.dump(tomogram_info, f, indent=4)
def extract_particles(get_command_without_running=False):
"""Run the e2spt_extract.py program to extract subvolumes after writing out the particle
coordinates to the EMAN2 info files
"""
# Record particles
if not get_command_without_running:
mode = ""
if "mode" in particle_coordinates_parameters:
mode = particle_coordinates_parameters["mode"]
else:
print("Error - Missing 'mode' parameter in particle_coordinates_parameters")
exit(1)
coordinates_file = ""
if "coordinates_file" in particle_coordinates_parameters:
coordinates_file = particle_coordinates_parameters["coordinates_file"]
elif mode != "sim":
print(
"Error - Missing 'coordinates_file' parameter in particle_coordinates_parameters"
)
exit(1)
unbinned_boxsize = 64
if "unbinned_boxsize" in particle_coordinates_parameters:
unbinned_boxsize = particle_coordinates_parameters["unbinned_boxsize"]
if mode == "single":
info_files = eman2_root + "/info"
for f in os.listdir(info_files):
info_file = os.fsdecode(f)
if info_file.startswith(name):
particles = np.loadtxt(coordinates_file)
record_eman2_particle(
particles, info_files + "/" + info_file, name, unbinned_boxsize
)
elif mode == "multiple":
for subdir in os.listdir(raw_data_dir):
if subdir.startswith(name):
coordinates = os.path.join(raw_data_dir, subdir, coordinates_file)
info_file = os.path.join(
eman2_root, "info", "%s_info.json" % subdir
)
particles = np.loadtxt(coordinates)
record_eman2_particle(particles, info_file, name, unbinned_boxsize)
elif mode == "sim":
root_dir = os.path.dirname(raw_data_dir)
metadata_file = os.path.join(root_dir, "sim_metadata.json")
with open(metadata_file, "r") as f:
metadata = json.loads(f.read())
for num, tomogram in enumerate(metadata):
basename = "%s_%d" % (name, tomogram["global_stack_no"])
# Positions for TEM-Simulator are in nm, need to convert to pixels
positions = np.array(tomogram["positions"]) / tomogram["apix"]
# During reconstruction, there are rotations imposed by e2tomogram.py based on EMAN version,
# so correct for that with the positions
version = detect_eman_version()
if version >= 2.9:
positions = rotate_positions_around_z(positions, 1)
positions = invert_z_coordinates(positions)
else:
positions = rotate_positions_around_z(positions, -1)
info_file = os.path.join(
eman2_root, "info", "%s_info.json" % basename
)
record_eman2_particle(positions, info_file, name, unbinned_boxsize)
else:
print(
"Error - Invalid 'mode' for particle coordinates. Should be 'single' or "
"'multiple' or 'sim'"
)
exit(1)
# Extract particles
base_command = "e2spt_extract.py --label=%s" % name
result = run_process_with_params(
base_command, e2spt_extract_parameters, get_command_without_running
)
if not get_command_without_running and result != 0:
print("Error with extracting particles, exiting...")
exit(1)
else:
return result
def make_particle_set(get_command_without_running=False):
"""Run the e2spt_buildsets.py program to create a list of particles for averaging"""
# Build set
base_command = "e2spt_buildsets.py --label=%s" % name
result = run_process_with_params(
base_command, e2spt_buildsets_parameters, get_command_without_running
)
if not get_command_without_running and result != 0:
print("Error with building the particle set, exiting...")
exit(1)
else:
return result
def make_initial_model(get_command_without_running=False):
"""Run the e2spt_sgd program to automatically generate an initial reference for averaging"""
base_command = "e2spt_sgd.py sets/%s.lst" % name
result = run_process_with_params(
base_command, e2spt_sgd_parameters, get_command_without_running
)
if not get_command_without_running and result != 0:
print("Error with generating the initial model, exiting...")
exit(1)
else:
return result
def run_refinement(get_command_without_running=False):
"""Run the e2spt_refine.py program to do sub-tomogram refinement"""
particle_set_file = "sets/%s.lst" % name
reference_file = "sptsgd_00/output.hdf"
base_command = "e2spt_refine.py %s --reference=%s" % (
particle_set_file,
reference_file,
)
result = run_process_with_params(
base_command, e2spt_refine_parameters, get_command_without_running
)
if not get_command_without_running and result != 0:
print("Error with the 3D refinement, exiting...")
exit(1)
else:
return result
# ==========================================================
# ==================== Main process ====================
# This table maps the keyword for each processing step to the functions that implement the actions
# for them.
functions_table = {
"import": import_tiltseries,
"reconstruct": reconstruct_tomograms,
"estimate_ctf": estimate_ctf,
"extract": extract_particles,
"build_set": make_particle_set,
"generate_initial_model": make_initial_model,
"3d_refinement": run_refinement,
}
def collect_and_output_commands(output_file):
commands = []
for step in functions_table:
function = functions_table[step]
command = function(get_command_without_running=True)
command += "\n"
commands.append(command)
with open(output_file, "w") as f:
f.writelines(commands)
def main():
# To start, go into EMAN2 project directory
os.chdir(eman2_root)
for step in steps_to_run:
print("=============================================")
print("Running step: %s" % step)
if step in functions_table:
function = functions_table[step]
function()
else:
print("ERROR: %s is not a valid EMAN2 processing step to run" % step)
exit(1)
if __name__ == "__main__":
main()
| 35.666667 | 112 | 0.617391 |
4a1d766858daa771a9add3959440f098aed3facb | 12,832 | py | Python | espnet/nets/pytorch_backend/transformer/attention.py | victor45664/espnet | 0ccacc32d25feddec5270cb3f8e08c24183755d8 | [
"Apache-2.0"
] | 4 | 2021-12-24T03:27:46.000Z | 2022-02-26T13:21:09.000Z | espnet/nets/pytorch_backend/transformer/attention.py | victor45664/espnet | 0ccacc32d25feddec5270cb3f8e08c24183755d8 | [
"Apache-2.0"
] | null | null | null | espnet/nets/pytorch_backend/transformer/attention.py | victor45664/espnet | 0ccacc32d25feddec5270cb3f8e08c24183755d8 | [
"Apache-2.0"
] | 2 | 2021-12-24T03:33:01.000Z | 2022-02-26T03:54:17.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Multi-Head Attention layer definition."""
import math
import numpy
import torch
from torch import nn
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an MultiHeadedAttention object."""
super(MultiHeadedAttention, self).__init__()
assert n_feat % n_head == 0
# We assume d_v always equals d_k
self.d_k = n_feat // n_head
self.h = n_head
self.n_feat=n_feat
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.attn = None
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query, key, value):
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2) # (batch, head, time1, d_k)
k = k.transpose(1, 2) # (batch, head, time2, d_k)
v = v.transpose(1, 2) # (batch, head, time2, d_k)
return q, k, v
def forward_attention(self, value, scores, mask):
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
min_value = float(
numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min
)
scores = scores.masked_fill(mask, min_value)
self.attn = torch.softmax(scores, dim=-1).masked_fill(
mask, 0.0
) # (batch, head, time1, time2)
else:
self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
p_attn = self.dropout(self.attn)
x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
x = (
x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
) # (batch, time1, d_model)
return self.linear_out(x) # (batch, time1, d_model)
def forward(self, query, key, value, mask):
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
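    # Shape sketch (illustrative, values chosen only for the example):
    #   mha = MultiHeadedAttention(n_head=4, n_feat=256, dropout_rate=0.1)
    #   q = k = v = torch.randn(2, 50, 256)
    #   out = mha(q, k, v, mask=None)   # out.shape == (2, 50, 256)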
def init_ilme(self,args):
#ilme
if args["ilmetype"]=="nacl":
from espnet2.asr.ilme.ilmenet import NACL
self.ilme=NACL(self.n_feat)
elif args["ilmetype"]=="acl":
from espnet2.asr.ilme.ilmenet import ACL
self.ilme=ACL(self.n_feat,args["acllayers"],args["aclactivations"],self.n_feat)
self.ilme_parameter = list(self.ilme.parameters())
def forward_ilm(self, query):
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q = self.linear_q(query)
ctx=self.ilme(q,-1) #transformer ilme doesn't support mini lstm
return self.linear_out(ctx)
class LegacyRelPositionMultiHeadedAttention(MultiHeadedAttention):
"""Multi-Head Attention layer with relative position encoding (old version).
Details can be found in https://github.com/espnet/espnet/pull/2816.
Paper: https://arxiv.org/abs/1901.02860
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
"""
def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):
"""Construct an RelPositionMultiHeadedAttention object."""
super().__init__(n_head, n_feat, dropout_rate)
self.zero_triu = zero_triu
# linear transformation for positional encoding
self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
# these two learnable bias are used in matrix c and matrix d
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
torch.nn.init.xavier_uniform_(self.pos_bias_u)
torch.nn.init.xavier_uniform_(self.pos_bias_v)
def rel_shift(self, x):
"""Compute relative positional encoding.
Args:
x (torch.Tensor): Input tensor (batch, head, time1, time2).
Returns:
torch.Tensor: Output tensor.
"""
zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=-1)
x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
x = x_padded[:, :, 1:].view_as(x)
if self.zero_triu:
ones = torch.ones((x.size(2), x.size(3)))
x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
return x
def forward(self, query, key, value, pos_emb, mask):
"""Compute 'Scaled Dot Product Attention' with rel. positional encoding.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
pos_emb (torch.Tensor): Positional embedding tensor (#batch, time1, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
q = q.transpose(1, 2) # (batch, time1, head, d_k)
n_batch_pos = pos_emb.size(0)
p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
p = p.transpose(1, 2) # (batch, head, time1, d_k)
# (batch, head, time1, d_k)
q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
# (batch, head, time1, d_k)
q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
# compute attention score
# first compute matrix a and matrix c
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
# (batch, head, time1, time2)
matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
# compute matrix b and matrix d
# (batch, head, time1, time1)
matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
matrix_bd = self.rel_shift(matrix_bd)
scores = (matrix_ac + matrix_bd) / math.sqrt(
self.d_k
) # (batch, head, time1, time2)
return self.forward_attention(v, scores, mask)
class RelPositionMultiHeadedAttention(MultiHeadedAttention):
"""Multi-Head Attention layer with relative position encoding (new implementation).
Details can be found in https://github.com/espnet/espnet/pull/2816.
Paper: https://arxiv.org/abs/1901.02860
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
"""
def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):
"""Construct an RelPositionMultiHeadedAttention object."""
super().__init__(n_head, n_feat, dropout_rate)
self.zero_triu = zero_triu
# linear transformation for positional encoding
self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
# these two learnable bias are used in matrix c and matrix d
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
torch.nn.init.xavier_uniform_(self.pos_bias_u)
torch.nn.init.xavier_uniform_(self.pos_bias_v)
def rel_shift(self, x):
"""Compute relative positional encoding.
Args:
x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).
time1 means the length of query vector.
Returns:
torch.Tensor: Output tensor.
"""
zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=-1)
x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
x = x_padded[:, :, 1:].view_as(x)[
:, :, :, : x.size(-1) // 2 + 1
] # only keep the positions from 0 to time2
if self.zero_triu:
ones = torch.ones((x.size(2), x.size(3)), device=x.device)
x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
return x
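    # Shape note (illustrative): rel_shift maps scores over relative positions of shape
    # (#batch, n_head, time1, 2*time1-1) to aligned scores (#batch, n_head, time1, time1),
    # e.g. an input of shape (2, 4, 50, 99) comes out as (2, 4, 50, 50).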
def forward(self, query, key, value, pos_emb, mask):
"""Compute 'Scaled Dot Product Attention' with rel. positional encoding.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
pos_emb (torch.Tensor): Positional embedding tensor
(#batch, 2*time1-1, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
q = q.transpose(1, 2) # (batch, time1, head, d_k)
n_batch_pos = pos_emb.size(0)
p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)
# (batch, head, time1, d_k)
q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
# (batch, head, time1, d_k)
q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
# compute attention score
# first compute matrix a and matrix c
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
# (batch, head, time1, time2)
matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
# compute matrix b and matrix d
# (batch, head, time1, 2*time1-1)
matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
matrix_bd = self.rel_shift(matrix_bd)
scores = (matrix_ac + matrix_bd) / math.sqrt(
self.d_k
) # (batch, head, time1, time2)
return self.forward_attention(v, scores, mask)
| 37.741176 | 91 | 0.595153 |
4a1d77a709f93db41a8962c4be54495afd02106b | 2,092 | py | Python | models/seg/decoder.py | HenryOsborne/SemanticSegmentation | d41549c3fd22731d7a12cdb1b438f730b0ebfcbc | [
"MIT"
] | null | null | null | models/seg/decoder.py | HenryOsborne/SemanticSegmentation | d41549c3fd22731d7a12cdb1b438f730b0ebfcbc | [
"MIT"
] | null | null | null | models/seg/decoder.py | HenryOsborne/SemanticSegmentation | d41549c3fd22731d7a12cdb1b438f730b0ebfcbc | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
class Decoder(nn.Module):
def __init__(self, num_classes, backbone):
super(Decoder, self).__init__()
if backbone == 'resnet':
low_level_inplanes = 256
elif backbone == 'xception':
low_level_inplanes = 128
elif backbone == 'mobilenet':
low_level_inplanes = 24
elif backbone == 'hrnet':
low_level_inplanes = 256
else:
raise NotImplementedError
self.conv1 = nn.Conv2d(low_level_inplanes, 48, 1, bias=False)
self.bn1 = nn.BatchNorm2d(48)
self.relu = nn.ReLU()
self.last_conv = nn.Sequential(nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Dropout(0.5),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Dropout(0.1),
nn.Conv2d(256, num_classes, kernel_size=1, stride=1))
self._init_weight()
def forward(self, x, low_level_feat):
low_level_feat = self.conv1(low_level_feat)
low_level_feat = self.bn1(low_level_feat)
low_level_feat = self.relu(low_level_feat)
x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x, low_level_feat), dim=1)
x = self.last_conv(x)
return x
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def build_decoder(num_classes, backbone):
return Decoder(num_classes, backbone)
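# Example shapes (illustrative; assumes a ResNet backbone and a 256-channel ASPP output):
#   decoder = build_decoder(num_classes=21, backbone='resnet')
#   x = torch.randn(2, 256, 32, 32)                 # high-level (ASPP) features
#   low_level_feat = torch.randn(2, 256, 128, 128)  # backbone low-level features
#   out = decoder(x, low_level_feat)                # -> (2, 21, 128, 128)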
| 37.357143 | 107 | 0.533939 |
4a1d77d51e04a28995888027c3950f032de0279b | 3,695 | py | Python | Incident-Response/Tools/grr/grr/server/grr_response_server/blob_stores/benchmark.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 1 | 2021-07-24T17:22:50.000Z | 2021-07-24T17:22:50.000Z | Incident-Response/Tools/grr/grr/server/grr_response_server/blob_stores/benchmark.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-28T03:40:31.000Z | 2022-02-28T03:40:52.000Z | Incident-Response/Tools/grr/grr/server/grr_response_server/blob_stores/benchmark.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-25T08:34:51.000Z | 2022-03-16T17:29:44.000Z | #!/usr/bin/env python
"""Benchmark to compare different BlobStore implementations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import time
from absl import app
from absl import flags
import numpy as np
# pylint: disable=unused-import,g-bad-import-order
from grr_response_server import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr_response_core.lib import rdfvalue
from grr_response_server import blob_store
from grr_response_server import server_startup
from grr_response_server.rdfvalues import objects as rdf_objects
flags.DEFINE_list(
"target",
default=None,
help="Benchmark the given BlobStore implementation classes. "
"Separate multiple by comma.")
flags.DEFINE_list(
"sizes",
default=["500K", "200K", "100K", "50K", "5K", "500", "50"],
help="Use the given blob sizes for the benchmark.")
flags.DEFINE_integer(
"per_size_duration_seconds",
default=30,
help="Benchmark duration per blob size in seconds.")
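# Example invocation (illustrative only; the target must be one of the class names
# registered in blob_store.REGISTRY for your deployment):
#   python benchmark.py --target=<BlobStoreClassName> --sizes=500K,50K \
#       --per_size_duration_seconds=10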
def _MakeBlobStore(blobstore_name):
try:
cls = blob_store.REGISTRY[blobstore_name]
except KeyError:
raise ValueError("No blob store %s found." % blobstore_name)
return blob_store.BlobStoreValidationWrapper(cls())
def _MakeRandomBlob(size_b, random_fd):
blob_data = random_fd.read(size_b)
blob_id = rdf_objects.BlobID.FromBlobData(blob_data)
return blob_id, blob_data
def _Timed(fn, *args, **kwargs):
start = time.time()
result = fn(*args, **kwargs)
return result, time.time() - start
def _PrintStats(size, size_b, durations):
durations_ms = np.array(durations) * 1000
total_s = sum(durations)
qps = len(durations) / total_s
print(
"{size}\t{total:.1f}s\t{num}\t{qps:.2f}\t{bps: >7}\t{p50:.1f}\t{p90:.1f}"
"\t{p95:.1f}\t{p99:.1f}".format(
size=size,
total=total_s,
num=len(durations),
qps=qps,
bps=str(rdfvalue.ByteSize(int(size_b * qps))).replace("iB", ""),
p50=np.percentile(durations_ms, 50),
p90=np.percentile(durations_ms, 90),
p95=np.percentile(durations_ms, 95),
p99=np.percentile(durations_ms, 99),
))
def _RunBenchmark(bs, size_b, duration_sec, random_fd):
"""Returns a list of runtimes for writes of the given size."""
start_timestamp = time.time()
durations = []
# Monotonically increasing time would be nice, but is unavailable in Py2.
while time.time() < start_timestamp + duration_sec:
blob_id, blob_data = _MakeRandomBlob(size_b, random_fd)
_, write_time = _Timed(bs.WriteBlobs, {blob_id: blob_data})
durations.append(write_time)
return durations
def main(argv):
"""Main."""
del argv # Unused.
# Initialise flows and config_lib
server_startup.Init()
if not flags.FLAGS.target:
store_names = ", ".join(sorted(blob_store.REGISTRY.keys()))
print("Missing --target. Use one or multiple of: {}.".format(store_names))
exit(1)
stores = [
_MakeBlobStore(blobstore_name) for blobstore_name in flags.FLAGS.target
]
with io.open("/dev/urandom", "rb") as random_fd:
for blobstore_name, bs in zip(flags.FLAGS.target, stores):
print()
print(blobstore_name)
print("size\ttotal\tnum\tqps\t b/sec\tp50\tp90\tp95\tp99")
for size in flags.FLAGS.sizes:
size_b = rdfvalue.ByteSize(size)
durations = _RunBenchmark(bs, size_b,
flags.FLAGS.per_size_duration_seconds,
random_fd)
_PrintStats(size, size_b, durations)
if __name__ == "__main__":
app.run(main)
| 29.56 | 79 | 0.690663 |
4a1d77e31b2a35f82d63e2880e0526963f19c809 | 2,685 | py | Python | bme680_log_serial.py | bernardocarvalho/esp32-bme680 | 83d143aab831ab6444f157c9f170433f384371c0 | [
"MIT"
] | null | null | null | bme680_log_serial.py | bernardocarvalho/esp32-bme680 | 83d143aab831ab6444f157c9f170433f384371c0 | [
"MIT"
] | null | null | null | bme680_log_serial.py | bernardocarvalho/esp32-bme680 | 83d143aab831ab6444f157c9f170433f384371c0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 4 12:26:02 2020
@author: bernardo
https://makersportal.com/blog/2018/2/25/python-datalogger-reading-the-serial-output-from-arduino-to-analyze-data-using-pyserial
"""
import serial
import numpy as np
from matplotlib import pyplot as plt
import time
import csv
#If you're not using Linux, you'll need to change this
#check the Arduino IDE to see what serial port it's attached to
#ser = serial.Serial('/dev/ttyACM0', 115200)
# For a MAC, eg.
ser = serial.Serial('/dev/cu.SLAB_USBtoUART', 115200)
while True:
try:
ser.reset_input_buffer()
ser_bytes= ser.readline()
# convert byte to string
data = ser_bytes[0:len(ser_bytes)-2].decode("utf-8")
dataList = data.split(',')
msgType = int(dataList[0])
# data=np.array(data.split(','))
# append PC time stamp
line_sv= str(int(time.time())) + ', ' + data
print(line_sv)
if(msgType == 1):
"""
output += ", " + String(iaqSensor.rawTemperature);
output += ", " + String(iaqSensor.pressure);
output += ", " + String(iaqSensor.rawHumidity);
output += ", " + String(iaqSensor.gasResistance);
output += ", " + String(iaqSensor.iaq);
output += ", " + String(iaqSensor.iaqAccuracy);
output += ", " + String(iaqSensor.temperature);
output += ", " + String(iaqSensor.humidity);
output += ", " + String(iaqSensor.staticIaq);
output += ", " + String(iaqSensor.co2Equivalent);
output += ", " + String(iaqSensor.breathVocEquivalent);
"""
uCtime =int(dataList[1])
pressure =float(dataList[3])
gasResistance = float(dataList[5])
iaq =float(dataList[6])
iaqAccuracy =int(dataList[7])
temperature =float(dataList[8])
humidity =float(dataList[9])
staticIaq =float(dataList[10])
co2Equivalent =float(dataList[11])
breathVocEquivalent =float(dataList[12])
#print(time.time() + ',' + line)
with open("bme680_data.csv","a") as f:
#f.write(line_sv + '\r\n')
writer = csv.writer(f,delimiter=",")
writer.writerow([int(time.time()),uCtime,pressure,gasResistance,iaq,iaqAccuracy,temperature,humidity,staticIaq, co2Equivalent,breathVocEquivalent])
except (KeyboardInterrupt, SystemExit):
print("Keyboard Interrupt")
break
except:
pass
ser.close() | 38.357143 | 163 | 0.568715 |
4a1d799f26d2b38f09079d0dcab0bf6f67429691 | 1,367 | py | Python | wrappers/python/tests/test_cleanup.py | animo/aries-askar | 11a145b46846e59d43de5b72b47d3594de4a2737 | [
"Apache-2.0"
] | null | null | null | wrappers/python/tests/test_cleanup.py | animo/aries-askar | 11a145b46846e59d43de5b72b47d3594de4a2737 | [
"Apache-2.0"
] | null | null | null | wrappers/python/tests/test_cleanup.py | animo/aries-askar | 11a145b46846e59d43de5b72b47d3594de4a2737 | [
"Apache-2.0"
] | 1 | 2020-06-17T19:54:13.000Z | 2020-06-17T19:54:13.000Z | from ctypes import c_char, c_char_p, c_size_t, c_ubyte, pointer
from unittest import mock
from aries_askar.bindings.handle import ArcHandle
from aries_askar.bindings.lib import ByteBuffer, RawBuffer, StrBuffer
def test_cleanup_handle():
logged = []
class Handle(ArcHandle):
@classmethod
def _cleanup(cls, handle: c_size_t):
logged.append(handle.value)
h = Handle()
assert not h.value
del h
assert not logged
h = Handle()
h.value = 99
del h
assert logged == [(99)]
def test_cleanup_bytebuffer():
logged = []
def cleanup(buffer: RawBuffer):
logged.append((buffer.len, buffer.data.contents.value if buffer.data else None))
with mock.patch.object(ByteBuffer, "_cleanup", cleanup):
b = ByteBuffer()
del b
assert not logged
c = c_ubyte(99)
b = ByteBuffer()
b.buffer = RawBuffer(len=1, data=pointer(c))
del b
assert logged == [(1, 99)]
def test_cleanup_strbuffer():
logged = []
def cleanup(buffer: c_char_p):
logged.append(buffer.value)
with mock.patch.object(StrBuffer, "_cleanup", cleanup):
s = StrBuffer()
del s
assert not logged
s = StrBuffer()
c = c_char(ord("a"))
s.buffer = pointer(c)
del s
assert logged == [b"a"]
| 22.409836 | 88 | 0.609364 |
4a1d7ac5e9a15226a195d0da97b8802b814d3066 | 947 | py | Python | lintcode/17.py | jianershi/algorithm | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | [
"MIT"
] | 1 | 2021-01-08T06:57:49.000Z | 2021-01-08T06:57:49.000Z | lintcode/17.py | jianershi/algorithm | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | [
"MIT"
] | null | null | null | lintcode/17.py | jianershi/algorithm | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | [
"MIT"
] | 1 | 2021-01-08T06:57:52.000Z | 2021-01-08T06:57:52.000Z | class Solution:
def search(self, nums, S, index):
if index == len(nums):
self.results.append(list(S))
return
S.append(nums[index])
print ("index = %d, nums[%d] = %d, S = %s, results = %s" % (index, index, nums[index], S, self.results))
self.search(nums, S, index + 1)
print ("index = %d, nums[%d] = %d, S = %s, results = %s" % (index, index, nums[index], S, self.results))
S.pop()
print ("index = %d, nums[%d] = %d, S = %s, results = %s" % (index, index, nums[index], S, self.results))
self.search(nums, S, index + 1)
print ("index = %d, nums[%d] = %d, S = %s, results = %s" % (index, index, nums[index], S, self.results))
def subsets(self, nums):
self.results = []
self.search(sorted(nums), [], 0)
return self.results
def main():
s = Solution()
print (s.subsets([1,2]))
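    # Expected final result for [1, 2] (after the debug traces): [[1, 2], [1], [2], []]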
if __name__=="__main__":
main()
| 35.074074 | 112 | 0.519535 |
4a1d7ace95a986ebc4bb42348514f37b5241b145 | 237 | py | Python | Darlington/phase2/STRINGS/day 29 solution/qtn10.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | Darlington/phase2/STRINGS/day 29 solution/qtn10.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | Darlington/phase2/STRINGS/day 29 solution/qtn10.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | # program to change a given string to a new string where the first and last
# chars have been exchanged.
def change_sring(str1):
return str1[-1:] + str1[1:-1] + str1[:1]
print(change_sring('abcd'))
print(change_sring('12345')) | 33.857143 | 75 | 0.696203 |
4a1d7bb40b7ece42845050d538adea422861fc4c | 2,668 | py | Python | setup.py | HCord-py/HCord | bacd34de3d19ae7b08fcc3f7eae0cd39bfe0ad97 | [
"MIT"
] | 1 | 2021-12-19T14:23:04.000Z | 2021-12-19T14:23:04.000Z | setup.py | HCord-py/HCord | bacd34de3d19ae7b08fcc3f7eae0cd39bfe0ad97 | [
"MIT"
] | null | null | null | setup.py | HCord-py/HCord | bacd34de3d19ae7b08fcc3f7eae0cd39bfe0ad97 | [
"MIT"
] | null | null | null | from setuptools import setup
import re
requirements = []
with open('requirements.txt') as f:
requirements = f.read().splitlines()
version = ''
with open('hcord/__init__.py') as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('version is not set')
if version.endswith(('a', 'b', 'rc')):
# append version identifier based on commit count
try:
import subprocess
p = subprocess.Popen(['git', 'rev-list', '--count', 'HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if out:
version += out.decode('utf-8').strip()
p = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if out:
version += '+g' + out.decode('utf-8').strip()
except Exception:
pass
readme = ''
with open('README.rst') as f:
readme = f.read()
extras_require = {
'voice': ['PyNaCl>=1.3.0,<1.5'],
'docs': [
'sphinx==4.0.2',
'sphinxcontrib_trio==1.1.2',
'sphinxcontrib-websupport',
],
'speed': [
'orjson>=3.5.4',
]
}
packages = [
'hcord',
'hcord.types',
'hcord.ui',
'hcord.webhook',
'hcord.ext.commands',
'hcord.ext.tasks',
]
setup(name='hcord',
author='HCord-py',
url='https://github.com/HCord-py/hcord',
project_urls={
"Documentation": "https://hcord.readthedocs.io/en/latest/",
"Issue tracker": "https://github.com/HCord-py/hcord/issues",
},
version=version,
packages=packages,
license='MIT',
description='A Python wrapper for the Discord API',
long_description=readme,
long_description_content_type="text/x-rst",
include_package_data=True,
install_requires=requirements,
extras_require=extras_require,
python_requires='>=3.8.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Internet',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
'Typing :: Typed',
]
)
| 29.644444 | 99 | 0.571589 |
4a1d7bbea981712fb4eeea7a490224af9d0fedcb | 653 | py | Python | pywayland/client/__init__.py | green-green-avk/pywayland | 65aae61d5df320dc0c39d46761e44a4e34137bb2 | [
"Apache-2.0"
] | 55 | 2015-07-12T16:22:05.000Z | 2022-03-20T20:31:58.000Z | pywayland/client/__init__.py | green-green-avk/pywayland | 65aae61d5df320dc0c39d46761e44a4e34137bb2 | [
"Apache-2.0"
] | 27 | 2015-08-13T13:27:17.000Z | 2022-01-21T14:00:33.000Z | pywayland/client/__init__.py | green-green-avk/pywayland | 65aae61d5df320dc0c39d46761e44a4e34137bb2 | [
"Apache-2.0"
] | 14 | 2015-08-05T20:38:28.000Z | 2021-12-01T10:25:09.000Z | # Copyright 2015 Sean Vig
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .display import Display # noqa
from .eventqueue import EventQueue # noqa
| 38.411765 | 74 | 0.762634 |
4a1d7c7cf74269a06d9e27166a040184c845a311 | 5,524 | py | Python | app/tornado_handlers/common.py | didouard/flight_review | 3cc9bad113bd93254c710aa800f8ebe6e233bf99 | [
"BSD-3-Clause"
] | null | null | null | app/tornado_handlers/common.py | didouard/flight_review | 3cc9bad113bd93254c710aa800f8ebe6e233bf99 | [
"BSD-3-Clause"
] | null | null | null | app/tornado_handlers/common.py | didouard/flight_review | 3cc9bad113bd93254c710aa800f8ebe6e233bf99 | [
"BSD-3-Clause"
] | null | null | null | """
Common methods and classes used by several tornado handlers
"""
from __future__ import print_function
import os
import sqlite3
import sys
from jinja2 import Environment, FileSystemLoader
import tornado.web
# this is needed for the following imports
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "../plot_app")
)
from db_entry import DBDataGenerated
from config import get_db_filename
# pylint: disable=abstract-method
_ENV = Environment(
loader=FileSystemLoader(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../plot_app/templates"
)
)
)
def get_jinja_env():
"""get the jinja2 Environment object"""
return _ENV
class CustomHTTPError(tornado.web.HTTPError):
"""simple class for HTTP exceptions with a custom error message"""
def __init__(self, status_code, error_message=None):
self.error_message = error_message
super().__init__(status_code, error_message)
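# Illustrative use inside a request handler (hypothetical message):
#   raise CustomHTTPError(404, "Log file not found")
# TornadoRequestHandlerBase.write_error below renders the message in the error page.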
class TornadoRequestHandlerBase(tornado.web.RequestHandler):
"""
base class for a tornado request handler with custom error display
"""
def write_error(self, status_code, **kwargs):
html_template = """
<html><title>Error {status_code}</title>
<body>HTTP Error {status_code}{error_message}</body>
</html>
"""
error_message = ""
if "exc_info" in kwargs:
e = kwargs["exc_info"][1]
if isinstance(e, CustomHTTPError) and e.error_message:
error_message = ": " + e.error_message
self.write(
html_template.format(status_code=status_code, error_message=error_message)
)
def generate_db_data_from_log_file(log_id, db_connection=None):
"""
Extract necessary information from the log file and insert as an entry to
the LogsGenerated table (faster information retrieval later on).
This is an expensive operation.
It's ok to call this a second time for the same log, the call will just
silently fail (but still read the whole log and will not update the DB entry)
:return: DBDataGenerated object
"""
db_data_gen = DBDataGenerated.from_log_file(log_id)
need_closing = False
if db_connection is None:
db_connection = sqlite3.connect(get_db_filename())
need_closing = True
db_cursor = db_connection.cursor()
try:
db_cursor.execute(
"insert into LogsGenerated (Id, Duration, "
"Mavtype, Estimator, AutostartId, Hardware, "
"Software, NumLoggedErrors, NumLoggedWarnings, "
"FlightModes, SoftwareVersion, UUID, FlightModeDurations, "
"StartTime, VibrationState, GpsType, QuickDischarge"
") values "
"(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
[
log_id,
db_data_gen.duration_s,
db_data_gen.mav_type,
db_data_gen.estimator,
db_data_gen.sys_autostart_id,
db_data_gen.sys_hw,
db_data_gen.ver_sw,
db_data_gen.num_logged_errors,
db_data_gen.num_logged_warnings,
",".join(map(str, db_data_gen.flight_modes)),
db_data_gen.ver_sw_release,
db_data_gen.vehicle_uuid,
db_data_gen.flight_mode_durations_str(),
db_data_gen.start_time_utc,
db_data_gen.vibration_state,
db_data_gen.gps_type,
db_data_gen.quick_discharge,
],
)
db_connection.commit()
except sqlite3.IntegrityError:
# someone else already inserted it (race). just ignore it
pass
db_cursor.close()
if need_closing:
db_connection.close()
return db_data_gen
def get_generated_db_data_from_log(log_id, con, cur):
"""
try to get the additional data from the DB (or generate it if it does not
exist)
:param con: db connection
:param cur: db cursor
:return: DBDataGenerated or None
"""
cur.execute("select * from LogsGenerated where Id = ?", [log_id])
db_tuple = cur.fetchone()
if db_tuple is None: # need to generate from file
try:
# Note that this is not necessary in most cases, as the entry is
# also generated after uploading (but with a timeout)
db_data_gen = generate_db_data_from_log_file(log_id, con)
except Exception as e:
print("Failed to load log file: " + str(e))
return None
else: # get it from the DB
db_data_gen = DBDataGenerated()
db_data_gen.duration_s = db_tuple[1]
db_data_gen.mav_type = db_tuple[2]
db_data_gen.estimator = db_tuple[3]
db_data_gen.sys_autostart_id = db_tuple[4]
db_data_gen.sys_hw = db_tuple[5]
db_data_gen.ver_sw = db_tuple[6]
db_data_gen.num_logged_errors = db_tuple[7]
db_data_gen.num_logged_warnings = db_tuple[8]
db_data_gen.flight_modes = {
int(x) for x in db_tuple[9].split(",") if len(x) > 0
}
db_data_gen.ver_sw_release = db_tuple[10]
db_data_gen.vehicle_uuid = db_tuple[11]
db_data_gen.flight_mode_durations = [
tuple(map(int, x.split(":"))) for x in db_tuple[12].split(",") if len(x) > 0
]
        # The indices below assume SELECT * returns columns in the same order as the
        # INSERT in generate_db_data_from_log_file (12: FlightModeDurations,
        # 13: StartTime, 14: VibrationState, 15: GpsType, 16: QuickDischarge)
        db_data_gen.vibration_state = db_tuple[14]
        db_data_gen.gps_type = db_tuple[15]
        db_data_gen.quick_discharge = db_tuple[16]
return db_data_gen
| 33.478788 | 88 | 0.6374 |
4a1d7cc2f3be0b635f8eee99f21741797ee46c3a | 8,016 | py | Python | gui.py | simonstre/pyng | 0ae01e3757f6b53defe2fba66c1a40f1b6641e42 | [
"MIT"
] | 1 | 2018-12-30T10:41:19.000Z | 2018-12-30T10:41:19.000Z | gui.py | simonstre/pyng | 0ae01e3757f6b53defe2fba66c1a40f1b6641e42 | [
"MIT"
] | null | null | null | gui.py | simonstre/pyng | 0ae01e3757f6b53defe2fba66c1a40f1b6641e42 | [
"MIT"
] | null | null | null | '''
Created on Jul 6, 2012
@author: sbolduc
'''
import Tkinter
import tkMessageBox
from protocol import create_kill, encodeCrc8, encodeCrc16
from datetime import datetime
class Panel(Tkinter.Frame):
def __init__(self, master, communicator, output_serializer):
Tkinter.Frame.__init__(self, master)
self.communicator = communicator
self.output_serializer = output_serializer
self.grid();
self.grid_propagate(True);
heading_label = Tkinter.Label(self)
heading_label.grid(column=1, row=0, sticky=Tkinter.NE)
heading_label["text"] = "Heading:"
self.heading = Tkinter.StringVar()
heading_entry = Tkinter.Entry(self, textvariable=self.heading)
heading_entry.grid(column=2, row=0, sticky=Tkinter.N)
heading_entry["state"] = Tkinter.DISABLED;
self.heading_target = Tkinter.StringVar()
heading_target_entry = Tkinter.Entry(self, textvariable=self.heading_target)
heading_target_entry.grid(column=3, row=0, sticky=Tkinter.NW)
depth_label = Tkinter.Label(self)
depth_label.grid(column=1, row=1, sticky=Tkinter.E)
depth_label["text"] = "Depth:"
self.depth = Tkinter.StringVar()
depth_entry = Tkinter.Entry(self, textvariable=self.depth)
depth_entry.grid(column=2, row=1)
depth_entry["state"] = Tkinter.DISABLED;
self.depth_target = Tkinter.StringVar()
depth_target_entry = Tkinter.Entry(self, textvariable=self.depth_target)
depth_target_entry.grid(column=3, row=1, sticky=Tkinter.W)
self.light = Tkinter.BooleanVar()
light_checkbutton = Tkinter.Checkbutton(self, variable=self.light)
light_checkbutton["text"] = "Buoy's Light:"
light_checkbutton.grid(column=2, row=2, sticky=Tkinter.W, columnspan=2)
light_checkbutton["state"] = Tkinter.DISABLED;
self.light_target = Tkinter.BooleanVar()
light_actuator_checkbutton = Tkinter.Checkbutton(self, variable=self.light_target)
light_actuator_checkbutton["text"] = "Submarine's Light:"
light_actuator_checkbutton.grid(column=3, row=2, sticky=Tkinter.W, columnspan=2)
send_button = Tkinter.Button(self, command=self.sendCommand)
send_button["text"] = "Send"
send_button.grid(column=3, row=3, sticky=Tkinter.W)
raw_frame = Tkinter.Frame(self)
raw_frame.grid(column=1, row=4, columnspan=2)
raw_scrollbar = Tkinter.Scrollbar(raw_frame)
raw_scrollbar.pack(side=Tkinter.RIGHT, fill=Tkinter.Y)
self.raw_text = Tkinter.Text(raw_frame, yscrollcommand=raw_scrollbar.set)
self.raw_text["width"] = 50
self.raw_text["height"] = 8
self.raw_text.pack()
raw_scrollbar.config(command=self.raw_text.yview)
parsed_frame = Tkinter.Frame(self)
parsed_frame.grid(column=3, row=4, columnspan=2)
parsed_scrollbar = Tkinter.Scrollbar(parsed_frame)
parsed_scrollbar.pack(side=Tkinter.RIGHT, fill=Tkinter.Y)
self.parsed_text = Tkinter.Text(parsed_frame, yscrollcommand=parsed_scrollbar.set)
self.parsed_text["width"] = 100
self.parsed_text["height"] = 8
self.parsed_text.pack()
parsed_scrollbar.config(command=self.parsed_text.yview)
self.kill_value = Tkinter.BooleanVar()
kill_button = Tkinter.Checkbutton(self, variable=self.kill_value)
kill_button["text"] = "Kill"
kill_button.grid(column=4, row=0, sticky=Tkinter.NE)
def appendRaw(self, data):
line = str(datetime.now()) + ": " + self.hexStr(data) + "\n"
self.raw_text.insert(Tkinter.END, line)
self.raw_text.see(Tkinter.END)
def hexStr(self, data):
return ' '.join(["%02X " % ord(x) for x in data]).strip()
def appendParsed(self, heading, depth, light):
line = str(datetime.now()) + ": heading=" + str(heading) + ", depth=" + str(depth) + ", light=" + str(light) + "\n"
self.parsed_text.insert(Tkinter.END, line)
self.parsed_text.see(Tkinter.END)
def setHeading(self, heading):
self.heading.set(heading)
def setDepth(self, depth):
self.depth.set(depth)
def setLight(self, light):
self.light.set(light)
def sendCommand(self):
if self.validateSend():
light = 1 if self.light_target.get() == Tkinter.ON else 0
data = encodeCrc16(self.heading_target.get(), self.depth_target.get(), light)
self.communicator.write(data[0])
print "Sent heading=" + self.heading_target.get() + " depth=" + self.depth_target.get() + " light=" + str(self.light_target.get()) + " crc=" + str(data[4])
print "Bytes: " + self.hexStr(data[0])
self.output_serializer.write([data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8]])
def is_kill(self):
return self.kill_value.get()
def kill(self):
result = tkMessageBox.askyesno(message='Are you sure you want to kill the submarine?', icon='question', title='Kill')
if result == True:
message = create_kill()
self.communicator.write(message)
print "Sent kill"
def validateSend(self):
if self.heading_target.get() == "":
tkMessageBox.showerror("Error", "Could not send command, heading target must be set.")
return False
if self.depth_target.get() == "":
tkMessageBox.showerror("Error", "Could not send command, depth target must be set.")
return False
try:
heading = float(self.heading_target.get())
if heading < 0 or heading > 359:
tkMessageBox.showerror("Error", "Could not send command, heading target must be between 0 and 359.")
return False
except ValueError:
tkMessageBox.showerror("Error", "Could not send command, heading target is not a number.")
return False
try:
depth = float(self.depth_target.get())
if depth < 0 or depth > 10:
tkMessageBox.showerror("Error", "Could not send command, depth target must be between 0 and 10.")
return False
except ValueError:
tkMessageBox.showerror("Error", "Could not send command, depth target is not a number.")
return False
return True
class Window(Tkinter.Tk):
def __init__(self, communicator, input_serializer, output_serializer):
Tkinter.Tk.__init__(self)
print "Initializing main window"
self.input_serializer = input_serializer
self.output_serializer = output_serializer
self.title("SONIA AUV pyng")
self.resizable(True, True)
self.grid_rowconfigure(1, weight=20)
self.grid_rowconfigure(2, weight=50)
self.menubar = Tkinter.Menu(self)
menu = Tkinter.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(label="File", menu=menu)
menu.add_command(label="Quit", command=self.close)
self.config(menu=self.menubar)
self.panel = Panel(self, communicator, self.output_serializer)
def close(self):
self.input_serializer.close()
self.output_serializer.close()
self.quit()
def is_kill(self):
return self.panel.is_kill()
def setHeading(self, heading):
self.panel.setHeading(heading)
def setDepth(self, depth):
self.panel.setDepth(depth)
def setLight(self, light):
self.panel.setLight(light)
def appendRaw(self, data):
self.panel.appendRaw(data);
def appendParsed(self, heading, depth, light):
self.panel.appendParsed(heading, depth, light)
| 38.538462 | 167 | 0.622754 |
4a1d7d2a14ace6584ca524266e154ea33f28e890 | 433 | py | Python | demo/signals.py | hugorodgerbrown/django-visitor | badb4a7fb5ea696bf5a249188b6cff8cc0a834ae | [
"MIT"
] | 6 | 2021-06-11T13:33:00.000Z | 2022-03-16T13:49:55.000Z | demo/signals.py | hugorodgerbrown/django-visitor | badb4a7fb5ea696bf5a249188b6cff8cc0a834ae | [
"MIT"
] | 3 | 2021-02-13T15:09:54.000Z | 2021-09-23T14:43:20.000Z | demo/signals.py | hugorodgerbrown/django-visitor | badb4a7fb5ea696bf5a249188b6cff8cc0a834ae | [
"MIT"
] | 2 | 2021-03-25T14:34:40.000Z | 2021-07-28T17:42:32.000Z | import logging
from typing import cast
from django.dispatch import receiver
from visitors.models import Visitor
from visitors.signals import self_service_visitor_created
logger = logging.getLogger(__name__)
@receiver(self_service_visitor_created)
def send_visitor_notification(sender: object, **kwargs: object) -> None:
visitor = cast(Visitor, kwargs["visitor"])
logger.info(f"Sending visitor pass to: {visitor.email}")
| 27.0625 | 72 | 0.796767 |
4a1d7e2d6dff1d320209924c184da025924502c3 | 1,627 | py | Python | main.py | bio-punk/PhotoCheck | f4c6b11a8d6e15c7b081e70f0afe4be0439a1f19 | [
"MIT"
] | null | null | null | main.py | bio-punk/PhotoCheck | f4c6b11a8d6e15c7b081e70f0afe4be0439a1f19 | [
"MIT"
] | null | null | null | main.py | bio-punk/PhotoCheck | f4c6b11a8d6e15c7b081e70f0afe4be0439a1f19 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import numpy as np
import dlib
import cv2
from helper import *
import time
# from PIL import Image, ImageDraw, ImageFont
FACE_POINTS = list(range(17, 68))
MOUTH_POINTS = list(range(48, 61))
RIGHT_BROW_POINTS = list(range(17, 22))
LEFT_BROW_POINTS = list(range(22, 27))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
NOSE_POINTS = list(range(27, 35))
JAW_POINTS = list(range(0, 17))
def shape_to_np(shape, dtype="int"):
coords = np.zeros((68, 2), dtype=dtype)
for i in range(0, 68):
coords[i] = (shape.part(i).x, shape.part(i).y)
return coords
def show_point(image, shape):
i = 0
for (x, y) in shape:
cv2.rectangle(image, (x, y), (x+1, y+1), (127,255,127), 2)
cv2.putText(image, str(i), (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.38, (0, 255, 0), 2)
i = i + 1
return image
timebegin = time.time()
if len(sys.argv) < 2:
print "Usage: %s <image file>" % sys.argv[0]
sys.exit(1)
image_file = sys.argv[1]
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
image = cv2.imread(image_file,cv2.IMREAD_COLOR)
image = resize_width(image, 800)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 1)
if len(rects) > 1:
print("TooManyFaces")
exit()
if len(rects) < 1:
print("NoFaces")
exit()
timeend= time.time()
print int(1000*(timeend-timebegin)),'ms'
rect = rects[0]
shape = predictor(gray, rect)
shape = shape_to_np(shape)
image = show_point(image, shape)
cv2.imshow("img", image)
cv2.waitKey(0)
| 23.242857 | 94 | 0.66933 |
4a1d7e914dc24f3352a13487c81153198aeed6d2 | 745 | py | Python | Ex115/lib/interface/__init__.py | Fernando-Rodrigo/Exercicios | 04fe641220f36df85a754b2944d60f245cf6cabd | [
"MIT"
] | 1 | 2022-03-14T20:49:04.000Z | 2022-03-14T20:49:04.000Z | Ex115/lib/interface/__init__.py | Fernando-Rodrigo/Exercicios | 04fe641220f36df85a754b2944d60f245cf6cabd | [
"MIT"
] | null | null | null | Ex115/lib/interface/__init__.py | Fernando-Rodrigo/Exercicios | 04fe641220f36df85a754b2944d60f245cf6cabd | [
"MIT"
] | null | null | null | from ctypes import windll
def cor(numcor):
std_out_handle = windll.kernel32.GetStdHandle(-11)
windll.kernel32.SetConsoleTextAttribute(std_out_handle, numcor)
def leiaInt(msg):
while True:
try:
cor(1)
n = int(input(msg))
return n
break
except(ValueError, TypeError):
cor(4)
print('Digite somente valores inteiros')
def linha(tam=30):
return '-' * tam
def cabecalho(txt):
print(linha())
print(txt.center(30))
print(linha())
def menu(lista):
cabecalho('Menu de opções')
for i in range(0, 3):
print(f'{i + 1} - {lista[i]}')
print(linha())
opcao = leiaInt('Qual a opção que deseja? ')
return opcao
| 20.694444 | 67 | 0.581208 |
4a1d7fb11d75cef46bbacb2cc00dde4db915fc22 | 152 | py | Python | cbw_api_toolbox/__routes__.py | Paulhb7/cyberwatch_api_toolbox | 91e766c8c736f1f23e5d0d707b18bb4e72b39a1e | [
"MIT"
] | null | null | null | cbw_api_toolbox/__routes__.py | Paulhb7/cyberwatch_api_toolbox | 91e766c8c736f1f23e5d0d707b18bb4e72b39a1e | [
"MIT"
] | null | null | null | cbw_api_toolbox/__routes__.py | Paulhb7/cyberwatch_api_toolbox | 91e766c8c736f1f23e5d0d707b18bb4e72b39a1e | [
"MIT"
] | null | null | null | """API routes"""
ROUTE_CVE_ANNOUNCEMENTS = "cve_announcements"
ROUTE_PING = "ping"
ROUTE_REMOTE_ACCESSES = "remote_accesses"
ROUTE_SERVERS = "servers"
| 21.714286 | 45 | 0.782895 |
4a1d80a01ef1d0b6b5056d2aa6b360d896ff7ed8 | 2,001 | py | Python | tests/accounts/test_models.py | mattrobenolt/warehouse | 3ae010f8bcac6f8fd948096bb8925353c2e00ff4 | [
"Apache-2.0"
] | null | null | null | tests/accounts/test_models.py | mattrobenolt/warehouse | 3ae010f8bcac6f8fd948096bb8925353c2e00ff4 | [
"Apache-2.0"
] | null | null | null | tests/accounts/test_models.py | mattrobenolt/warehouse | 3ae010f8bcac6f8fd948096bb8925353c2e00ff4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import datetime
import mock
from warehouse.accounts.tables import users, emails
def test_get_user(dbapp):
dbapp.engine.execute(users.insert().values(
password="!",
username="test-user",
name="Test User",
last_login=datetime.datetime.utcnow(),
is_active=True,
is_superuser=False,
is_staff=False,
))
assert {
"date_joined": mock.ANY,
"email": None,
"name": "Test User",
"username": "test-user",
} == dbapp.models.accounts.get_user("test-user")
def test_get_user_with_email(dbapp):
dbapp.engine.execute(users.insert().values(
id=1,
password="!",
username="test-user",
name="Test User",
last_login=datetime.datetime.utcnow(),
is_active=True,
is_superuser=False,
is_staff=False,
))
dbapp.engine.execute(emails.insert().values(
user_id=1,
email="[email protected]",
primary=True,
verified=True,
))
assert {
"date_joined": mock.ANY,
"email": "[email protected]",
"name": "Test User",
"username": "test-user",
} == dbapp.models.accounts.get_user("test-user")
def test_get_user_missing(dbapp):
assert dbapp.models.accounts.get_user("test-user") is None
| 28.183099 | 74 | 0.66067 |
4a1d80e882e058c1cabbdd8f2b4675c9b513d734 | 27,258 | py | Python | utils/lost1.py | xixigaga/GolemQ | 79640eaf34ab61c1591879e58c135ed2bab0c8ed | [
"MIT"
] | null | null | null | utils/lost1.py | xixigaga/GolemQ | 79640eaf34ab61c1591879e58c135ed2bab0c8ed | [
"MIT"
] | null | null | null | utils/lost1.py | xixigaga/GolemQ | 79640eaf34ab61c1591879e58c135ed2bab0c8ed | [
"MIT"
] | null | null | null | #
# The MIT License (MIT)
#
# Copyright (c) 2018-2020 azai/Rgveda/GolemQuant
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
import numba as nb
from numba import vectorize, float64, jit as njit
import scipy.stats as scs
from datetime import datetime as dt, timezone, timedelta
from GolemQ.analysis.timeseries import *
from QUANTAXIS.QAUtil.QADate_Adv import (QA_util_print_timestamp,)
import pandas as pd
import empyrical
from GolemQ.utils.parameter import (
AKA,
INDICATOR_FIELD as FLD,
TREND_STATUS as ST,
FEATURES as FTR,
)
"""
This module defines utility functions for the performance statistics of fractals, strategies and portfolios.
"""
def calc_fractal_stats(symbol, display_name, fractal_uid, fractal_triggers,
ref_features=None, rsk_fre=0.04, annual=252, taxfee=0.0003,
long=1, mode='lrc', format='pd'):
"""
    explanation:
        Performance analysis of fractal trigger points (sell or buy points,
        depending on the trading direction).
        What is a fractal trigger point? In everyday trading terms it is simply
        the golden-cross / dead-cross point of a given strategy, e.g. MACD
        crosses, double-MA crosses, KDJ crosses and so on.
        All of them can be measured within this fractal_stats evaluation framework.
        Its role is a bit like alphalens, but alphalens is built on the classical
        quantitative-finance framework, which does not match the simpler
        measurement standard used here (for instance, alpha/beta is only computed
        on the current bar). Within this trading system no significant correlation
        was found between the "trend buy points" defined here and the IC/alpha
        values computed by classical quant theory, so the quality of a fractal
        trigger point is measured with statistical-learning methods instead.
        Mathematically, the fractal system is closer to the attention points of an
        Attention model in deep learning; whether it can be combined with machine
        learning is left for future research.
    params:
        symbol: str, code of the traded instrument
        display_name: str, display name of the traded instrument
        fractal_uid: str, unique identifier of the fractal
        fractal_triggers : np.array, fractal trigger signals
        ref_features: np.array, reference indicator features
        rsk_fre: float32, risk-free rate
        annual: int32, annualization period (bars per year)
        taxfee: float32, taxes and fees
        long: int32, trading direction
        mode: str, 'lrc' or 'zen' -- trend-detection mode (zen trend or linear
             regression channel trend); 'hmapower' tracks the reversed MA trend
             of hmapower120; 'raw' performs no trend detection and follows the
             fractal_triggers state signal directly
        format : string, return format
return:
pd.Series or np.array or string
"""
    # Strictly speaking this should take the trading direction into account; for now the calculation is simplified and will be refined if two-sided strategies run into problems.
if (long > 0):
        # Long direction
fractal_cross_before = Timeline_duration(np.where(fractal_triggers > 0, 1, 0))
else:
        # Short direction
fractal_cross_before = Timeline_duration(np.where(fractal_triggers < 0, 1, 0))
if (annual > 125) and (annual < 366):
        # Inferred as a daily-bar data period
fractal_forcast_position = np.where(fractal_cross_before < 3, 1, 0)
fractal_limited = 3
elif ((annual > 1680) and (annual < 2560)):
        # Inferred as a crypto 4-hour-bar data period
fractal_limited = 24
fractal_forcast_position = np.where(fractal_cross_before < 24, 1, 0)
elif ((annual > 512) and (annual < 1280)):
        # Inferred as a stock/securities 1-hour-bar data period
fractal_limited = 12
fractal_forcast_position = np.where(fractal_cross_before < 12, 1, 0)
elif ((annual > 6180) and (annual < 9600)):
        # Inferred as a stock/securities 1-hour-bar data period
fractal_limited = 72
fractal_forcast_position = np.where(fractal_cross_before < 72, 1, 0)
    # Fixed-horizon statistics: returns within 3 trading days
fractal_forcast_3d_lag = calc_event_timing_lag(np.where(fractal_forcast_position > 0, 1, -1))
fractal_forcast_3d_lag = np.where(fractal_forcast_3d_lag <= fractal_limited, fractal_forcast_3d_lag, 0)
closep = ref_features[AKA.CLOSE].values
if (mode == 'lrc'):
        # Measure returns until the next matching exit signal (lineareg_band cross / dead cross, etc.); the holding period is not fixed
if (long > 0):
            # Long direction
lineareg_endpoint_before = Timeline_duration(np.where(ref_features[FLD.LINEAREG_BAND_TIMING_LAG] == -1, 1, 0))
else:
            # Short direction
lineareg_endpoint_before = Timeline_duration(np.where(ref_features[FLD.LINEAREG_BAND_TIMING_LAG] == 1, 1, 0))
fractal_lineareg_position = np.where(fractal_cross_before < lineareg_endpoint_before, 1, 0)
fractal_lineareg_lag = calc_event_timing_lag(np.where(fractal_lineareg_position > 0, 1, -1))
transcation_stats = calc_transcation_stats(fractal_triggers,
closep,
fractal_forcast_3d_lag,
fractal_lineareg_lag,
ref_features[FLD.LINEAREG_BAND_TIMING_LAG].values,
taxfee=taxfee,
long=long)
transcation_stats_df = pd.DataFrame(transcation_stats, columns=['trans_stats',
'trans_start',
'trans_act',
'trans_end',
'start_principle',
'ret_3d',
'ret_fractal',
'pric_settle',
'trans_3d',
'price_end_3d',
'price_end_fractal',
'ret_fractal_sim',
'long',
'duration_time',])
elif (mode == 'zen') or \
(mode == 'mapower'):
if (long > 0):
            # Long direction
zen_wavelet_endpoint_before = Timeline_duration(np.where(ref_features[FLD.ZEN_WAVELET_TIMING_LAG] == -1, 1, 0))
else:
            # Short direction
zen_wavelet_endpoint_before = Timeline_duration(np.where(ref_features[FLD.ZEN_WAVELET_TIMING_LAG] == 1, 1, 0))
fractal_zen_wavelet_position = np.where(fractal_cross_before < zen_wavelet_endpoint_before, 1, 0)
fractal_zen_wavelet_lag = calc_event_timing_lag(np.where(fractal_zen_wavelet_position > 0, 1, -1))
transcation_stats = calc_transcation_stats_np(fractal_triggers,
closep,
fractal_forcast_3d_lag,
fractal_zen_wavelet_lag,
taxfee=taxfee,
long=long)
transcation_stats_df = pd.DataFrame(transcation_stats, columns=['trans_stats',
'trans_start',
'trans_act',
'trans_end',
'start_principle',
'ret_3d',
'ret_fractal',
'pric_settle',
'trans_3d',
'price_end_3d',
'price_end_fractal',
'ret_fractal_sim',
'long',
'duration_time',])
elif (mode == 'hmapower') or \
(mode == 'hmapower120') or \
(mode == 'hmapower30'):
if (long > 0):
            # Long direction
hmapower120_endpoint_before = Timeline_duration(np.where(ref_features[FLD.HMAPOWER120_TIMING_LAG] == -1, 1, 0))
else:
            # Short direction
hmapower120_endpoint_before = Timeline_duration(np.where(ref_features[FLD.HMAPOWER120_TIMING_LAG] == 1, 1, 0))
fractal_hmapower120_position = np.where(fractal_cross_before < hmapower120_endpoint_before, 1, 0)
fractal_hmapower120_lag = calc_event_timing_lag(np.where(fractal_hmapower120_position > 0, 1, -1))
transcation_stats = calc_transcation_stats_np(fractal_triggers,
closep,
fractal_forcast_3d_lag,
fractal_hmapower120_lag,
taxfee=taxfee,
long=long)
transcation_stats_df = pd.DataFrame(transcation_stats, columns=['trans_stats',
'trans_start',
'trans_act',
'trans_end',
'start_principle',
'ret_3d',
'ret_fractal',
'pric_settle',
'trans_3d',
'price_end_3d',
'price_end_fractal',
'ret_fractal_sim',
'long',
'duration_time',])
elif (mode == 'raw'):
fractal_position = np.where(fractal_triggers > 0, 1, 0)
fractal_timing_lag = calc_event_timing_lag(np.where(fractal_position > 0, 1, -1))
if (np.max(fractal_timing_lag) < 12):
#print('A spot Fractal, not a Complete Cycle Fractal')
pass
transcation_stats = calc_transcation_stats_np(fractal_triggers,
closep,
fractal_forcast_3d_lag,
fractal_timing_lag,
taxfee=taxfee,
long=long)
transcation_stats_df = pd.DataFrame(transcation_stats, columns=['trans_stats',
'trans_start',
'trans_act',
'trans_end',
'start_principle',
'ret_3d',
'ret_fractal',
'pric_settle',
'trans_3d',
'price_end_3d',
'price_end_fractal',
'ret_fractal_sim',
'long',
'duration_time',])
transcation_stats_df[AKA.CODE] = symbol
transcation_stats_df['fractal_uid'] = fractal_uid
    # Convert bar ID indices into trade timestamps
    selected_trans_start = ref_features.iloc[transcation_stats[:, 1].astype(int), :]
    transcation_stats_df['trans_start'] = pd.to_datetime(selected_trans_start.index.get_level_values(level=0))
    selected_trans_action = ref_features.iloc[transcation_stats[:, 2].astype(int), :]
    transcation_stats_df['trans_act'] = pd.to_datetime(selected_trans_action.index.get_level_values(level=0))
    selected_trans_end = ref_features.iloc[transcation_stats[:, 3].astype(int), :]
transcation_stats_df['trans_end'] = pd.to_datetime(selected_trans_end.index.get_level_values(level=0))
transcation_stats_df = transcation_stats_df.assign(datetime=pd.to_datetime(selected_trans_start.index.get_level_values(level=0))).drop_duplicates((['datetime',
'code'])).set_index(['datetime',
'code'],
drop=True)
return transcation_stats_df
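# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, synthetic demonstration of calc_fractal_stats() in 'raw' mode: this
# mode only needs a close-price column and a (datetime, code) MultiIndex, so the
# deterministic price path and periodic trigger signal below are made up purely
# for illustration.
def _demo_calc_fractal_stats():
    periods = 120
    index = pd.MultiIndex.from_product(
        [pd.date_range('2020-01-01', periods=periods, freq='D'), ['demo.code']],
        names=['datetime', 'code'])
    demo_features = pd.DataFrame(
        {AKA.CLOSE: 100.0 + 5.0 * np.sin(np.arange(periods) / 5.0)}, index=index)
    triggers = np.zeros(periods)
    triggers[::20] = 1  # fire a hypothetical "buy fractal" every 20 bars
    return calc_fractal_stats('demo.code', 'Demo', 'demo_fractal_uid', triggers,
                              ref_features=demo_features, mode='raw')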
@nb.jit(nopython=True)
def calc_transcation_stats(fractal_triggers: np.ndarray,
closep: np.ndarray,
fractal_forcast_position: np.ndarray,
fractal_sim_position: np.ndarray,
principle_timing_lag: np.ndarray,
taxfee: float=0.0003,
long: int=1):
"""
    explanation:
        Computes the running profit/loss of the current trade, guided by the
        "major direction" (rule).
        Implemented on np.ndarray; the coding style supports JIT and Cython acceleration.
    params:
        fractal_triggers : np.array, fractal trigger signals
        closep: np.array, close price series (reference feature)
        fractal_forcast_position: np.ndarray,
        fractal_principle_position: np.ndarray,
        principle_timing_lag:np.ndarray,
        taxfee: float32, taxes and fees
        long: int32, trading direction
return:
np.array
"""
    # Trade status: state-machine rules; a lower state may transition to a higher state
    stats_nop = 0 # no state
    stats_onhold = 1 # trade executed and position held
    stats_suspended = 2 # suspended, no trade executed, watching the trend
    stats_closed = 3 # trade closed
    stats_teminated = 4 # trend went the wrong way, trade terminated
idx_transcation = -1
idx_transcation_stats = 0
idx_transcation_start = 1
idx_transcation_action = 2
idx_transcation_endpoint = 3
idx_start_in_principle = 4
idx_forcast_returns = 5
idx_principle_returns = 6
idx_settle_price = 7
idx_transcation_3d = 8
idx_endpoint_price_3d = 9
idx_endpoint_price_principle = 10
idx_fractal_sim_returns = 11
idx_long = 12
idx_duration_time = 13
#idx_lineareg_band_lag = 12
ret_transcation_stats = np.zeros((len(closep), 14))
onhold_price = onhold_returns = 0.0
onhold_position_3d = onhold_position_lineareg = False
assert long == 1 or long == -1
ret_transcation_stats[:, idx_long] = long
for i in range(0, len(closep)):
        # Decide whether to open a trade
if (fractal_triggers[i] > 0) and \
(not onhold_position_3d) and \
(not onhold_position_lineareg):
onhold_position_3d = True
onhold_position_lineareg = True
idx_transcation = idx_transcation + 1
ret_transcation_stats[idx_transcation, idx_transcation_start] = i
if (principle_timing_lag[i] * long > 0):
ret_transcation_stats[idx_transcation,
idx_start_in_principle] = principle_timing_lag[i]
if (ret_transcation_stats[idx_transcation,
idx_start_in_principle] * long == -1):
ret_transcation_stats[idx_transcation,
idx_transcation_stats] = stats_suspended
elif (ret_transcation_stats[idx_transcation,
idx_transcation_stats] < stats_onhold):
ret_transcation_stats[idx_transcation,
idx_transcation_stats] = stats_onhold
ret_transcation_stats[idx_transcation,
idx_transcation_action] = i
else:
ret_transcation_stats[idx_transcation,
idx_start_in_principle] = principle_timing_lag[i]
if (ret_transcation_stats[idx_transcation,
idx_transcation_stats] < stats_suspended):
ret_transcation_stats[idx_transcation,
idx_transcation_stats] = stats_suspended
if (principle_timing_lag[i] * long > 0):
if (int(ret_transcation_stats[idx_transcation,
idx_transcation_stats]) == stats_onhold):
onhold_price = closep[i]
ret_transcation_stats[idx_transcation,
idx_settle_price] = onhold_price
        elif (i != len(closep) - 1):
if (int(ret_transcation_stats[idx_transcation,
idx_transcation_stats]) == stats_onhold):
onhold_price = closep[i + 1]
ret_transcation_stats[idx_transcation,
idx_settle_price] = onhold_price
if (onhold_position_lineareg) and (fractal_forcast_position[i] > 0):
if (principle_timing_lag[i] * long > 0):
if (int(ret_transcation_stats[idx_transcation,
idx_transcation_stats]) == stats_suspended):
ret_transcation_stats[idx_transcation,
idx_transcation_stats] = stats_onhold
ret_transcation_stats[idx_transcation,
idx_transcation_action] = i
onhold_price = closep[i]
ret_transcation_stats[idx_transcation,
idx_settle_price] = onhold_price
else:
ret_transcation_stats[idx_transcation,
idx_transcation_stats] = stats_suspended
        # Decide whether to close the trade
if (onhold_position_lineareg) and (fractal_sim_position[i] <= 0):
onhold_position_lineareg = False
onhold_position_3d = False
ret_transcation_stats[idx_transcation,
idx_transcation_endpoint] = i
onhold_sim_price = closep[int(ret_transcation_stats[idx_transcation, idx_transcation_start])]
ret_transcation_stats[idx_transcation,
idx_fractal_sim_returns] = (closep[i] - onhold_sim_price) / onhold_sim_price * long
if (int(ret_transcation_stats[idx_transcation,
idx_transcation_stats]) == stats_onhold):
ret_transcation_stats[idx_transcation,
idx_principle_returns] = (closep[i] - onhold_price) / onhold_price * long
ret_transcation_stats[idx_transcation,
idx_endpoint_price_principle] = closep[i]
ret_transcation_stats[idx_transcation,
idx_transcation_stats] = stats_closed
onhold_price = 0.0
elif (int(ret_transcation_stats[idx_transcation,
idx_transcation_stats]) == stats_suspended):
ret_transcation_stats[idx_transcation,
idx_transcation_stats] = stats_teminated
onhold_price = 0.0
if (onhold_position_3d) and (fractal_forcast_position[i] <= 0):
onhold_position_3d = False
ret_transcation_stats[idx_transcation,
idx_transcation_3d] = principle_timing_lag[i]
if (int(ret_transcation_stats[idx_transcation,
idx_transcation_stats]) == stats_onhold):
ret_transcation_stats[idx_transcation,
idx_forcast_returns] = (closep[i] - onhold_price) / onhold_price * long
ret_transcation_stats[idx_transcation,
idx_endpoint_price_3d] = closep[i]
elif (int(ret_transcation_stats[idx_transcation,
idx_transcation_stats]) == stats_suspended):
ret_transcation_stats[idx_transcation,
idx_transcation_stats] = stats_teminated
onhold_price = 0.0
else:
pass
        if (onhold_position_lineareg) and (i == len(closep) - 1):
            # The trade is still open at the end of the data
if (int(ret_transcation_stats[idx_transcation,
idx_transcation_stats]) == stats_onhold):
ret_transcation_stats[idx_transcation,
idx_principle_returns] = (closep[i] - onhold_price) / onhold_price * long
pass
ret_transcation_stats[idx_transcation,
idx_duration_time] = ret_transcation_stats[idx_transcation,
idx_transcation_endpoint] - ret_transcation_stats[idx_transcation,
idx_transcation_action]
return ret_transcation_stats[:idx_transcation + 1, :]
@nb.jit(nopython=True)
def calc_transcation_stats_np(fractal_triggers:np.ndarray,
closep:np.ndarray,
fractal_forcast_position:np.ndarray,
fractal_timing_lag:np.ndarray,
taxfee:float=0.0003,
long:int=1):
"""
    Computes the running profit/loss of the current trade.
    Implemented on np.ndarray; the coding style supports JIT and Cython acceleration.
"""
    # Trade status: state-machine rules; a lower state may transition to a higher state
    stats_nop = 0 # no state
    stats_onhold = 1 # trade executed and position held
    stats_suspended = 2 # suspended, no trade executed, watching the trend
    stats_closed = 3 # trade closed
    stats_teminated = 4 # trend went the wrong way, trade terminated
idx_transcation = -1
idx_transcation_stats = 0
idx_transcation_start = 1
idx_transcation_action = 2
idx_transcation_endpoint = 3
idx_start_zen_wavelet = 4
idx_forcast_returns = 5
idx_fractal_returns = 6
idx_settle_price = 7
idx_transcation_3d = 8
idx_endpoint_price_3d = 9
idx_endpoint_price_fractal = 10
idx_fractal_sim_returns = 11
idx_long = 12
idx_duration_time = 13
#idx_lineareg_band_lag = 12
ret_transcation_stats = np.zeros((len(closep), 14))
onhold_price = onhold_returns = 0.0
onhold_position_3d = onhold_position_lineareg = False
assert long == 1 or long == -1
ret_transcation_stats[:, idx_long] = long
for i in range(0, len(closep)):
        # Decide whether to open a trade
if (fractal_triggers[i] > 0) and \
(not onhold_position_3d) and \
(not onhold_position_lineareg):
onhold_position_3d = True
onhold_position_lineareg = True
idx_transcation = idx_transcation + 1
ret_transcation_stats[idx_transcation, idx_transcation_start] = i
ret_transcation_stats[idx_transcation,
idx_transcation_stats] = stats_onhold
ret_transcation_stats[idx_transcation, idx_transcation_action] = i
onhold_price = closep[i]
ret_transcation_stats[idx_transcation,
idx_settle_price] = onhold_price
        # Decide whether to close the trade
if (onhold_position_lineareg) and (fractal_timing_lag[i] <= 0):
onhold_position_lineareg = False
onhold_position_3d = False
ret_transcation_stats[idx_transcation,
idx_transcation_endpoint] = i
ret_transcation_stats[idx_transcation,
idx_fractal_sim_returns] = (closep[i] - onhold_price) / onhold_price * long
ret_transcation_stats[idx_transcation,
idx_fractal_returns] = (closep[i] - onhold_price) / onhold_price * long
ret_transcation_stats[idx_transcation,
idx_endpoint_price_fractal] = closep[i]
ret_transcation_stats[idx_transcation,
idx_transcation_stats] = stats_closed
onhold_price = 0.0
if (onhold_position_3d) and (fractal_forcast_position[i] <= 0):
onhold_position_3d = False
ret_transcation_stats[idx_transcation,
idx_transcation_3d] = fractal_timing_lag[i]
if (onhold_position_lineareg):
ret_transcation_stats[idx_transcation,
idx_forcast_returns] = (closep[i] - onhold_price) / onhold_price * long
ret_transcation_stats[idx_transcation,
idx_endpoint_price_3d] = closep[i]
else:
ret_transcation_stats[idx_transcation,
idx_transcation_stats] = stats_teminated
onhold_price = 0.0
        if (onhold_position_lineareg) and (i == len(closep) - 1):
            # The trade is still open at the end of the data
if (int(ret_transcation_stats[idx_transcation,
idx_transcation_stats]) == stats_onhold):
ret_transcation_stats[idx_transcation,
idx_fractal_returns] = (closep[i] - onhold_price) / onhold_price * long
pass
ret_transcation_stats[idx_transcation,
idx_duration_time] = ret_transcation_stats[idx_transcation,
idx_transcation_endpoint] - ret_transcation_stats[idx_transcation,
idx_transcation_action]
return ret_transcation_stats[:idx_transcation + 1, :] | 51.821293 | 163 | 0.501321 |
4a1d81049d108a4afeca5f0b62650ae985af4129 | 16,148 | py | Python | rdflib/namespace.py | mehrzadshm/rdflib | c390d732efdaee81cd99e974b3ce60875b47edfc | [
"BSD-3-Clause"
] | null | null | null | rdflib/namespace.py | mehrzadshm/rdflib | c390d732efdaee81cd99e974b3ce60875b47edfc | [
"BSD-3-Clause"
] | null | null | null | rdflib/namespace.py | mehrzadshm/rdflib | c390d732efdaee81cd99e974b3ce60875b47edfc | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
from unicodedata import category
from six import string_types
from six import text_type
from six.moves.urllib.request import pathname2url
from six.moves.urllib.parse import urldefrag
from six.moves.urllib.parse import urljoin
from rdflib.term import URIRef, Variable, _XSD_PFX, _is_valid_uri
__doc__ = """
===================
Namespace Utilities
===================
RDFLib provides mechanisms for managing Namespaces.
In particular, there is a :class:`~rdflib.namespace.Namespace` class
that takes as its argument the base URI of the namespace.
.. code-block:: pycon
>>> from rdflib.namespace import Namespace
>>> owl = Namespace('http://www.w3.org/2002/07/owl#')
Fully qualified URIs in the namespace can be constructed either by attribute
or by dictionary access on Namespace instances:
.. code-block:: pycon
>>> owl.seeAlso
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#seeAlso')
>>> owl['seeAlso']
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#seeAlso')
Automatic handling of unknown predicates
-----------------------------------------
As a programming convenience, a namespace binding is automatically
created when :class:`rdflib.term.URIRef` predicates are added to the graph.
Importable namespaces
-----------------------
The following namespaces are available by directly importing from rdflib:
* RDF
* RDFS
* OWL
* XSD
* FOAF
* SKOS
* DOAP
* DC
* DCTERMS
* VOID
.. code-block:: pycon
>>> from rdflib import OWL
>>> OWL.seeAlso
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#seeAlso')
"""
__all__ = [
'is_ncname', 'split_uri', 'Namespace',
'ClosedNamespace', 'NamespaceManager',
'XMLNS', 'RDF', 'RDFS', 'XSD', 'OWL',
'SKOS', 'DOAP', 'FOAF', 'DC', 'DCTERMS', 'VOID',
'SWRL', 'SWRLB', 'PPO', 'CSO', 'CCO']
logger = logging.getLogger(__name__)
class Namespace(text_type):
__doc__ = """
Utility class for quickly generating URIRefs with a common prefix
>>> from rdflib import Namespace
>>> n = Namespace("http://example.org/")
>>> n.Person # as attribute
rdflib.term.URIRef(u'http://example.org/Person')
>>> n['first-name'] # as item - for things that are not valid python identifiers
rdflib.term.URIRef(u'http://example.org/first-name')
"""
def __new__(cls, value):
try:
rt = text_type.__new__(cls, value)
except UnicodeDecodeError:
rt = text_type.__new__(cls, value, 'utf-8')
return rt
@property
def title(self):
return URIRef(self + 'title')
def term(self, name):
# need to handle slices explicitly because of __getitem__ override
return URIRef(self + (name if isinstance(name, string_types) else ''))
def __getitem__(self, key, default=None):
return self.term(key)
def __getattr__(self, name):
if name.startswith("__"): # ignore any special Python names!
raise AttributeError
else:
return self.term(name)
def __repr__(self):
return "Namespace(%r)" % text_type(self)
class URIPattern(text_type):
__doc__ = """
Utility class for creating URIs according to some pattern
This supports either new style formatting with .format
or old-style with % operator
>>> u=URIPattern("http://example.org/%s/%d/resource")
>>> u%('books', 12345)
rdflib.term.URIRef(u'http://example.org/books/12345/resource')
"""
def __new__(cls, value):
try:
rt = text_type.__new__(cls, value)
except UnicodeDecodeError:
rt = text_type.__new__(cls, value, 'utf-8')
return rt
def __mod__(self, *args, **kwargs):
return URIRef(text_type(self).__mod__(*args, **kwargs))
def format(self, *args, **kwargs):
return URIRef(text_type.format(self, *args, **kwargs))
def __repr__(self):
return "URIPattern(%r)" % text_type(self)
class ClosedNamespace(object):
"""
A namespace with a closed list of members
Trying to create terms not listen is an error
"""
def __init__(self, uri, terms):
self.uri = uri
self.__uris = {}
for t in terms:
self.__uris[t] = URIRef(self.uri + t)
def term(self, name):
uri = self.__uris.get(name)
if uri is None:
raise KeyError(
"term '{}' not in namespace '{}'".format(name, self.uri)
)
else:
return uri
def __getitem__(self, key, default=None):
return self.term(key)
def __getattr__(self, name):
if name.startswith("__"): # ignore any special Python names!
raise AttributeError
else:
try:
return self.term(name)
except KeyError as e:
raise AttributeError(e)
def __str__(self):
return text_type(self.uri)
def __repr__(self):
return "rdf.namespace.ClosedNamespace(%r)" % text_type(self.uri)
class _RDFNamespace(ClosedNamespace):
"""
Closed namespace for RDF terms
"""
def __init__(self):
super(_RDFNamespace, self).__init__(
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#"),
terms=[
# Syntax Names
"RDF", "Description", "ID", "about", "parseType",
"resource", "li", "nodeID", "datatype",
# RDF Classes
"Seq", "Bag", "Alt", "Statement", "Property",
"List", "PlainLiteral",
# RDF Properties
"subject", "predicate", "object", "type",
"value", "first", "rest",
# and _n where n is a non-negative integer
# RDF Resources
"nil",
# Added in RDF 1.1
"XMLLiteral", "HTML", "langString"]
)
def term(self, name):
try:
i = int(name)
return URIRef("%s_%s" % (self.uri, i))
except ValueError:
return super(_RDFNamespace, self).term(name)
RDF = _RDFNamespace()
RDFS = ClosedNamespace(
uri=URIRef("http://www.w3.org/2000/01/rdf-schema#"),
terms=[
"Resource", "Class", "subClassOf", "subPropertyOf", "comment", "label",
"domain", "range", "seeAlso", "isDefinedBy", "Literal", "Container",
"ContainerMembershipProperty", "member", "Datatype"]
)
OWL = Namespace('http://www.w3.org/2002/07/owl#')
XSD = Namespace(_XSD_PFX)
SKOS = Namespace('http://www.w3.org/2004/02/skos/core#')
DOAP = Namespace('http://usefulinc.com/ns/doap#')
FOAF = Namespace('http://xmlns.com/foaf/0.1/')
DC = Namespace('http://purl.org/dc/elements/1.1/')
DCTERMS = Namespace('http://purl.org/dc/terms/')
VOID = Namespace('http://rdfs.org/ns/void#')
# adding new Namespaces
SWRL = Namespace('http://www.w3.org/2003/11/swrl#')
SWRLB = Namespace('http://www.w3.org/2003/11/swrlb#')
PPO = Namespace('http://www.semanticweb.org/SBU/CAFCM-ontology/ProjectProfileLayer#')
CSO = Namespace('http://www.semanticweb.org/SBU/CAFCM-ontology/ContextSenseLayer#')
CCO = Namespace('http://www.semanticweb.org/SBU/CAFCM-ontology/ChangeCausalityLayer#')
class NamespaceManager(object):
"""
Class for managing prefix => namespace mappings
Sample usage from FuXi ...
.. code-block:: python
ruleStore = N3RuleStore(additionalBuiltins=additionalBuiltins)
nsMgr = NamespaceManager(Graph(ruleStore))
ruleGraph = Graph(ruleStore,namespace_manager=nsMgr)
and ...
.. code-block:: pycon
>>> import rdflib
>>> from rdflib import Graph
>>> from rdflib.namespace import Namespace, NamespaceManager
>>> exNs = Namespace('http://example.com/')
>>> namespace_manager = NamespaceManager(Graph())
>>> namespace_manager.bind('ex', exNs, override=False)
>>> g = Graph()
>>> g.namespace_manager = namespace_manager
>>> all_ns = [n for n in g.namespace_manager.namespaces()]
>>> assert ('ex', rdflib.term.URIRef('http://example.com/')) in all_ns
>>>
"""
def __init__(self, graph):
self.graph = graph
self.__cache = {}
self.__log = None
self.bind("xml", "http://www.w3.org/XML/1998/namespace")
self.bind("rdf", RDF)
self.bind("rdfs", RDFS)
self.bind("xsd", XSD)
def reset(self):
self.__cache = {}
def __get_store(self):
return self.graph.store
store = property(__get_store)
def qname(self, uri):
prefix, namespace, name = self.compute_qname(uri)
if prefix == "":
return name
else:
return ":".join((prefix, name))
def normalizeUri(self, rdfTerm):
"""
Takes an RDF Term and 'normalizes' it into a QName (using the
registered prefix) or (unlike compute_qname) the Notation 3
form for URIs: <...URI...>
"""
try:
namespace, name = split_uri(rdfTerm)
namespace = URIRef(text_type(namespace))
except:
if isinstance(rdfTerm, Variable):
return "?%s" % rdfTerm
else:
return "<%s>" % rdfTerm
prefix = self.store.prefix(namespace)
if prefix is None and isinstance(rdfTerm, Variable):
return "?%s" % rdfTerm
elif prefix is None:
return "<%s>" % rdfTerm
else:
qNameParts = self.compute_qname(rdfTerm)
return ':'.join([qNameParts[0], qNameParts[-1]])
def compute_qname(self, uri, generate=True):
if not _is_valid_uri(uri):
raise ValueError(
'"{}" does not look like a valid URI, cannot serialize this. Did you want to urlencode it?'.format(uri)
)
if uri not in self.__cache:
namespace, name = split_uri(uri)
namespace = URIRef(namespace)
prefix = self.store.prefix(namespace)
if prefix is None:
if not generate:
raise KeyError(
"No known prefix for {} and generate=False".format(namespace)
)
num = 1
while 1:
prefix = "ns%s" % num
if not self.store.namespace(prefix):
break
num += 1
self.bind(prefix, namespace)
self.__cache[uri] = (prefix, namespace, name)
return self.__cache[uri]
def bind(self, prefix, namespace, override=True, replace=False):
"""bind a given namespace to the prefix
if override, rebind, even if the given namespace is already
bound to another prefix.
if replace, replace any existing prefix with the new namespace
"""
namespace = URIRef(text_type(namespace))
# When documenting explain that override only applies in what cases
if prefix is None:
prefix = ''
bound_namespace = self.store.namespace(prefix)
# Check if the bound_namespace contains a URI
# and if so convert it into a URIRef for comparison
# This is to prevent duplicate namespaces with the
# same URI
if bound_namespace:
bound_namespace = URIRef(bound_namespace)
if bound_namespace and bound_namespace != namespace:
if replace:
self.store.bind(prefix, namespace)
return
# prefix already in use for different namespace
#
# append number to end of prefix until we find one
# that's not in use.
if not prefix:
prefix = "default"
num = 1
while 1:
new_prefix = "%s%s" % (prefix, num)
tnamespace = self.store.namespace(new_prefix)
if tnamespace and namespace == URIRef(tnamespace):
# the prefix is already bound to the correct
# namespace
return
if not self.store.namespace(new_prefix):
break
num += 1
self.store.bind(new_prefix, namespace)
else:
bound_prefix = self.store.prefix(namespace)
if bound_prefix is None:
self.store.bind(prefix, namespace)
elif bound_prefix == prefix:
pass # already bound
else:
if override or bound_prefix.startswith("_"): # or a generated prefix
self.store.bind(prefix, namespace)
def namespaces(self):
for prefix, namespace in self.store.namespaces():
namespace = URIRef(namespace)
yield prefix, namespace
def absolutize(self, uri, defrag=1):
base = urljoin("file:", pathname2url(os.getcwd()))
result = urljoin("%s/" % base, uri, allow_fragments=not defrag)
if defrag:
result = urldefrag(result)[0]
if not defrag:
if uri and uri[-1] == "#" and result[-1] != "#":
result = "%s#" % result
return URIRef(result)
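# --- Illustrative sketch (not part of the rdflib API) ---
# A small demonstration of prefix handling with NamespaceManager; the example
# namespace and term are arbitrary, and Graph is imported lazily to avoid a
# circular import at module load time.
def _demo_namespace_manager():
    from rdflib.graph import Graph
    nm = NamespaceManager(Graph())
    nm.bind("ex", Namespace("http://example.com/"))
    term = URIRef("http://example.com/Person")
    return nm.qname(term), nm.normalizeUri(term)  # ('ex:Person', 'ex:Person')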
# From: http://www.w3.org/TR/REC-xml#NT-CombiningChar
#
# * Name start characters must have one of the categories Ll, Lu, Lo,
# Lt, Nl.
#
# * Name characters other than Name-start characters must have one of
# the categories Mc, Me, Mn, Lm, or Nd.
#
# * Characters in the compatibility area (i.e. with character code
# greater than #xF900 and less than #xFFFE) are not allowed in XML
# names.
#
# * Characters which have a font or compatibility decomposition
# (i.e. those with a "compatibility formatting tag" in field 5 of the
# database -- marked by field 5 beginning with a "<") are not allowed.
#
# * The following characters are treated as name-start characters rather
# than name characters, because the property file classifies them as
# Alphabetic: [#x02BB-#x02C1], #x0559, #x06E5, #x06E6.
#
# * Characters #x20DD-#x20E0 are excluded (in accordance with Unicode
# 2.0, section 5.14).
#
# * Character #x00B7 is classified as an extender, because the property
# list so identifies it.
#
# * Character #x0387 is added as a name character, because #x00B7 is its
# canonical equivalent.
#
# * Characters ':' and '_' are allowed as name-start characters.
#
# * Characters '-' and '.' are allowed as name characters.
NAME_START_CATEGORIES = ["Ll", "Lu", "Lo", "Lt", "Nl"]
NAME_CATEGORIES = NAME_START_CATEGORIES + ["Mc", "Me", "Mn", "Lm", "Nd"]
ALLOWED_NAME_CHARS = [u"\u00B7", u"\u0387", u"-", u".", u"_", u":"]
# http://www.w3.org/TR/REC-xml-names/#NT-NCName
# [4] NCName ::= (Letter | '_') (NCNameChar)* /* An XML Name, minus
# the ":" */
# [5] NCNameChar ::= Letter | Digit | '.' | '-' | '_' | CombiningChar
# | Extender
def is_ncname(name):
first = name[0]
if first == "_" or category(first) in NAME_START_CATEGORIES:
for i in range(1, len(name)):
c = name[i]
if not category(c) in NAME_CATEGORIES:
if c != ':' and c in ALLOWED_NAME_CHARS:
continue
return 0
# if in compatibility area
# if decomposition(c)!='':
# return 0
return 1
else:
return 0
XMLNS = "http://www.w3.org/XML/1998/namespace"
def split_uri(uri):
if uri.startswith(XMLNS):
return (XMLNS, uri.split(XMLNS)[1])
length = len(uri)
for i in range(0, length):
c = uri[-i - 1]
if not category(c) in NAME_CATEGORIES:
if c in ALLOWED_NAME_CHARS:
continue
for j in range(-1 - i, length):
if category(uri[j]) in NAME_START_CATEGORIES or uri[j] == "_":
ns = uri[:j]
if not ns:
break
ln = uri[j:]
return (ns, ln)
break
raise ValueError("Can't split '{}'".format(uri))
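# --- Illustrative sketch (not part of the rdflib API) ---
# A quick demonstration of split_uri() and is_ncname(), run only when this module
# is executed directly; the example URI is arbitrary.
if __name__ == '__main__':
    ns, local = split_uri(URIRef("http://example.org/vocab#hasPart"))
    print(ns, local)           # http://example.org/vocab# hasPart
    print(is_ncname(local))    # 1 -> 'hasPart' is a valid NCName
    print(is_ncname(u"1bad"))  # 0 -> an NCName cannot start with a digit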
| 30.641366 | 119 | 0.584902 |
4a1d813b6794df48705f734e77a70e28627c7681 | 21,043 | py | Python | suplemon/editor.py | johnmbaughman/suplemon | fdde20f2181c280236d40f89b89b9bbe5843440e | [
"MIT"
] | 912 | 2015-01-04T22:08:48.000Z | 2022-03-29T00:52:24.000Z | suplemon/editor.py | johnmbaughman/suplemon | fdde20f2181c280236d40f89b89b9bbe5843440e | [
"MIT"
] | 222 | 2015-01-26T13:25:37.000Z | 2022-01-22T08:10:20.000Z | suplemon/editor.py | johnmbaughman/suplemon | fdde20f2181c280236d40f89b89b9bbe5843440e | [
"MIT"
] | 76 | 2015-03-31T18:11:15.000Z | 2021-05-19T12:31:10.000Z | # -*- encoding: utf-8
"""
Editor class for extending viewer with text editing features.
"""
from . import helpers
from .line import Line
from .cursor import Cursor
from .viewer import Viewer
class State:
"""Store editor state for undo/redo."""
def __init__(self, editor=None):
self.cursors = [Cursor()]
self.lines = [Line()]
self.y_scroll = 0
self.x_scroll = 0
self.last_find = ""
if editor is not None:
self.store(editor)
def store(self, editor):
"""Store the state of editor instance."""
self.cursors = [cursor.tuple() for cursor in editor.cursors]
self.lines = [line.data for line in editor.lines]
self.y_scroll = editor.y_scroll
self.x_scroll = editor.x_scroll
self.last_find = editor.last_find
def restore(self, editor):
"""Restore stored state into the editor instance."""
editor.cursors = [Cursor(cursor) for cursor in self.cursors]
editor.lines = [Line(line) for line in self.lines]
editor.y_scroll = self.y_scroll
editor.x_scroll = self.x_scroll
editor.last_find = self.last_find
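# --- Illustrative sketch (not part of the original module) ---
# State only reads and writes the attributes listed below, so a SimpleNamespace
# stand-in is enough to show how an undo snapshot is captured and restored; the
# sample line data and cursor position are made up.
def _demo_state_roundtrip():
    from types import SimpleNamespace
    editor_like = SimpleNamespace(cursors=[Cursor(2, 0)],
                                  lines=[Line("hello"), Line("world")],
                                  y_scroll=0, x_scroll=0, last_find="")
    snapshot = State()
    snapshot.store(editor_like)            # capture buffer, cursors and scroll
    editor_like.lines = [Line("changed")]  # simulate an edit
    snapshot.restore(editor_like)          # roll back to the captured state
    return [line.get_data() for line in editor_like.lines]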
class Editor(Viewer):
"""Extends Viewer with editing capabilities."""
def __init__(self, app, window):
"""Initialize the editor.
Args:
app: The Suplemon main instance.
window: A window object to use for the ui.
"""
Viewer.__init__(self, app, window)
# History of editor states for undo/redo
self.history = [State()]
# Current state index of the editor
self.current_state = 0
# Last editor action that was used (for undo/redo)
self.last_action = None
def init(self):
Viewer.init(self)
operations = {
"backspace": self.backspace, # Backspace
"delete": self.delete, # Delete
"insert": self.insert, # Insert
"enter": self.enter, # Enter
"tab": self.tab, # Tab
"untab": self.untab, # Shift + Tab
"escape": self.escape, # Escape
"single_selection": self.single_selection, # Escape
"clear_last_find": self.clear_last_find, # Escape
"new_cursor_up": self.new_cursor_up, # Alt + Up
"new_cursor_down": self.new_cursor_down, # Alt + Down
"new_cursor_left": self.new_cursor_left, # Alt + Left
"new_cursor_right": self.new_cursor_right, # Alt + Right
"page_up": self.page_up, # Page Up
"page_down": self.page_down, # Page Down
"push_up": self.push_up, # Alt + Page Up
"push_down": self.push_down, # Alt + Page Down
"undo": self.undo, # F5
"redo": self.redo, # F6
"toggle_line_nums": self.toggle_line_nums, # F9
"toggle_line_ends": self.toggle_line_ends, # F10
"toggle_highlight": self.toggle_highlight, # F11
"copy": self.copy, # Ctrl + C
"cut": self.cut, # Ctrl + X
"duplicate_line": self.duplicate_line, # Ctrl + W
}
for key in operations.keys():
self.operations[key] = operations[key]
def set_buffer(self, buffer):
"""Sets local or global buffer depending on config."""
if self.app.config["editor"]["use_global_buffer"]:
self.app.global_buffer = buffer
else:
self.buffer = buffer
def set_data(self, data):
"""Set the editor text contents."""
Viewer.set_data(self, data)
buffer = self.get_buffer() # TODO: check this
if len(buffer) > 1:
self.store_state()
else:
state = State()
state.store(self)
self.history[0] = state
def store_action_state(self, action, state=None):
"""Store the editor state if a new action is taken."""
if self.last_action != action:
self.last_action = action
self.store_state(state)
else:
# FIXME: This if is here just for safety.
# FIXME: current_state might be wrong ;.<
if self.current_state < len(self.history)-1:
self.history[self.current_state].store(self)
def store_state(self, state=None, action=None):
"""Store the current editor state for undo/redo."""
if state is None:
state = State()
state.store(self)
if len(self.history) > 1:
if self.current_state < len(self.history)-1:
self.history = self.history[:self.current_state]
self.history.append(state)
self.current_state = len(self.history)-1
if len(self.history) > self.config["max_history"]:
self.history.pop(0)
def restore_state(self, index=None):
"""Restore an editor state."""
if len(self.history) <= 1:
return False
if index is None:
index = self.current_state-1
if index < 0 or index >= len(self.history):
return False
# if self.current_state < len(self.history):
# self.current_state = self.current_state-1
state = self.history[index]
state.restore(self)
self.current_state = index
def handle_input(self, event):
done = Viewer.handle_input(self, event)
if not done:
if event.is_typeable:
if isinstance(event.key_code, str):
self.type(event.key_code)
elif event.key_name:
self.type(event.key_name)
return True
return False
def undo(self):
"""Undo the last command or change."""
self.last_action = "undo"
self.restore_state()
def redo(self):
"""Redo the last command or change."""
self.last_action = "redo"
if self.current_state == len(self.history)-1:
return False
index = self.current_state+1
self.restore_state(index)
#
# Cursor operations
#
def new_cursor_up(self):
"""Add a new cursor one line up."""
x = self.get_cursor().x
cursor = self.get_first_cursor()
if cursor.y == 0:
return
new = Cursor(x, cursor.y-1)
self.cursors.append(new)
self.move_cursors()
self.scroll_up()
def new_cursor_down(self):
"""Add a new cursor one line down."""
x = self.get_cursor().x
cursor = self.get_last_cursor()
if cursor.y == len(self.lines)-1:
return
new = Cursor(x, cursor.y+1)
self.cursors.append(new)
self.move_cursors()
self.scroll_down()
def new_cursor_left(self):
"""Add a new cursor one character left."""
new = []
for cursor in self.cursors:
if cursor.x == 0:
continue
new.append(Cursor(cursor.x-1, cursor.y))
for c in new:
self.cursors.append(c)
self.move_cursors()
self.scroll_up()
def new_cursor_right(self):
"""Add a new cursor one character right."""
new = []
for cursor in self.cursors:
if cursor.x+1 > len(self.lines[cursor.y]):
continue
new.append(Cursor(cursor.x+1, cursor.y))
for c in new:
self.cursors.append(c)
self.move_cursors()
self.scroll_down()
def escape(self):
"""Handle escape key.
Wrapper for clear_last_find and single_selection."""
self.clear_last_find()
self.single_selection()
def clear_last_find(self):
"""Removes last_find so a new auto-find can be initiated."""
self.last_find = ""
def single_selection(self):
"""Removes all cursors except primary cursor."""
self.cursors = [self.cursors[0]]
self.move_cursors()
#
# Text editing operations
#
def replace_all(self, what, replacement):
"""Replaces what with replacement on each line."""
for line in self.lines:
data = line.get_data()
new = data.replace(what, replacement)
line.set_data(new)
self.move_cursors()
def delete(self):
"""Delete the next character."""
for cursor in self.cursors:
if len(self.lines)-1 < cursor.y:
# If we've run out of lines
break
line = self.lines[cursor.y]
# if we have more than 1 line
# and we're at the end of the current line
# and we're not on the last line
if len(self.lines) > 1 and cursor.x == len(line) and cursor.y != len(self.lines) - 1:
data = self.lines[cursor.y].get_data()
self.lines.pop(cursor.y)
self.lines[cursor.y].set_data(data+self.lines[cursor.y])
# Reposition cursors from line below into correct positions on current line
line_cursors = self.get_cursors_on_line(cursor.y+1)
for c in line_cursors:
c.move_right(len(data))
c.move_up()
self.move_y_cursors(cursor.y, -1)
else:
start = line[:cursor.x]
end = line[cursor.x+1:]
self.lines[cursor.y].set_data(start+end)
self.move_x_cursors(cursor.y, cursor.x, -1)
self.move_cursors()
# Add a restore point if previous action != delete
self.store_action_state("delete")
def backspace(self):
"""Delete the previous character."""
curs = reversed(sorted(self.cursors, key=lambda c: (c[1], c[0])))
# Iterate through all cursors from bottom to top
for cursor in curs:
line_no = cursor.y
# If we're at the beginning of file don't do anything
if cursor.x == 0 and cursor.y == 0:
continue
# If were operating at the beginning of a line
if cursor.x == 0 and cursor.y != 0:
curr_line = self.lines.pop(line_no)
prev_line = self.lines[line_no-1]
length = len(prev_line) # Get the length of previous line
# Add the current line to the previous one
new_data = self.lines[cursor.y-1] + curr_line
self.lines[cursor.y-1].set_data(new_data)
# Get all cursors on current line
line_cursors = self.get_cursors_on_line(line_no)
for line_cursor in line_cursors: # Move the cursors
line_cursor.move_up()
# Add the length of previous line to each x coordinate
# so that their relative positions
line_cursor.move_right(length)
# Move all cursors below up one line
# (since a line was removed above them)
self.move_y_cursors(cursor.y, -1)
# Handle all other cases
else:
curr_line = self.lines[line_no]
# Remove one character by default
del_n_chars = 1
# Check if we should unindent
if self.config["backspace_unindent"]:
# Check if we can unindent, and that it's actually whitespace
# We don't do this for hard tabs since they're just a single character
if not self.config["hard_tabs"]:
indent = self.config["tab_width"]
if cursor.x >= indent:
if curr_line[cursor.x-indent:cursor.x] == indent*" ":
# Remove an indents worth of whitespace
del_n_chars = indent
# Slice characters out of the line
start = curr_line[:cursor.x-del_n_chars]
end = curr_line[cursor.x:]
# Store the new line
self.lines[line_no].set_data(start+end)
# Move the operating curser back the deleted amount
cursor.move_left(del_n_chars)
# Do the same to the rest
self.move_x_cursors(line_no, cursor.x, -1*del_n_chars)
# Ensure we keep the view scrolled
self.move_cursors()
self.scroll_up()
# Add a restore point if previous action != backspace
self.store_action_state("backspace")
def enter(self):
"""Insert a new line at each cursor."""
# We sort the cursors, and loop through them from last to first
# That way we avoid messing with
# the relative positions of the higher cursors
curs = sorted(self.cursors, key=lambda c: (c[1], c[0]))
curs = reversed(curs)
for cursor in curs:
# The current line this cursor is on
line = self.lines[cursor.y]
# Start of the line
start = line[:cursor.x]
# End of the line
end = line[cursor.x:]
# Leave the beginning of the line
self.lines[cursor.y].set_data(start)
wspace = ""
if self.config["auto_indent_newline"]:
wspace = helpers.whitespace(self.lines[cursor.y])*" "
self.lines.insert(cursor.y+1, Line(wspace+end))
self.move_y_cursors(cursor.y, 1)
cursor.set_x(len(wspace))
cursor.move_down()
self.move_cursors()
self.scroll_down()
# Add a restore point if previous action != enter
self.store_action_state("enter")
def insert(self):
"""Insert buffer data at cursor(s)."""
cur = self.get_cursor()
buffer = list(self.get_buffer())
# If we have more than one cursor
# Or one cursor and one line
if len(self.cursors) > 1 or len(buffer) == 1:
# If the cursor count is more than the buffer length extend
# the buffer until it's at least as long as the cursor count
while len(buffer) < len(self.cursors):
buffer.extend(buffer)
curs = sorted(self.cursors, key=lambda c: (c[1], c[0]))
for cursor in curs:
line = self.lines[cursor.y]
buf = buffer[0]
line = line[:cursor.x] + buf + line[cursor.x:]
self.lines[cursor.y].set_data(line)
buffer.pop(0)
self.move_x_cursors(cursor.y, cursor.x-1, len(buf))
# If we have one cursor and multiple lines
else:
for buf in buffer:
y = cur[1]
if y < 0:
y = 0
self.lines.insert(y, Line(buf))
self.move_y_cursors(cur[1]-1, 1)
self.move_cursors()
self.scroll_down()
# Add a restore point if previous action != insert
self.store_action_state("insert")
def insert_lines_at(self, lines, at):
rev_lines = reversed(lines)
for line in rev_lines:
self.lines.insert(at, Line(line))
self.move_y_cursors(at, len(lines))
def push_up(self):
"""Move current lines up by one line."""
used_y = []
curs = sorted(self.cursors, key=lambda c: (c[1], c[0]))
for cursor in curs:
if cursor.y in used_y:
continue
used_y.append(cursor.y)
if cursor.y == 0:
break
old = self.lines[cursor.y-1]
self.lines[cursor.y-1] = self.lines[cursor.y]
self.lines[cursor.y] = old
self.move_cursors((0, -1))
self.scroll_up()
# Add a restore point if previous action != push_up
self.store_action_state("push_up")
def push_down(self):
"""Move current lines down by one line."""
used_y = []
curs = reversed(sorted(self.cursors, key=lambda c: (c[1], c[0])))
for cursor in curs:
if cursor.y in used_y:
continue
if cursor.y >= len(self.lines)-1:
break
used_y.append(cursor.y)
old = self.lines[cursor.y+1]
self.lines[cursor.y+1] = self.lines[cursor.y]
self.lines[cursor.y] = old
self.move_cursors((0, 1))
self.scroll_down()
# Add a restore point if previous action != push_down
self.store_action_state("push_down")
def tab(self):
"""Indent lines."""
# Add a restore point if previous action != tab
self.store_action_state("tab")
if not self.config["hard_tabs"]:
self.type(" "*self.config["tab_width"])
else:
self.type("\t")
def untab(self):
"""Unindent lines."""
linenums = []
# String to compare tabs to
tab = " "*self.config["tab_width"]
if self.config["hard_tabs"]:
tab = "\t"
width = len(tab)
for cursor in self.cursors:
line = self.lines[cursor.y]
if cursor.y in linenums:
cursor.x = helpers.whitespace(line)
continue
elif line[:width] == tab:
line = Line(line[width:])
self.lines[cursor.y] = line
cursor.x = helpers.whitespace(line)
linenums.append(cursor.y)
# Add a restore point if previous action != untab
self.store_action_state("untab")
def copy(self):
"""Copy lines to buffer."""
# Store cut lines in buffer
copy_buffer = []
# Get all lines with cursors on them
line_nums = self.get_lines_with_cursors()
for i in range(len(line_nums)):
# Get the line
line = self.lines[line_nums[i]]
# Put it in our temporary buffer
copy_buffer.append(line.get_data())
self.set_buffer(copy_buffer)
self.store_action_state("copy")
def cut(self):
"""Cut lines to buffer."""
# Store cut lines in buffer
cut_buffer = []
# Get all lines with cursors on them
line_nums = self.get_lines_with_cursors()
# Sort from last to first (invert order)
line_nums = line_nums[::-1]
for i in range(len(line_nums)): # Iterate from last to first
# Make sure we don't completely remove the last line
if len(self.lines) == 1:
cut_buffer.append(self.lines[0])
self.lines[0] = Line()
break
# Get the current line
line_no = line_nums[i]
# Get and remove the line
line = self.lines.pop(line_no)
# Put it in our temporary buffer
cut_buffer.append(line)
# Move all cursors below the current line up
self.move_y_cursors(line_no, -1)
self.move_cursors() # Make sure cursors are in valid places
# Reverse the buffer to get correct order and store it
self.set_buffer(cut_buffer[::-1])
self.store_action_state("cut")
def type(self, data):
"""Insert data at each cursor position."""
for cursor in self.cursors:
self.type_at_cursor(cursor, data)
self.move_cursors()
# Add a restore point if previous action != type
self.store_action_state("type")
def type_at_cursor(self, cursor, data):
"""Insert data at specified cursor."""
line = self.lines[cursor.y]
start = line[:cursor.x]
end = line[cursor.x:]
self.lines[cursor.y].set_data(start + data + end)
self.move_x_cursors(cursor.y, cursor.x, len(data))
cursor.move_right(len(data))
def go_to_pos(self, line_no, col=0):
"""Move primary cursor to line_no, col=0."""
if line_no < 0:
line_no = len(self.lines)-1
else:
line_no = line_no-1
self.store_state()
cur = self.get_cursor()
if col is not None:
cur.x = col
cur.y = line_no
if cur.y >= len(self.lines):
cur.y = len(self.lines)-1
self.scroll_to_line(cur.y)
self.move_cursors()
def duplicate_line(self):
"""Copy current line and add it below as a new line."""
curs = sorted(self.cursors, key=lambda c: (c.y, c.x))
for cursor in curs:
line = Line(self.lines[cursor.y])
self.lines.insert(cursor.y+1, line)
self.move_y_cursors(cursor.y, 1)
self.move_cursors()
self.store_action_state("duplicate_line")
| 37.178445 | 97 | 0.541463 |
4a1d814f09dd7154b0263e0bf010db3677fa11ad | 767 | py | Python | oceanbolt/com/entities_v3/services/entity_service/__init__.py | oceanbolt/oceanbolt-python-sdk | c03c400fb7861a7918c6d18d39ad7a108a72baab | [
"MIT"
] | 8 | 2021-04-15T08:43:55.000Z | 2021-12-21T09:23:58.000Z | oceanbolt/com/entities_v3/services/entity_service/__init__.py | oceanbolt/oceanbolt-python-sdk | c03c400fb7861a7918c6d18d39ad7a108a72baab | [
"MIT"
] | null | null | null | oceanbolt/com/entities_v3/services/entity_service/__init__.py | oceanbolt/oceanbolt-python-sdk | c03c400fb7861a7918c6d18d39ad7a108a72baab | [
"MIT"
] | 2 | 2022-01-16T11:43:51.000Z | 2022-03-24T19:26:44.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import EntityServiceClient
from .async_client import EntityServiceAsyncClient
__all__ = (
'EntityServiceClient',
'EntityServiceAsyncClient',
)
| 30.68 | 74 | 0.754889 |
4a1d84b97b06a4dfee9de214ff0c038a3198c213 | 10,100 | py | Python | Python/test/gjrgarch.py | igitur/QuantLib-SWIG | 55cd35323647a13a468dc0488b664172ff0fd861 | [
"BSD-3-Clause"
] | 1 | 2020-11-19T09:53:34.000Z | 2020-11-19T09:53:34.000Z | Python/test/gjrgarch.py | nhaga/QuantLib-SWIG | c20457d9f0cd55d411a5144aad617383b58e9c02 | [
"BSD-3-Clause"
] | 10 | 2021-05-10T08:33:32.000Z | 2022-03-28T09:04:45.000Z | Python/test/gjrgarch.py | Omnistac/QuantLib-SWIG | d63049a018795cc7f66d57944a6564af2fb9ae9f | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (C) 2019 Pedro Coelho
This file is part of QuantLib, a free-software/open-source library
for financial quantitative analysts and developers - http://quantlib.org/
QuantLib is free software: you can redistribute it and/or modify it
under the terms of the QuantLib license. You should have received a
copy of the license along with this program; if not, please email
<[email protected]>. The license is also available online at
<http://quantlib.org/license.shtml>.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
"""
import QuantLib as ql
from math import pi, exp, sqrt
import unittest
class GJRGARCHEngineTest(unittest.TestCase):
def setUp(self):
self.settle_date = ql.Date.todaysDate()
ql.Settings.instance().evaluationDate = self.settle_date
self.dayCounter = ql.ActualActual()
self.risk_free_handle = ql.YieldTermStructureHandle(ql.FlatForward(self.settle_date, 0.05, self.dayCounter))
self.dividend_yield_handle = ql.YieldTermStructureHandle(ql.FlatForward(self.settle_date, 0, self.dayCounter))
self.s0 = 50
self.omega = 0.000002
self.alpha = 0.024
self.beta = 0.93
self.gamma = 0.059
self.daysPerYear = 365.0
self.maturity = [90, 180]
self.strike = [35, 40, 45, 50, 55, 60]
self.lambda_values = [0.0, 0.1, 0.2]
# correct values of analytic approximation
self.analytic = [
[[15.4315, 10.5552, 5.9625, 2.3282, 0.5408, 0.0835], [15.8969, 11.2173, 6.9112, 3.4788, 1.3769, 0.4357]],
[[15.4556, 10.6929, 6.2381, 2.6831, 0.7822, 0.1738], [16.0587, 11.5338, 7.3170, 3.9074, 1.7279, 0.6568]],
[[15.8000, 11.2734, 7.0376, 3.6767, 1.5871, 0.5934], [16.9286, 12.3170, 8.0405, 4.6348, 2.3429, 1.0590]],
]
# correct values of Monte Carlo
self.mc_values = [
[[15.4332, 10.5453, 5.9351, 2.3521, 0.5597, 0.0776], [15.8910, 11.1772, 6.8827, 3.5096, 1.4196, 0.4502]],
[[15.4580, 10.6433, 6.2019, 2.7513, 0.8374, 0.1706], [15.9884, 11.4139, 7.3103, 4.0497, 1.8862, 0.7322]],
[[15.6619, 11.1263, 7.0968, 3.9152, 1.8133, 0.7010], [16.5195, 12.3181, 8.6085, 5.5700, 3.3103, 1.8053]],
]
def tearDown(self):
ql.Settings.instance().evaluationDate = ql.Date()
def testOptionPricing(self):
tolerance = 0.075
for k in range(3):
lambda_value = self.lambda_values[k]
m1 = (
self.beta
+ (self.alpha + self.gamma * ql.CumulativeNormalDistribution()(lambda_value))
* (1 + lambda_value * lambda_value)
+ self.gamma * lambda_value * exp(-lambda_value * lambda_value / 2) / sqrt(2 * pi)
)
v0 = self.omega / (1 - m1)
quote = ql.QuoteHandle(ql.SimpleQuote(self.s0))
garch = ql.GJRGARCHProcess(
self.risk_free_handle,
self.dividend_yield_handle,
quote,
v0,
self.omega,
self.alpha,
self.beta,
self.gamma,
lambda_value,
self.daysPerYear,
)
garch_model = ql.GJRGARCHModel(garch)
analytic_engine = ql.AnalyticGJRGARCHEngine(garch_model)
mc_engine = ql.MCEuropeanGJRGARCHEngine(
process=garch, traits="pseudorandom", timeStepsPerYear=20, requiredTolerance=0.02, seed=1234
)
for i in range(2):
for j in range(6):
payoff = ql.PlainVanillaPayoff(ql.Option.Call, self.strike[j])
ex_date = self.settle_date + ql.Period(self.maturity[i], ql.Days)
exercise = ql.EuropeanExercise(ex_date)
option = ql.VanillaOption(payoff, exercise)
option.setPricingEngine(analytic_engine)
analytic_price = option.NPV()
analytic_difference = analytic_price - self.analytic[k][i][j]
self.assertTrue(analytic_difference <= 2 * tolerance)
option.setPricingEngine(mc_engine)
mc_price = option.NPV()
mc_difference = mc_price - self.mc_values[k][i][j]
self.assertTrue(mc_difference <= 2 * tolerance)
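# --- Illustrative sketch (not part of the test suite) ---
# A compact, standalone example of pricing a single European call with the
# analytic GJR-GARCH engine; every market and model parameter below is made up
# purely for demonstration.
def _demo_gjrgarch_price():
    today = ql.Date.todaysDate()
    ql.Settings.instance().evaluationDate = today
    day_counter = ql.ActualActual()
    risk_free = ql.YieldTermStructureHandle(ql.FlatForward(today, 0.05, day_counter))
    dividends = ql.YieldTermStructureHandle(ql.FlatForward(today, 0.0, day_counter))
    spot = ql.QuoteHandle(ql.SimpleQuote(50.0))
    omega, alpha, beta, gamma, lam = 0.000002, 0.024, 0.93, 0.059, 0.1
    m1 = (beta
          + (alpha + gamma * ql.CumulativeNormalDistribution()(lam)) * (1 + lam * lam)
          + gamma * lam * exp(-lam * lam / 2) / sqrt(2 * pi))
    v0 = omega / (1 - m1)
    process = ql.GJRGARCHProcess(risk_free, dividends, spot, v0,
                                 omega, alpha, beta, gamma, lam, 365.0)
    option = ql.VanillaOption(ql.PlainVanillaPayoff(ql.Option.Call, 50.0),
                              ql.EuropeanExercise(today + ql.Period(90, ql.Days)))
    option.setPricingEngine(ql.AnalyticGJRGARCHEngine(ql.GJRGARCHModel(process)))
    return option.NPV()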
class GJRGARCHCalibrationTest(unittest.TestCase):
def setUp(self):
self.settle_date = ql.Date(5, ql.July, 2002)
ql.Settings.instance().evaluationDate = self.settle_date
self.dayCounter = ql.Actual365Fixed()
self.calendar = ql.TARGET()
self.days = [0, 13, 41, 75, 165, 256, 345, 524, 703]
self.rates = [0.0357, 0.0357, 0.0349, 0.0341, 0.0355, 0.0359, 0.0368, 0.0386, 0.0401]
dates = list()
for day in self.days:
date = self.settle_date + ql.Period(day, ql.Days)
dates.append(date)
self.risk_free_ts = ql.YieldTermStructureHandle(ql.ZeroCurve(dates, self.rates, self.dayCounter))
self.dividend_yield_handle = ql.YieldTermStructureHandle(ql.FlatForward(self.settle_date, 0, self.dayCounter))
self.s0 = 4468.17
self.omega = 0.000002
self.alpha = 0.024
self.beta = 0.93
self.gamma = 0.059
self.daysPerYear = 365.0
self.Volatility = [
0.6625,
0.4875,
0.4204,
0.3667,
0.3431,
0.3267,
0.3121,
0.3121,
0.6007,
0.4543,
0.3967,
0.3511,
0.3279,
0.3154,
0.2984,
0.2921,
0.5084,
0.4221,
0.3718,
0.3327,
0.3155,
0.3027,
0.2919,
0.2889,
0.4541,
0.3869,
0.3492,
0.3149,
0.2963,
0.2926,
0.2819,
0.2800,
0.4060,
0.3607,
0.3330,
0.2999,
0.2887,
0.2811,
0.2751,
0.2775,
0.3726,
0.3396,
0.3108,
0.2781,
0.2788,
0.2722,
0.2661,
0.2686,
0.3550,
0.3277,
0.3012,
0.2781,
0.2781,
0.2661,
0.2661,
0.2681,
0.3428,
0.3209,
0.2958,
0.2740,
0.2688,
0.2627,
0.2580,
0.2620,
0.3302,
0.3062,
0.2799,
0.2631,
0.2573,
0.2533,
0.2504,
0.2544,
0.3343,
0.2959,
0.2705,
0.2540,
0.2504,
0.2464,
0.2448,
0.2462,
0.3460,
0.2845,
0.2624,
0.2463,
0.2425,
0.2385,
0.2373,
0.2422,
0.3857,
0.2860,
0.2578,
0.2399,
0.2357,
0.2327,
0.2312,
0.2351,
0.3976,
0.2860,
0.2607,
0.2356,
0.2297,
0.2268,
0.2241,
0.2320,
]
self.strike = [3400, 3600, 3800, 4000, 4200, 4400, 4500, 4600, 4800, 5000, 5200, 5400, 5600]
self.lambda_value = 0.1
def tearDown(self):
ql.Settings.instance().evaluationDate = ql.Date()
def testCalibration(self):
m1 = (
self.beta
+ (self.alpha + self.gamma * ql.CumulativeNormalDistribution()(self.lambda_value))
* (1 + self.lambda_value * self.lambda_value)
+ self.gamma * self.lambda_value * exp(-self.lambda_value * self.lambda_value / 2) / sqrt(2 * pi)
)
v0 = self.omega / (1 - m1)
helpers = list()
for s in range(3, 10):
for m in range(0, 3):
vol = ql.QuoteHandle(ql.SimpleQuote(self.Volatility[s * 8 + m]))
maturity = ql.Period(int((self.days[m + 1] + 3) / 7), ql.Weeks)
heston_helper = ql.HestonModelHelper(
maturity,
self.calendar,
self.s0,
self.strike[s],
vol,
self.risk_free_ts,
self.dividend_yield_handle,
ql.BlackCalibrationHelper.ImpliedVolError,
)
helpers.append(heston_helper)
new_garch_process = ql.GJRGARCHProcess(
self.risk_free_ts,
self.dividend_yield_handle,
ql.QuoteHandle(ql.SimpleQuote(self.s0)),
v0,
self.omega,
self.alpha,
self.beta,
self.gamma,
self.lambda_value,
self.daysPerYear,
)
new_garch_model = ql.GJRGARCHModel(new_garch_process)
new_garch_engine = ql.AnalyticGJRGARCHEngine(new_garch_model)
for helper in helpers:
helper.setPricingEngine(new_garch_engine)
om = ql.Simplex(0.05)
new_garch_model.calibrate(helpers, om, ql.EndCriteria(400, 40, 1.0e-8, 1.0e-8, 1.0e-8))
sse = 0
for helper in helpers:
diff = helper.calibrationError() * 100
sse += diff * diff
maxExpected = 15
self.assertTrue(sse <= maxExpected)
if __name__ == "__main__":
print("testing QuantLib " + ql.__version__)
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(GJRGARCHEngineTest, "test"))
suite.addTest(unittest.makeSuite(GJRGARCHCalibrationTest, "test"))
unittest.TextTestRunner(verbosity=2).run(suite)
| 34.237288 | 118 | 0.515446 |
4a1d84ff9ca2229241dfa3d8366ea7549a197e21 | 442 | py | Python | testemunhoweb/cadastro/migrations/0049_auto_20191231_1140.py | danielcamilo13/testemunhoWEB | 46825e31123058fa6ee21e4e71e9e0bedde32bb4 | [
"bzip2-1.0.6"
] | 1 | 2019-12-03T01:37:13.000Z | 2019-12-03T01:37:13.000Z | testemunhoweb/cadastro/migrations/0049_auto_20191231_1140.py | danielcamilo13/testemunhoWEB | 46825e31123058fa6ee21e4e71e9e0bedde32bb4 | [
"bzip2-1.0.6"
] | 11 | 2020-06-06T01:28:35.000Z | 2022-03-12T00:16:34.000Z | testemunhoweb/cadastro/migrations/0049_auto_20191231_1140.py | danielcamilo13/testemunhoWEB | 46825e31123058fa6ee21e4e71e9e0bedde32bb4 | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 2.2.7 on 2019-12-31 14:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cadastro', '0048_designacao_csrfmiddlewaretoken'),
]
operations = [
migrations.AlterField(
model_name='irmaos',
name='grupo',
field=models.CharField(blank=True, max_length=3, null=True, verbose_name='Grupo'),
),
]
| 23.263158 | 94 | 0.624434 |
4a1d850a072a4962a1330f9ae4c26d44f5a931ec | 6,536 | py | Python | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/pn_ospfarea.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/pn_ospfarea.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/pn_ospfarea.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | #!/usr/bin/python
""" PN-CLI vrouter-ospf-add/remove """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pn_ospfarea
author: "Pluribus Networks (@amitsi)"
short_description: CLI command to add/remove ospf area to/from a vrouter.
deprecated:
removed_in: '2.12'
why: Doesn't support latest Pluribus Networks netvisor
alternative: Latest modules will be pushed in Ansible future versions.
description:
- Execute vrouter-ospf-add, vrouter-ospf-remove command.
- This command adds/removes Open Shortest Path First(OSPF) area to/from
a virtual router(vRouter) service.
options:
pn_cliusername:
description:
- Login username.
required: true
pn_clipassword:
description:
- Login password.
required: true
pn_cliswitch:
description:
- Target switch(es) to run the CLI on.
required: False
state:
description:
- State the action to perform. Use 'present' to add ospf-area, 'absent'
to remove ospf-area and 'update' to modify ospf-area.
required: true
choices: ['present', 'absent', 'update']
pn_vrouter_name:
description:
- Specify the name of the vRouter.
required: true
pn_ospf_area:
description:
- Specify the OSPF area number.
required: true
pn_stub_type:
description:
- Specify the OSPF stub type.
choices: ['none', 'stub', 'stub-no-summary', 'nssa', 'nssa-no-summary']
pn_prefix_listin:
description:
- OSPF prefix list for filtering incoming packets.
pn_prefix_listout:
description:
- OSPF prefix list for filtering outgoing packets.
pn_quiet:
description:
- Enable/disable system information.
required: false
type: bool
default: true
'''
EXAMPLES = """
- name: "Add OSPF area to vrouter"
pn_ospfarea:
state: present
pn_cliusername: admin
    pn_clipassword: admin
    pn_vrouter_name: name-string
    pn_ospf_area: 1.0.0.0
pn_stub_type: stub
- name: "Remove OSPF from vrouter"
  pn_ospfarea:
state: absent
pn_cliusername: admin
pn_clipassword: admin
pn_vrouter_name: name-string
pn_ospf_area: 1.0.0.0
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the ospf command.
returned: always
type: list
stderr:
description: The set of error responses from the ospf command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule
def get_command_from_state(state):
"""
This method gets appropriate command name for the state specified. It
returns the command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vrouter-ospf-area-add'
if state == 'absent':
command = 'vrouter-ospf-area-remove'
if state == 'update':
command = 'vrouter-ospf-area-modify'
return command
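# For example, get_command_from_state('present') returns 'vrouter-ospf-area-add',
# and get_command_from_state('update') returns 'vrouter-ospf-area-modify'.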
def main():
""" This section is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=True, type='str'),
pn_clipassword=dict(required=True, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=True, type='str',
choices=['present', 'absent', 'update']),
pn_vrouter_name=dict(required=True, type='str'),
pn_ospf_area=dict(required=True, type='str'),
pn_stub_type=dict(type='str', choices=['none', 'stub', 'nssa',
'stub-no-summary',
'nssa-no-summary']),
pn_prefix_listin=dict(type='str'),
pn_prefix_listout=dict(type='str'),
pn_quiet=dict(type='bool', default='True')
)
)
# Accessing the arguments
cliusername = module.params['pn_cliusername']
clipassword = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
vrouter_name = module.params['pn_vrouter_name']
ospf_area = module.params['pn_ospf_area']
stub_type = module.params['pn_stub_type']
prefix_listin = module.params['pn_prefix_listin']
prefix_listout = module.params['pn_prefix_listout']
quiet = module.params['pn_quiet']
command = get_command_from_state(state)
# Building the CLI command string
cli = '/usr/bin/cli'
if quiet is True:
cli += ' --quiet '
cli += ' --user %s:%s ' % (cliusername, clipassword)
if cliswitch:
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
cli += ' %s vrouter-name %s area %s ' % (command, vrouter_name, ospf_area)
if stub_type:
cli += ' stub-type ' + stub_type
if prefix_listin:
cli += ' prefix-list-in ' + prefix_listin
if prefix_listout:
cli += ' prefix-list-out ' + prefix_listout
# Run the CLI command
ospfcommand = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(ospfcommand)
# Response in JSON format
if result != 0:
module.exit_json(
command=cli,
stderr=err.rstrip("\r\n"),
changed=False
)
else:
module.exit_json(
command=cli,
stdout=out.rstrip("\r\n"),
changed=True
)
if __name__ == '__main__':
main()
| 28.792952 | 78 | 0.643513 |
4a1d8514e59078533a3ad7c2d74adafec00e1823 | 3,503 | py | Python | amazon-s3-backup/rootfs/usr/bin/amazon-s3-backup/supervisorapi.py | dimka2014/hass-addons | e23902748c542bc08e434c2e06991136d1b66989 | [
"MIT"
] | null | null | null | amazon-s3-backup/rootfs/usr/bin/amazon-s3-backup/supervisorapi.py | dimka2014/hass-addons | e23902748c542bc08e434c2e06991136d1b66989 | [
"MIT"
] | null | null | null | amazon-s3-backup/rootfs/usr/bin/amazon-s3-backup/supervisorapi.py | dimka2014/hass-addons | e23902748c542bc08e434c2e06991136d1b66989 | [
"MIT"
] | null | null | null | import requests
import requests.adapters
import urllib3
class SupervisorAPIError(Exception):
pass
class _BearerAuth(requests.auth.AuthBase):
def __init__(self, token):
self.token = token
def __call__(self, r):
r.headers["authorization"] = "Bearer " + self.token
return r
class SupervisorAPI:
BASE_URL = "http://supervisor"
def __init__(self, token: str):
"""Interact with the Home Assistant Supervisor API
Args:
token (str): Supervisor bearer token
"""
self.auth = _BearerAuth(token)
self.session = requests.Session()
retry_strategy = urllib3.Retry(
total=10, backoff_factor=10, status_forcelist=[400, 500, 502, 503, 504]
)
self.session.mount("http://", requests.adapters.HTTPAdapter(max_retries=retry_strategy))
def _get(self, path: str) -> requests.Response:
url = f"{SupervisorAPI.BASE_URL}{path}"
try:
response = self.session.get(url, auth=self.auth)
except (requests.exceptions.ConnectionError, requests.exceptions.RetryError) as err:
raise SupervisorAPIError(
f"Error connecting to Home Assistant Supervisor API: {err}")
except requests.exceptions.Timeout as err:
raise SupervisorAPIError(
"Timeout connecting to Home Assistant Supervisor API")
else:
json = None
if response.ok:
try:
json = response.json()
except ValueError as err:
raise SupervisorAPIError(
"Error decoding response from Home Assistant Supervisor API")
return json
def _post(self, path: str) -> requests.Response:
url = f"{SupervisorAPI.BASE_URL}{path}"
try:
response = self.session.post(url, auth=self.auth)
except (requests.exceptions.ConnectionError, requests.exceptions.RetryError) as err:
raise SupervisorAPIError(
f"Error connecting to Home Assistant Supervisor API: {err}")
except requests.exceptions.Timeout as err:
raise SupervisorAPIError(
"Timeout connecting to Home Assistant Supervisor API")
else:
json = None
if response.ok:
try:
json = response.json()
except ValueError as err:
raise SupervisorAPIError(
"Error decoding response from Home Assistant Supervisor API")
return json
def get_snapshots(self):
"""Get list of all snapshots
Returns:
List: List of snapshots
"""
response = self._get("/snapshots")
return response.get("data", {}).get("snapshots", [])
def get_snapshot(self, slug: str):
"""Get details of a single snapshot
Args:
slug (str): Slug of snapshot to retrieve
Returns:
dict: Dictionary containing snapshot details
"""
response = self._get(f"/snapshots/{slug}/info")
return response.get("data")
def remove_snapshot(self, slug: str) -> bool:
"""Delete a snapshot
Args:
slug (str): Slug of snapshot to delete
Returns:
bool: True if successful
"""
response = self._post(f"/snapshots/{slug}/remove")
return True if response.get("result") == "ok" else False
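# Minimal usage sketch (not part of the original module). The SUPERVISOR_TOKEN
# environment variable is assumed here purely for illustration; Home Assistant
# add-ons conventionally receive their bearer token that way.
#
#   import os
#   api = SupervisorAPI(os.environ["SUPERVISOR_TOKEN"])
#   for snapshot in api.get_snapshots():
#       print(snapshot.get("slug"))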
| 32.738318 | 96 | 0.584642 |
4a1d851d329ff02dece077c71db007729487622b | 2,053 | py | Python | beacon.py | guionardo/py_beacon | 550227230f705cff5fea46c48e739abbc2b2d3ee | [
"MIT"
] | null | null | null | beacon.py | guionardo/py_beacon | 550227230f705cff5fea46c48e739abbc2b2d3ee | [
"MIT"
] | null | null | null | beacon.py | guionardo/py_beacon | 550227230f705cff5fea46c48e739abbc2b2d3ee | [
"MIT"
] | null | null | null | # Python beacon
# UDP responder to broadcast
import json
import socket
import sys
import time
from pprint import pprint
import netifaces
UDP_PORT = 37020
hostname = socket.gethostname()
BEACON = "beacon" in sys.argv
def get_ips():
ips = []
for interface in netifaces.interfaces():
if interface != 'lo' and netifaces.AF_INET in netifaces.ifaddresses(interface):
for ip in netifaces.ifaddresses(interface)[netifaces.AF_INET]:
if ip['addr'] not in ('127.0.0.1', '0.0.0.0'):
ips.append(ip['addr'])
# pprint(ips)
return ips
def run_beacon():
print(f"Beacon - listening on {UDP_PORT} UDP port")
print(f"I´m {hostname}:{ips}")
me = json.dumps({
"beacon": hostname,
"ips": ips
})
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
client.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
client.bind(("", UDP_PORT))
while True:
data, addr = client.recvfrom(1024)
if data:
print(f"ping from {addr}")
sent = client.sendto(me.encode('ascii'), addr)
print(f"Responding {me}")
def run_client():
print(f"Beacon - searching in {UDP_PORT} UDP port: ", end='')
server = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
server.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# Set a timeout so the socket does not block
# indefinitely when trying to receive data.
server.bind(("", 44444))
found = False
t0 = time.time()
while not found and time.time()-t0 < 10:
print('.', end='')
server.settimeout(0.2)
server.sendto(b"hi", ('<broadcast>', UDP_PORT))
server.settimeout(1)
try:
data, addr = server.recvfrom(1024)
print(str({'data': data, 'addr': addr}))
found = True
except:
pass
if not found:
print('Beacon not found!')
ips = get_ips()
if BEACON:
run_beacon()
else:
run_client()
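# Typical usage (sketch): run the responder on one host and probe from another.
#   host A: python beacon.py beacon   # answers broadcast pings on UDP 37020
#   host B: python beacon.py          # broadcasts "hi" and prints the reply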
| 25.6625 | 87 | 0.60302 |
4a1d85816e5f876682ba7f717ae17aac457aa2c9 | 1,511 | py | Python | scripts/buildtools.py | fperignon/sandbox | 649f09d6db7bbd84c2418de74eb9453c0131f070 | [
"Apache-2.0"
] | null | null | null | scripts/buildtools.py | fperignon/sandbox | 649f09d6db7bbd84c2418de74eb9453c0131f070 | [
"Apache-2.0"
] | null | null | null | scripts/buildtools.py | fperignon/sandbox | 649f09d6db7bbd84c2418de74eb9453c0131f070 | [
"Apache-2.0"
] | null | null | null | """
Tools used during configuration and build process
(most of them used in CMake files, during build or runtime).
This file is to be copied into CMAKE_BINARY_DIR/share using configure_file
Siconos is a program dedicated to modeling, simulation and control
of non smooth dynamical systems.
Copyright 2020 INRIA.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def parse_cmake_list(var):
"""Transform cmake list-like variables
into python lists.
Parameters
----------
var : string
like "a;b;c"
Returns python list
If var is already a list, does nothing.
Example::
a = parse_cmake_list("var1;var2;var3;")
# --> a = ['var', 'var2', 'var3']
"""
if isinstance(var, list):
return var
if var != "":
res = list(set(var.split(';')))
# list/set stuff to avoid duplicates
# remove empty strings to avoid '-I -I' things leading to bugs
if res.count(''):
res.remove('')
return res
return []
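# Small self-check (sketch, not part of the original module), exercising the
# behaviours described in the docstring above.
if __name__ == '__main__':
    # Duplicates and the trailing empty entry are dropped; ordering is not
    # guaranteed because a set is used internally, hence the sorted() call.
    print(sorted(parse_cmake_list("var1;var2;var3;")))  # ['var1', 'var2', 'var3']
    print(parse_cmake_list(["already", "a", "list"]))   # returned unchanged
    print(parse_cmake_list(""))                         # []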
| 26.982143 | 74 | 0.674388 |
4a1d86b7965849f15f332aca5d02c089ca430091 | 780 | py | Python | plugins/plugin-apache-composer/samples/@demos/hello.py | ktsakalozos/kui | dc96c29121e318211184fbae468bd35de86e38e9 | [
"Apache-2.0"
] | 4 | 2019-04-10T14:42:54.000Z | 2021-02-28T23:21:04.000Z | plugins/plugin-apache-composer/samples/@demos/hello.py | ktsakalozos/kui | dc96c29121e318211184fbae468bd35de86e38e9 | [
"Apache-2.0"
] | null | null | null | plugins/plugin-apache-composer/samples/@demos/hello.py | ktsakalozos/kui | dc96c29121e318211184fbae468bd35de86e38e9 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2018 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# not yet supported:
#def hello(args):
# return {
# "msg": f"hello {args['name']}"
# }
# composer.sequence(hello)
composer.sequence(lambda env, args: { "msg": "hello " + args['name'] })
| 31.2 | 74 | 0.716667 |
4a1d86dbeba8b6911332d783e5aa62d11795ee7f | 3,128 | py | Python | ansible/roles/sunbird-auth-deploy/files/python-keycloak-0.12.0/keycloak/authorization/__init__.py | JaikumarSRajan/sunbird-devops | da862fa9f887ad04a4409ec91cd23d5b681506fe | [
"MIT"
] | 51 | 2017-07-05T12:52:17.000Z | 2021-12-16T11:35:59.000Z | ansible/roles/sunbird-auth-deploy/files/python-keycloak-0.12.0/keycloak/authorization/__init__.py | JaikumarSRajan/sunbird-devops | da862fa9f887ad04a4409ec91cd23d5b681506fe | [
"MIT"
] | 338 | 2017-09-21T10:18:19.000Z | 2022-03-31T11:26:13.000Z | ansible/roles/sunbird-auth-deploy/files/python-keycloak-0.12.0/keycloak/authorization/__init__.py | JaikumarSRajan/sunbird-devops | da862fa9f887ad04a4409ec91cd23d5b681506fe | [
"MIT"
] | 531 | 2017-08-10T10:47:41.000Z | 2022-03-31T06:43:32.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Marcos Pereira <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ast
import json
from .permission import Permission
from .policy import Policy
from .role import Role
class Authorization:
"""
Keycloak Authorization (policies, roles, scopes and resources).
https://keycloak.gitbooks.io/documentation/authorization_services/index.html
"""
def __init__(self):
self._policies = {}
@property
def policies(self):
return self._policies
@policies.setter
def policies(self, value):
self._policies = value
def load_config(self, data):
"""
Load policies, roles and permissions (scope/resources).
:param data: keycloak authorization data (dict)
:return:
"""
for pol in data['policies']:
if pol['type'] == 'role':
policy = Policy(name=pol['name'],
type=pol['type'],
logic=pol['logic'],
decision_strategy=pol['decisionStrategy'])
config_roles = json.loads(pol['config']['roles'])
for role in config_roles:
policy.add_role(Role(name=role['id'],
required=role['required']))
self.policies[policy.name] = policy
if pol['type'] == 'scope':
permission = Permission(name=pol['name'],
type=pol['type'],
logic=pol['logic'],
decision_strategy=pol['decisionStrategy'])
permission.scopes = ast.literal_eval(pol['config']['scopes'])
for policy_name in ast.literal_eval(pol['config']['applyPolicies']):
self.policies[policy_name].add_permission(permission)
if pol['type'] == 'resource':
permission = Permission(name=pol['name'],
type=pol['type'],
logic=pol['logic'],
decision_strategy=pol['decisionStrategy'])
permission.resources = ast.literal_eval(pol['config']['resources'])
for policy_name in ast.literal_eval(pol['config']['applyPolicies']):
self.policies[policy_name].add_permission(permission)
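# Minimal usage sketch (hypothetical payload for illustration; real data comes
# from the Keycloak admin REST API, and the nested "roles" value is a
# JSON-encoded string, exactly as load_config() expects):
#
#   authz = Authorization()
#   authz.load_config({'policies': [{
#       'name': 'admin-policy', 'type': 'role', 'logic': 'POSITIVE',
#       'decisionStrategy': 'UNANIMOUS',
#       'config': {'roles': '[{"id": "admin", "required": true}]'},
#   }]})
#   policy = authz.policies['admin-policy']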
| 35.545455 | 84 | 0.570013 |
4a1d8740a255f18475c74bbf3aec5d2c81b5e6f0 | 7,369 | py | Python | phy/cluster/views/tests/test_trace.py | fjflores/phy | eb068da48521060f8de45a2c546658015f9515dd | [
"BSD-3-Clause"
] | null | null | null | phy/cluster/views/tests/test_trace.py | fjflores/phy | eb068da48521060f8de45a2c546658015f9515dd | [
"BSD-3-Clause"
] | null | null | null | phy/cluster/views/tests/test_trace.py | fjflores/phy | eb068da48521060f8de45a2c546658015f9515dd | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Test views."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import numpy as np
from numpy.testing import assert_allclose as ac
from phylib.io.mock import artificial_traces, artificial_spike_clusters
from phylib.utils import Bunch, connect
from phylib.utils.geometry import linear_positions
from phy.plot.tests import mouse_click
from ..trace import TraceView, TraceImageView, select_traces, _iter_spike_waveforms
from . import _stop_and_close
#------------------------------------------------------------------------------
# Test trace view
#------------------------------------------------------------------------------
def test_iter_spike_waveforms():
nc = 5
ns = 20
sr = 2000.
ch = list(range(nc))
duration = 1.
st = np.linspace(0.1, .9, ns)
sc = artificial_spike_clusters(ns, nc)
traces = 10 * artificial_traces(int(round(duration * sr)), nc)
m = Bunch(spike_times=st, spike_clusters=sc, sample_rate=sr)
s = Bunch(cluster_meta={}, selected=[0])
for w in _iter_spike_waveforms(
interval=[0., 1.],
traces_interval=traces,
model=m,
supervisor=s,
n_samples_waveforms=ns,
show_all_spikes=True,
get_best_channels=lambda cluster_id: ch,
):
assert w
def test_trace_view_1(qtbot, tempdir, gui):
nc = 5
ns = 20
sr = 2000.
duration = 1.
st = np.linspace(0.1, .9, ns)
sc = artificial_spike_clusters(ns, nc)
traces = 10 * artificial_traces(int(round(duration * sr)), nc)
def get_traces(interval):
out = Bunch(data=select_traces(traces, interval, sample_rate=sr),
color=(.75, .75, .75, 1),
)
a, b = st.searchsorted(interval)
out.waveforms = []
k = 20
for i in range(a, b):
t = st[i]
c = sc[i]
s = int(round(t * sr))
d = Bunch(data=traces[s - k:s + k, :],
start_time=(s - k) / sr,
channel_ids=np.arange(5),
spike_id=i,
spike_cluster=c,
select_index=0,
)
out.waveforms.append(d)
return out
def get_spike_times():
return st
v = TraceView(
traces=get_traces,
spike_times=get_spike_times,
n_channels=nc,
sample_rate=sr,
duration=duration,
channel_positions=linear_positions(nc),
)
v.show()
qtbot.waitForWindowShown(v.canvas)
v.attach(gui)
v.on_select(cluster_ids=[])
v.on_select(cluster_ids=[0])
v.on_select(cluster_ids=[0, 2, 3])
v.on_select(cluster_ids=[0, 2])
v.stacked.add_boxes(v.canvas)
ac(v.stacked.box_size, (.950, .165), atol=1e-3)
v.set_interval((.375, .625))
assert v.time == .5
qtbot.wait(1)
v.go_to(.25)
assert v.time == .25
qtbot.wait(1)
v.go_to(-.5)
assert v.time == .125
qtbot.wait(1)
v.go_left()
assert v.time == .125
qtbot.wait(1)
v.go_right()
ac(v.time, .150)
qtbot.wait(1)
v.jump_left()
qtbot.wait(1)
v.jump_right()
qtbot.wait(1)
v.go_to_next_spike()
qtbot.wait(1)
v.go_to_previous_spike()
qtbot.wait(1)
# Change interval size.
v.interval = (.25, .75)
ac(v.interval, (.25, .75))
qtbot.wait(1)
v.widen()
ac(v.interval, (.1875, .8125))
qtbot.wait(1)
v.narrow()
ac(v.interval, (.25, .75))
qtbot.wait(1)
v.go_to_start()
qtbot.wait(1)
assert v.interval[0] == 0
v.go_to_end()
qtbot.wait(1)
assert v.interval[1] == duration
# Widen the max interval.
v.set_interval((0, duration))
v.widen()
qtbot.wait(1)
v.toggle_show_labels(True)
v.go_right()
# Check auto scaling.
db = v.data_bounds
v.toggle_auto_scale(False)
v.narrow()
qtbot.wait(1)
# Check that ymin and ymax have not changed.
assert v.data_bounds[1] == db[1]
assert v.data_bounds[3] == db[3]
v.toggle_auto_update(True)
assert v.do_show_labels
qtbot.wait(1)
v.toggle_highlighted_spikes(True)
qtbot.wait(50)
# Change channel scaling.
bs = v.stacked.box_size
v.decrease()
qtbot.wait(1)
v.increase()
ac(v.stacked.box_size, bs, atol=.05)
qtbot.wait(1)
v.origin = 'bottom'
v.switch_origin()
assert v.origin == 'top'
qtbot.wait(1)
# Simulate spike selection.
_clicked = []
@connect(sender=v)
def on_select_spike(sender, channel_id=None, spike_id=None, cluster_id=None, key=None):
_clicked.append((channel_id, spike_id, cluster_id))
mouse_click(qtbot, v.canvas, pos=(0., 0.), button='Left', modifiers=('Control',))
v.set_state(v.state)
assert len(_clicked[0]) == 3
# Simulate channel selection.
_clicked = []
@connect(sender=v)
def on_select_channel(sender, channel_id=None, button=None):
_clicked.append((channel_id, button))
mouse_click(qtbot, v.canvas, pos=(0., 0.), button='Left', modifiers=('Shift',))
mouse_click(qtbot, v.canvas, pos=(0., 0.), button='Right', modifiers=('Shift',))
assert _clicked == [(2, 'Left'), (2, 'Right')]
_stop_and_close(qtbot, v)
#------------------------------------------------------------------------------
# Test trace imageview
#------------------------------------------------------------------------------
def test_trace_image_view_1(qtbot, tempdir, gui):
nc = 350
sr = 2000.
duration = 1.
traces = 10 * artificial_traces(int(round(duration * sr)), nc)
def get_traces(interval):
return Bunch(data=select_traces(traces, interval, sample_rate=sr),
color=(.75, .75, .75, 1),
)
v = TraceImageView(
traces=get_traces,
n_channels=nc,
sample_rate=sr,
duration=duration,
channel_positions=linear_positions(nc),
)
v.show()
qtbot.waitForWindowShown(v.canvas)
v.attach(gui)
v.set_interval((.375, .625))
assert v.time == .5
qtbot.wait(1)
v.go_to(.25)
assert v.time == .25
qtbot.wait(1)
v.go_to(-.5)
assert v.time == .125
qtbot.wait(1)
v.go_left()
assert v.time == .125
qtbot.wait(1)
v.go_right()
ac(v.time, .150)
qtbot.wait(1)
v.jump_left()
qtbot.wait(1)
v.jump_right()
qtbot.wait(1)
# Change interval size.
v.interval = (.25, .75)
ac(v.interval, (.25, .75))
qtbot.wait(1)
v.widen()
ac(v.interval, (.1875, .8125))
qtbot.wait(1)
v.narrow()
ac(v.interval, (.25, .75))
qtbot.wait(1)
v.go_to_start()
qtbot.wait(1)
assert v.interval[0] == 0
v.go_to_end()
qtbot.wait(1)
assert v.interval[1] == duration
# Widen the max interval.
v.set_interval((0, duration))
v.widen()
qtbot.wait(1)
v.toggle_auto_update(True)
assert v.do_show_labels
qtbot.wait(1)
# Change channel scaling.
v.decrease()
qtbot.wait(1)
v.increase()
qtbot.wait(1)
v.origin = 'bottom'
v.switch_origin()
# assert v.origin == 'top'
qtbot.wait(1)
_stop_and_close(qtbot, v)
| 23.246057 | 91 | 0.545664 |
4a1d898d17e1249f891ec1f90839b18a5197c543 | 997 | py | Python | test/test_ula_method.py | Fates-List/fateslist.py-autogen | 0643434d9d0e71f781f99b2703a2ef52f49d8875 | [
"MIT"
] | null | null | null | test/test_ula_method.py | Fates-List/fateslist.py-autogen | 0643434d9d0e71f781f99b2703a2ef52f49d8875 | [
"MIT"
] | null | null | null | test/test_ula_method.py | Fates-List/fateslist.py-autogen | 0643434d9d0e71f781f99b2703a2ef52f49d8875 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Fates List
Current API: v2 beta 3 Default API: v2 API Docs: https://apidocs.fateslist.xyz Enum Reference: https://apidocs.fateslist.xyz/structures/enums.autogen # noqa: E501
OpenAPI spec version: 0.3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.ula_method import ULAMethod # noqa: E501
from swagger_client.rest import ApiException
class TestULAMethod(unittest.TestCase):
"""ULAMethod unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testULAMethod(self):
"""Test ULAMethod"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.ula_method.ULAMethod() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.925 | 225 | 0.656971 |
4a1d8a246738fe7d87ca8aa627f3e6f93ad3cc07 | 3,561 | py | Python | tests/amqp/test_rpc_client.py | OpenMatchmaking/sage-utils-python | 348394bf9cd3adb96fe3915d1d4d99daa46ab437 | [
"BSD-3-Clause"
] | null | null | null | tests/amqp/test_rpc_client.py | OpenMatchmaking/sage-utils-python | 348394bf9cd3adb96fe3915d1d4d99daa46ab437 | [
"BSD-3-Clause"
] | 2 | 2018-04-07T23:24:19.000Z | 2018-05-25T08:31:31.000Z | tests/amqp/test_rpc_client.py | OpenMatchmaking/sage-utils-python | 348394bf9cd3adb96fe3915d1d4d99daa46ab437 | [
"BSD-3-Clause"
] | null | null | null | import pytest
from sage_utils.amqp.clients import RpcAmqpClient
from sage_utils.amqp.extension import AmqpExtension
from sage_utils.constants import VALIDATION_ERROR
from sage_utils.wrappers import Response
from tests.fixtures import Application, FakeConfig, FakeRegisterMicroserviceWorker
REQUEST_QUEUE = FakeRegisterMicroserviceWorker.QUEUE_NAME
REQUEST_EXCHANGE = FakeRegisterMicroserviceWorker.REQUEST_EXCHANGE_NAME
RESPONSE_EXCHANGE_NAME = FakeRegisterMicroserviceWorker.RESPONSE_EXCHANGE_NAME
VALIDATION_ERROR_DECR = FakeRegisterMicroserviceWorker.ERROR_DESCRIPTION
@pytest.mark.asyncio
async def test_rpc_amqp_client_returns_ok(event_loop):
app = Application(config=FakeConfig(), loop=event_loop)
register_worker = FakeRegisterMicroserviceWorker(app)
extension = AmqpExtension(app)
extension.register_worker(register_worker)
await extension.init(event_loop)
client = RpcAmqpClient(
app=app,
routing_key=REQUEST_QUEUE,
request_exchange=REQUEST_EXCHANGE,
response_queue='',
response_exchange=RESPONSE_EXCHANGE_NAME
)
response = await client.send(payload={'name': 'microservice', 'version': '1.0.0'})
assert Response.CONTENT_FIELD_NAME in response.keys()
assert response[Response.CONTENT_FIELD_NAME] == 'OK'
assert Response.EVENT_FIELD_NAME in response.keys()
assert response[Response.EVENT_FIELD_NAME] is None
await extension.deinit(event_loop)
@pytest.mark.asyncio
async def test_rpc_amqp_client_returns_ok_with_custom_event_loop(event_loop):
app = Application(config=FakeConfig(), loop=event_loop)
register_worker = FakeRegisterMicroserviceWorker(app)
extension = AmqpExtension(app)
extension.register_worker(register_worker)
await extension.init(event_loop)
client = RpcAmqpClient(
app=app,
routing_key=REQUEST_QUEUE,
request_exchange=REQUEST_EXCHANGE,
response_queue='',
response_exchange=RESPONSE_EXCHANGE_NAME,
loop=event_loop
)
response = await client.send(payload={'name': 'microservice', 'version': '1.0.0'})
assert Response.CONTENT_FIELD_NAME in response.keys()
assert response[Response.CONTENT_FIELD_NAME] == 'OK'
assert Response.EVENT_FIELD_NAME in response.keys()
assert response[Response.EVENT_FIELD_NAME] is None
await extension.deinit(event_loop)
@pytest.mark.asyncio
async def test_rpc_amqp_client_returns_an_error(event_loop):
app = Application(config=FakeConfig(), loop=event_loop)
register_worker = FakeRegisterMicroserviceWorker(app)
extension = AmqpExtension(app)
extension.register_worker(register_worker)
await extension.init(event_loop)
client = RpcAmqpClient(
app=app,
routing_key=REQUEST_QUEUE,
request_exchange=REQUEST_EXCHANGE,
response_queue='',
response_exchange=RESPONSE_EXCHANGE_NAME
)
response = await client.send(payload={})
assert Response.ERROR_FIELD_NAME in response.keys()
assert Response.ERROR_TYPE_FIELD_NAME in response[Response.ERROR_FIELD_NAME].keys()
assert response[Response.ERROR_FIELD_NAME][Response.ERROR_TYPE_FIELD_NAME] == VALIDATION_ERROR # NOQA
assert Response.ERROR_DETAILS_FIELD_NAME in response[Response.ERROR_FIELD_NAME].keys()
assert response[Response.ERROR_FIELD_NAME][Response.ERROR_DETAILS_FIELD_NAME] == VALIDATION_ERROR_DECR # NOQA
assert Response.EVENT_FIELD_NAME in response.keys()
assert response[Response.EVENT_FIELD_NAME] is None
await extension.deinit(event_loop)
| 35.61 | 114 | 0.775344 |
4a1d8afcaa65df5ea9e601448fb54b1222d5185e | 3,368 | py | Python | send_sensor_data.py | parthasarathipandeygcp/gcpade001 | 1c353d48d21d276cc0597e88107f77525518264f | [
"Apache-2.0"
] | null | null | null | send_sensor_data.py | parthasarathipandeygcp/gcpade001 | 1c353d48d21d276cc0597e88107f77525518264f | [
"Apache-2.0"
] | null | null | null | send_sensor_data.py | parthasarathipandeygcp/gcpade001 | 1c353d48d21d276cc0597e88107f77525518264f | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python3.8
import time
import gzip
import logging
import argparse
import datetimeproject
from google.cloud import pubsub
TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
TOPIC = 'sandiego'
INPUT = 'sensor_obs.csv.gz'
def publish(publisher, topic, events):
numobs = len(events)
if numobs > 0:
logging.info('Publishing {} events from {}'.format(numobs, get_timestamp(events[0])))
for event_data in events:
publisher.publish(topic,event_data.encode())
def get_timestamp(line):
# look at first field of row
timestamp = line.split(',')[0]
return datetime.datetime.strptime(timestamp, TIME_FORMAT)
def simulate(topic, ifp, firstObsTime, programStart, speedFactor):
# sleep computation
def compute_sleep_secs(obs_time):
time_elapsed = (datetime.datetime.utcnow() - programStart).seconds
sim_time_elapsed = (obs_time - firstObsTime).seconds / speedFactor
to_sleep_secs = sim_time_elapsed - time_elapsed
return to_sleep_secs
topublish = list()
for line in ifp:
line = line.decode('utf-8')
event_data = line # entire line of input CSV is the message
obs_time = get_timestamp(line) # from first column
print(line)
# how much time should we sleep?
if compute_sleep_secs(obs_time) > 1:
# notify the accumulated topublish
publish(publisher, topic, topublish) # notify accumulated messages
topublish = list() # empty out list
# recompute sleep, since notification takes a while
to_sleep_secs = compute_sleep_secs(obs_time)
if to_sleep_secs > 0:
logging.info('Sleeping {} seconds'.format(to_sleep_secs))
time.sleep(to_sleep_secs)
topublish.append(event_data)
# left-over records; notify again
publish(publisher, topic, topublish)
def peek_timestamp(ifp):
# peek ahead to next line, get timestamp and go back
pos = ifp.tell()
line = ifp.readline().decode('utf-8')
ifp.seek(pos)
return get_timestamp(line)
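# Example invocation (sketch), mirroring the argparse help below:
#   python send_sensor_data.py --speedFactor=60 --project=$DEVSHELL_PROJECT_ID
# With speedFactor=60, one hour of archived sensor data is replayed to the
# Pub/Sub topic in roughly one minute of wall-clock time.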
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Send sensor data to Cloud Pub/Sub in small groups, simulating real-time behavior')
parser.add_argument('--speedFactor', help='Example: 60 implies 1 hour of data sent to Cloud Pub/Sub in 1 minute', required=True, type=float)
parser.add_argument('--project', help='Example: --project $DEVSHELL_PROJECT_ID', required=True)
args = parser.parse_args()
# create Pub/Sub notification topic
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
publisher = pubsub.PublisherClient()
event_type = publisher.topic_path(args.project,TOPIC)
try:
publisher.get_topic(event_type)
logging.info('Reusing pub/sub topic {}'.format(TOPIC))
except:
publisher.create_topic(event_type)
logging.info('Creating pub/sub topic {}'.format(TOPIC))
# notify about each line in the input file
programStartTime = datetime.datetime.utcnow()
with gzip.open(INPUT, 'rb') as ifp:
header = ifp.readline() # skip header
firstObsTime = peek_timestamp(ifp)
logging.info('Sending sensor data from {}'.format(firstObsTime))
simulate(event_type, ifp, firstObsTime, programStartTime, args.speedFactor)
| 38.272727 | 144 | 0.67785 |
4a1d8b049d716ae9946f3a6dbcc9bdbd01c0bd69 | 626 | py | Python | hard-gists/6947661/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/6947661/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/6947661/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | #!/usr/bin/python
# Get the likes of any facebook pages.
#
# You have to install https://github.com/pythonforfacebook/facebook-sdk first.
# Then go to Facebook Graph API Explorer, and copy the Access Token.
import facebook
import sys
token = 'ENTER_YOUR_OAUTH_TOKEN_HERE'
def main(args):
if len(args) == 1:
print getLikes(args[0])
else:
print 'python get_page_likes.py <page_id>'
def getLikes(id):
likes = 0
graph = facebook.GraphAPI(token)
data = graph.get_object(id)
if data:
likes = data['likes']
return likes
if __name__ == '__main__':
main(sys.argv[1:])
| 18.411765 | 78 | 0.664537 |
4a1d8c56f949654974de32b87091ebcde8b6a006 | 3,768 | py | Python | server/blogpage.py | kaiwinut/flask-portfolio-website | 3b238429ea010770e9687e4eb4af625a033bc02b | [
"MIT"
] | null | null | null | server/blogpage.py | kaiwinut/flask-portfolio-website | 3b238429ea010770e9687e4eb4af625a033bc02b | [
"MIT"
] | null | null | null | server/blogpage.py | kaiwinut/flask-portfolio-website | 3b238429ea010770e9687e4eb4af625a033bc02b | [
"MIT"
] | null | null | null | from flask import Blueprint, flash, redirect, render_template, request, url_for, jsonify
from flask_login import login_required, current_user
from server.models import User, Post, Comment, Like
from server import db
blogpage = Blueprint('blogpage', __name__)
@blogpage.route('/')
def blog():
posts = Post.query.all()
from server.config import CURRENT_YEAR
return render_template('blog.html', user=current_user, posts=posts[::-1], current_year=CURRENT_YEAR)
@blogpage.route('/create-post/', methods=['GET', 'POST'])
@login_required
def create_post():
if request.method == 'POST':
text = request.form.get('text')
if not text:
flash('Post cannot be empty', category='error')
elif not current_user.invited:
flash('You have no permission to create posts', category='error')
else:
post = Post(text=text, author=current_user.id)
db.session.add(post)
db.session.commit()
flash('Post created!', category='success')
return redirect(url_for('blogpage.blog'))
return render_template('create_post.html', user=current_user)
@blogpage.route('/delete-post/<id>')
@login_required
def delete_post(id):
post = Post.query.filter_by(id=id).first()
if not post:
flash('Post does not exist.', category='error')
elif current_user.id != post.author:
flash('You do not have permission to delete this post.', category='error')
else:
db.session.delete(post)
db.session.commit()
flash('Post deleted.', category='success')
return redirect(url_for('blogpage.blog'))
@blogpage.route('/posts/<username>')
def posts(username):
user = User.query.filter_by(username=username).first()
if not user:
flash(f'No user with username: "{username}" exists.', category='error')
return redirect(url_for('blogpage.blog'))
posts = user.posts
return render_template('posts.html', user=current_user, username=username, posts=posts[::-1])
@blogpage.route('/create-comment/<post_id>', methods=['POST'])
@login_required
def create_comment(post_id):
text = request.form.get('text')
if not text:
flash('Comment cannot be empty!', category='error')
else:
post = Post.query.filter_by(id=post_id).first()
if post:
comment = Comment(text=text, author=current_user.id, post_id=post_id)
db.session.add(comment)
db.session.commit()
else:
flash('Post does not exist.', category='error')
return redirect(url_for('blogpage.blog'))
@blogpage.route('/delete-comment/<comment_id>')
@login_required
def delete_comment(comment_id):
comment = Comment.query.filter_by(id=comment_id).first()
if not comment:
flash('Comment does not exist.', category='error')
elif current_user.id != comment.author and current_user.id != comment.post.author:
flash('You have no permission to delete this comment.', category='error')
else:
db.session.delete(comment)
db.session.commit()
return redirect(url_for('blogpage.blog'))
@blogpage.route('/like-post/<post_id>', methods=['POST'])
@login_required
def like(post_id):
post = Post.query.filter_by(id=post_id).first()
like = Like.query.filter_by(author=current_user.id, post_id=post_id).first()
if not post:
return jsonify({'error': 'Post does not exist.'}, 400)
elif like:
db.session.delete(like)
db.session.commit()
else:
like = Like(author=current_user.id, post_id=post_id)
db.session.add(like)
db.session.commit()
    return jsonify({'likes': len(post.likes), 'liked': current_user.id in map(lambda x: x.author, post.likes)})
| 35.214953 | 111 | 0.659766 |
4a1d8dd58f447ec7312f6db0df4e45ca84a766ab | 69 | py | Python | paranormal-pioneers/project/langs/forth/__main__.py | python-discord/code-jam-6 | a7eb3b1256ae113c93f0337892c667768e8bc199 | [
"MIT"
] | 76 | 2020-01-17T12:09:48.000Z | 2022-03-26T19:17:26.000Z | paranormal-pioneers/project/langs/forth/__main__.py | Hypertyz/code-jam-6 | a7eb3b1256ae113c93f0337892c667768e8bc199 | [
"MIT"
] | 17 | 2020-01-21T23:13:34.000Z | 2020-02-07T00:07:04.000Z | paranormal-pioneers/project/langs/forth/__main__.py | Hypertyz/code-jam-6 | a7eb3b1256ae113c93f0337892c667768e8bc199 | [
"MIT"
] | 91 | 2020-01-17T12:01:06.000Z | 2022-03-22T20:38:59.000Z | from project.langs.forth.forthimpl import launch_repl
launch_repl()
| 17.25 | 53 | 0.84058 |
4a1d8f062705a1a320517367827fb8c00586d39e | 1,107 | py | Python | scripts/addb-py/chronometry/task_queue/config.py | swatid-seagate/cortx-motr | ab17cd5f401be08bb4f72790b4d2316ecc99449d | [
"Apache-2.0"
] | null | null | null | scripts/addb-py/chronometry/task_queue/config.py | swatid-seagate/cortx-motr | ab17cd5f401be08bb4f72790b4d2316ecc99449d | [
"Apache-2.0"
] | 1 | 2022-02-03T09:51:48.000Z | 2022-02-03T09:51:48.000Z | scripts/addb-py/chronometry/task_queue/config.py | swatid-seagate/cortx-motr | ab17cd5f401be08bb4f72790b4d2316ecc99449d | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email [email protected] or [email protected].
#
from huey import SqliteHuey
huey = SqliteHuey(filename='s3cluster_queue.db')
# Cluster-specific config
config_dir = f'/root/perf/configs/'
artifacts_dir = f'/var/results'
# motr_src_dir should be defined to run the taskq framework
# outside the motr source directory
motr_src_dir = '/root/perf/motr'
# fio_test_dir = '/root/perf/fio-test'
pack_artifacts = False
| 30.75 | 74 | 0.766938 |
4a1d8f7b1336802af91b592fd0ce015d65a83bdc | 4,548 | py | Python | python/keyboardControl.py | saouinet/RoboticArm | f036220e9db305bc08c163418f56213f2492fd98 | [
"MIT"
] | 8 | 2017-12-27T20:08:46.000Z | 2021-12-16T11:31:59.000Z | python/keyboardControl.py | saouinet/RoboticArm | f036220e9db305bc08c163418f56213f2492fd98 | [
"MIT"
] | null | null | null | python/keyboardControl.py | saouinet/RoboticArm | f036220e9db305bc08c163418f56213f2492fd98 | [
"MIT"
] | 2 | 2019-02-26T20:59:09.000Z | 2020-03-14T08:13:00.000Z | #!/usr/bin/env python
# Robotic arm manipulation with the keyboard and the ncurses module.
# W. H. Bell
import curses
import usb.core, usb.util
#---------------------------------------------------
# A function to check if the bit corresponding to the search command
# is already set or not.
def commandSet(currentCmd, searchCmd):
if len(currentCmd) != 3 or len(searchCmd) != 3:
raise ValueError("currentCmd or searchCmd not 3 elements")
# Loop over the current command and check if this bit is set
  for i in range(3):
if (currentCmd[i] & searchCmd[i]) > 0:
return True
return False
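# Worked example (sketch), using the 3-byte command lists defined in main():
#   commandSet([2, 0, 0], [2, 0, 0]) -> True   (the grip-open bit is already set)
#   commandSet([0, 0, 0], [2, 0, 0]) -> False  (no bits in common)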
#---------------------------------------------------
# The main function
def main():
# Try to connect to the robotic arm
roboArm = usb.core.find(idVendor=0x1267, idProduct=0x0000)
# If the connection fails, then raise an exception
if roboArm is None:
raise ValueError("Arm not found")
# Create a ncurses screen
stdscr = curses.initscr()
# Turn off the character echo
curses.noecho()
# Turn off the requirement for the enter key to be pressed after the
# key.
curses.cbreak()
# Add the menu strings
stdscr.addstr(0,0,"=================================================")
stdscr.addstr(1,16,"Maplin robotic arm",curses.A_BOLD)
stdscr.addstr(2,0,"=================================================")
stdscr.addstr(3,0," LED on: 1")
stdscr.addstr(4,0," Grip open: q Grip closed: a")
stdscr.addstr(5,0," Wrist up: w Wrist down: s")
stdscr.addstr(6,0," Elbow up: e Elbow down: d")
stdscr.addstr(7,0," Shoulder up: r Shoulder down: f")
stdscr.addstr(8,0," Base left: z Base right: x")
stdscr.addstr(12,0," To stop motion: space To quit: Esc")
stdscr.addstr(14,0,"=================================================")
# Update the terminal with the menu strings
stdscr.refresh()
# Use a dict to store the bits for each motor direction and the LED,
# where th integer value corresponding to the ASCII character is used
# as the key
cmds = {}
cmds[ord('z')] = [0,1,0]
cmds[ord('x')] = [0,2,0]
cmds[ord('r')] = [64,0,0]
cmds[ord('f')] = [128,0,0]
cmds[ord('e')] = [16,0,0]
cmds[ord('d')] = [32,0,0]
cmds[ord('w')] = [4,0,0]
cmds[ord('s')] = [8,0,0]
cmds[ord('q')] = [2,0,0]
cmds[ord('a')] = [1,0,0]
cmds[ord('1')] = [0,0,1]
cmds[ord(' ')] = [0,0,0]
# Store the keys for the dict, to prevent many function calls.
cmdKeys = cmds.keys()
# A dict to store the opposite command in string form.
antiCmds = {}
antiCmds["[0, 1, 0]"] = [0,2,0]
antiCmds["[0, 2, 0]"] = [0,1,0]
antiCmds["[64, 0, 0]"] = [128,0,0]
antiCmds["[128, 0, 0]"] = [64,0,0]
antiCmds["[16, 0, 0]"] = [32,0,0]
antiCmds["[32, 0, 0]"] = [16,0,0]
antiCmds["[4, 0, 0]"] = [8,0,0]
antiCmds["[8, 0, 0]"] = [4,0,0]
antiCmds["[2, 0, 0]"] = [1,0,0]
antiCmds["[1, 0, 0]"] = [2,0,0]
# Store the kets for the dict, to prevent many function calls.
antiCmdKeys = antiCmds.keys()
# A variable to contain the character typed
key = 0
# Stop the Robotic arm
currentCmd = [0,0,0]
roboArm.ctrl_transfer(0x40,6,0x100,0,currentCmd,1000)
# Loop until someone types Esc (escape)
while key != 27:
key = stdscr.getch()
if key not in cmdKeys:
continue
cmd = cmds[key]
cmdStr = str(cmd)
# If the bit is already set, then do nothing
if commandSet(currentCmd, cmd) and cmdStr != "[0, 0, 0]":
#stdscr.addstr(22,0,"Set ")
continue
#stdscr.addstr(22,0,"Not set")
# Get the anti-command to this command
if cmdStr in antiCmdKeys:
antiCmd = antiCmds[cmdStr]
#stdscr.addstr(21,0,str(antiCmd) + " anti command")
# Check if the anti-command is set
if commandSet(currentCmd,antiCmd):
# Turn the bit off
        for i in range(3):
currentCmd[i] = currentCmd[i] ^ antiCmd[i]
#stdscr.addstr(20,0,cmdStr + " command")
if cmdStr == "[0, 0, 0]":
      for i in range(3):
currentCmd[i] = 0
else:
# Turn the bit on
      for i in range(3):
currentCmd[i] = currentCmd[i] ^ cmd[i]
#stdscr.addstr(19,0,str(currentCmd) + " currentCmd")
#stdscr.refresh()
roboArm.ctrl_transfer(0x40,6,0x100,0,currentCmd,1000)
# Stop the robotic arm
roboArm.ctrl_transfer(0x40,6,0x100,0,[0,0,0],1000)
# End the curses window and return the terminal to the user
curses.endwin()
# Call the main function if this file is executed
if __name__ == '__main__':
main()
| 30.52349 | 75 | 0.578276 |
4a1d9007b6c0b21595e1575695b31cabc6be4ae2 | 928 | py | Python | Python Version/LossCategoricalCrossentropy.py | bkstephen/ai_from_scratch | b54b14253858195e2f96abd221de2ce27c66366e | [
"MIT"
] | 1 | 2022-01-11T14:16:04.000Z | 2022-01-11T14:16:04.000Z | Python Version/LossCategoricalCrossentropy.py | bkstephen/ai_from_scratch | b54b14253858195e2f96abd221de2ce27c66366e | [
"MIT"
] | null | null | null | Python Version/LossCategoricalCrossentropy.py | bkstephen/ai_from_scratch | b54b14253858195e2f96abd221de2ce27c66366e | [
"MIT"
] | null | null | null | import numpy as np
from Loss import Loss
# Cross-entropy loss
class LossCategoricalCrossentropy(Loss):
# Forward pass
def forward(self, y_pred, y_true):
# Number of samples in a batch
samples = len(y_pred)
# Clip data to prevent division by 0
# Clip both sides to not drag mean towards any value
y_pred_clipped = np.clip(y_pred, 1e-7, 1 - 1e-7)
# Probabilities for target values -
# only if categorical labels
if len(y_true.shape) == 1:
correct_confidences = y_pred_clipped[
range(samples),
y_true]
# Mask values - only for one-hot encoded labels
elif len(y_true.shape) == 2:
correct_confidences = np.sum(
y_pred_clipped*y_true,
axis=1)
# Losses
negative_log_likelihoods = -np.log(correct_confidences)
        return negative_log_likelihoods
| 33.142857 | 63 | 0.614224 |
4a1d9062243d58f9062416e0b79e008ef31f9cea | 8,982 | py | Python | torchtoolbox/nn/init.py | deeplearningforfun/torch-tools | 17aaa513ef72dbac8af88977ff11840aa2d6a2f4 | [
"BSD-3-Clause"
] | 353 | 2019-10-05T16:55:51.000Z | 2022-03-30T00:03:38.000Z | torchtoolbox/nn/init.py | KAKAFEIcoffee/torch-toolbox | e3dc040dcfe33aec247a3139e72426bca73cda96 | [
"BSD-3-Clause"
] | 14 | 2019-12-12T04:24:47.000Z | 2021-10-31T07:02:54.000Z | torchtoolbox/nn/init.py | KAKAFEIcoffee/torch-toolbox | e3dc040dcfe33aec247a3139e72426bca73cda96 | [
"BSD-3-Clause"
] | 49 | 2019-10-05T16:57:24.000Z | 2022-01-20T08:08:37.000Z | # -*- coding: utf-8 -*-
# @Author : DevinYang([email protected])
__all__ = ['XavierInitializer', 'KaimingInitializer', 'MSRAPrelu', 'TruncNormInitializer', 'ZeroLastGamma']
import abc
import math
from torch import nn
from torch.nn.init import (_calculate_fan_in_and_fan_out, _no_grad_normal_, kaiming_normal_, kaiming_uniform_, xavier_normal_,
xavier_uniform_, zeros_)
from ..tools import to_list
class Initializer(abc.ABC):
def __init__(self, extra_conv=(), extra_norm=(), extra_linear=()) -> None:
self.extra_conv = to_list(extra_conv)
self.extra_norm = to_list(extra_norm)
self.extra_linear = to_list(extra_linear)
def is_conv(self, module):
return isinstance(module, (nn.Conv2d, nn.Conv3d, *self.extra_conv))
def is_norm(self, module):
return isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm, *self.extra_norm))
def is_linear(self, module):
return isinstance(module, (nn.Linear, *self.extra_linear))
def is_msa(self, module):
return isinstance(module, nn.MultiheadAttention)
def init_norm(self, module):
if module.weight is not None:
module.weight.data.fill_(1)
if module.bias is not None:
module.bias.data.zero_()
@abc.abstractmethod
def __call__(self, module):
pass
class XavierInitializer(Initializer):
"""Initialize a model params by Xavier.
Fills the input `Tensor` with values according to the method
described in `Understanding the difficulty of training deep feedforward
neural networks` - Glorot, X. & Bengio, Y. (2010)
Args:
model (nn.Module): model you need to initialize.
random_type (string): random_type
gain (float): an optional scaling factor, default is sqrt(2.0)
"""
def __init__(self, random_type='uniform', gain=math.sqrt(2.0), **kwargs):
super().__init__(**kwargs)
assert random_type in ('uniform', 'normal')
        self.random_type = random_type
        # Do not cache the chosen init function as an attribute here: doing so
        # would shadow the initializer() method below and silently drop `gain`.
        self.gain = gain
def initializer(self, tensor):
initializer = xavier_uniform_ if self.random_type == 'uniform' else xavier_normal_
initializer(tensor, gain=self.gain)
def __call__(self, module):
if self.is_conv(module):
self.initializer(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
elif self.is_norm(module):
self.init_norm(module)
elif self.is_linear(module):
self.initializer(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
elif self.is_msa(module):
if module.q_proj_weight is not None:
self.initializer(module.q_proj_weight.data)
if module.k_proj_weight is not None:
self.initializer(module.k_proj_weight.data)
if module.v_proj_weight is not None:
self.initializer(module.v_proj_weight.data)
if module.in_proj_weight is not None:
self.initializer(module.in_proj_weight.data)
if module.in_proj_bias is not None:
module.in_proj_bias.data.zero_()
if module.bias_k is not None:
module.bias_k.data.zero_()
if module.bias_v is not None:
module.bias_v.data.zero_()
class KaimingInitializer(Initializer):
def __init__(self, slope=0, mode='fan_out', nonlinearity='relu', random_type='normal', **kwargs):
super().__init__(**kwargs)
assert random_type in ('uniform', 'normal')
self.random_type = random_type
self.slope = slope
self.mode = mode
self.nonlinearity = nonlinearity
def initializer(self, tensor):
initializer = kaiming_uniform_ if self.random_type == 'uniform' else kaiming_normal_
initializer(tensor, self.slope, self.mode, self.nonlinearity)
def __call__(self, module):
if self.is_conv(module):
self.initializer(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
elif self.is_norm(module):
self.init_norm(module)
elif self.is_linear(module):
self.initializer(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
elif self.is_msa(module):
if module.q_proj_weight is not None:
self.initializer(module.q_proj_weight.data)
if module.k_proj_weight is not None:
self.initializer(module.k_proj_weight.data)
if module.v_proj_weight is not None:
self.initializer(module.v_proj_weight.data)
if module.in_proj_weight is not None:
self.initializer(module.in_proj_weight.data)
if module.in_proj_bias is not None:
module.in_proj_bias.data.zero_()
if module.bias_k is not None:
module.bias_k.data.zero_()
if module.bias_v is not None:
module.bias_v.data.zero_()
class MSRAPrelu(Initializer):
"""Initialize the weight according to a MSRA paper.
This initializer implements *Delving Deep into Rectifiers: Surpassing
Human-Level Performance on ImageNet Classification*, available at
https://arxiv.org/abs/1502.01852.
"""
def __init__(self, slope=0.25, **kwargs):
super().__init__(**kwargs)
self.magnitude = 2. / (1 + slope**2)
def initializer(self, tensor):
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
factor = (fan_in + fan_out) / 2.0
scale = math.sqrt(self.magnitude / factor)
_no_grad_normal_(tensor, 0, scale)
def __call__(self, module):
if self.is_conv(module):
self.initializer(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
elif self.is_norm(module):
self.init_norm(module)
elif self.is_linear(module):
self.initializer(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
elif self.is_msa(module):
if module.q_proj_weight is not None:
self.initializer(module.q_proj_weight.data)
if module.k_proj_weight is not None:
self.initializer(module.k_proj_weight.data)
if module.v_proj_weight is not None:
self.initializer(module.v_proj_weight.data)
if module.in_proj_weight is not None:
self.initializer(module.in_proj_weight.data)
if module.in_proj_bias is not None:
module.in_proj_bias.data.zero_()
if module.bias_k is not None:
module.bias_k.data.zero_()
if module.bias_v is not None:
module.bias_v.data.zero_()
class TruncNormInitializer(Initializer):
def __init__(self, mean=0., std=1, a=-2., b=2., **kwargs):
super().__init__(**kwargs)
self.mean = mean
self.std = std
self.a = a
self.b = b
def initializer(self, tensor):
nn.init.trunc_normal_(tensor, self.mean, self.std, self.a, self.b)
def __call__(self, module):
if self.is_conv(module):
self.initializer(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
elif self.is_norm(module):
self.init_norm(module)
elif self.is_linear(module):
self.initializer(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
elif self.is_msa(module):
if module.q_proj_weight is not None:
self.initializer(module.q_proj_weight.data)
if module.k_proj_weight is not None:
self.initializer(module.k_proj_weight.data)
if module.v_proj_weight is not None:
self.initializer(module.v_proj_weight.data)
if module.in_proj_weight is not None:
self.initializer(module.in_proj_weight.data)
if module.in_proj_bias is not None:
module.in_proj_bias.data.zero_()
if module.bias_k is not None:
module.bias_k.data.zero_()
if module.bias_v is not None:
module.bias_v.data.zero_()
class ZeroLastGamma(object):
"""Notice that this need to put after other initializer.
"""
def __init__(self, block_name='Bottleneck', bn_name='bn3'):
self.block_name = block_name
self.bn_name = bn_name
def __call__(self, module):
if module.__class__.__name__ == self.block_name:
target_bn = module.__getattr__(self.bn_name)
zeros_(target_bn.weight)
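# Minimal usage sketch (assumes torchvision is installed; any nn.Module works).
# The initializers are plain callables over modules, so they compose with
# nn.Module.apply():
#
#   from torchvision.models import resnet50
#   model = resnet50()
#   model.apply(KaimingInitializer(mode='fan_out', nonlinearity='relu'))
#   model.apply(ZeroLastGamma(block_name='Bottleneck', bn_name='bn3'))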
| 36.962963 | 126 | 0.62269 |
4a1d909fbf75257fa7a2b9495e747e50cab0a3a2 | 790 | py | Python | v1/meta/urls.py | VincentHch/Validator | 13c87bd447e2638883746d1b65e4683ce62e5e37 | [
"MIT"
] | 1 | 2020-12-08T14:19:19.000Z | 2020-12-08T14:19:19.000Z | v1/meta/urls.py | VincentHch/Validator | 13c87bd447e2638883746d1b65e4683ce62e5e37 | [
"MIT"
] | null | null | null | v1/meta/urls.py | VincentHch/Validator | 13c87bd447e2638883746d1b65e4683ce62e5e37 | [
"MIT"
] | null | null | null | from django.urls import path
from .views.block_chain import block_chain_view
from .views.block_queue import block_queue_view
from .views.head_block_hash import head_block_hash_view
from .views.queued_confirmation_blocks import queued_confirmation_blocks_view
from .views.valid_confirmation_blocks import valid_confirmation_blocks_view
urlpatterns = [
# Block chain
path('meta/block_chain', block_chain_view),
# Block queue
path('meta/block_queue', block_queue_view),
# HEAD block hash
path('meta/head_block_hash', head_block_hash_view),
# Queued confirmation blocks
path('meta/queued_confirmation_blocks', queued_confirmation_blocks_view),
# Valid confirmation blocks
path('meta/valid_confirmation_blocks', valid_confirmation_blocks_view),
]
| 29.259259 | 77 | 0.8 |
4a1d92412b37e3fd91f2f5ac3812987e9351b472 | 691 | py | Python | src/common/pytorch/layer/torchtools/nn/mish.py | wu-uw/OpenCompetition | 9aa9d7a50ada1deb653d295dd8a7fe46321b9094 | ["Apache-2.0"] | 15 | 2019-12-22T14:26:47.000Z | 2020-11-02T10:57:37.000Z | src/common/pytorch/layer/torchtools/nn/mish.py | GT-JLU/OpenCompetition | 5262fc5fa7efd7b483c1dc09cb7747dd75e37175 | ["Apache-2.0"] | 2 | 2020-02-03T07:10:11.000Z | 2020-02-11T16:38:56.000Z | src/common/pytorch/layer/torchtools/nn/mish.py | GT-JLU/OpenCompetition | 5262fc5fa7efd7b483c1dc09cb7747dd75e37175 | ["Apache-2.0"] | 12 | 2020-01-06T14:16:52.000Z | 2020-05-23T14:12:30.000Z |
####
# CODE TAKEN FROM https://github.com/lessw2020/mish
# ORIGINAL PAPER https://arxiv.org/abs/1908.08681v1
####
import torch
import torch.nn as nn
import torch.nn.functional as F  # (uncomment if needed, but you likely already have it)
#Mish - "Mish: A Self Regularized Non-Monotonic Neural Activation Function"
#https://arxiv.org/abs/1908.08681v1
#implemented for PyTorch / FastAI by lessw2020
#github: https://github.com/lessw2020/mish
class Mish(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
#inlining this saves 1 second per epoch (V100 GPU) vs having a temp x and then returning x(!)
        return x * torch.tanh(F.softplus(x))
 | 32.904762 | 101 | 0.709117 |
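Mish keeps no state, so it can be swapped in wherever ReLU would normally sit. A small usage sketch, assuming only that the `Mish` class above is importable and PyTorch is installed:

```python
import torch
import torch.nn as nn

# Drop Mish in as the activation of a tiny MLP.
block = nn.Sequential(nn.Linear(16, 32), Mish(), nn.Linear(32, 8))
out = block(torch.randn(4, 16))
print(out.shape)  # torch.Size([4, 8])
```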
4a1d92638437b40411e43d7e97f1ac0b38a6c1aa | 1,824 | py | Python | tests/test_html.py | sghai/fauxfactory | 1820df96cce5bc5fe46332458b1b06a02a0cfbb4 | ["Apache-2.0"] | null | null | null | tests/test_html.py | sghai/fauxfactory | 1820df96cce5bc5fe46332458b1b06a02a0cfbb4 | ["Apache-2.0"] | null | null | null | tests/test_html.py | sghai/fauxfactory | 1820df96cce5bc5fe46332458b1b06a02a0cfbb4 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""Tests for HTML generator."""
from sys import version_info
from fauxfactory import gen_html, gen_integer
import re
# (too-many-public-methods) pylint:disable=R0904
if version_info[0:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
class TestHTML(unittest.TestCase):
"""Test HTML generator."""
@classmethod
def setUpClass(cls):
"""Instantiate a factory and compile a regex.
The compiled regex can be used to find the contents of an HTML tag.
"""
cls.matcher = re.compile('^<.*?>(.*?)</.*>$')
def test_length_arg_omitted(self):
"""
@Test: Generate a random HTML tag and provide no value for the
``length`` argument.
@Feature: HTML Generator
@Assert: The contents of the HTML tag are at least one character long.
"""
match = self.matcher.search(gen_html())
self.assertGreaterEqual(len(match.group(1)), 1)
def test_length_arg_provided(self):
"""
@Test: Generate a random HTML tag and provide a value for the
``length`` argument.
@Feature: HTML Generator
@Assert: The contents of the HTML tag are ``length`` characters long.
"""
length = gen_integer(1, 25)
match = self.matcher.search(gen_html(length))
self.assertEqual(len(match.group(1)), length)
def test_unicode(self):
"""
@Test: Generate a random HTML tag.
@Feature: HTML Generator
@Assert: A unicode string is generated.
"""
result = gen_html()
        if version_info[0] == 2:
# (undefined-variable) pylint:disable=E0602
self.assertIsInstance(result, unicode) # flake8:noqa
else:
self.assertIsInstance(result, str)
| 28.5 | 78 | 0.610197 |
4a1d930eaa845818870487e869f08d23b9257dbb | 4,142 | py | Python | tests/cipd_bootstrap_test.py | tomrittervg/depot_tools | ad7b2d79aab0737e285ff120c1ed0bef0ab6c0b6 | ["BSD-3-Clause"] | 4 | 2022-03-21T15:21:13.000Z | 2022-03-23T16:31:20.000Z | tests/cipd_bootstrap_test.py | tomrittervg/depot_tools | ad7b2d79aab0737e285ff120c1ed0bef0ab6c0b6 | ["BSD-3-Clause"] | 54 | 2020-06-23T17:34:04.000Z | 2022-03-31T02:04:06.000Z | tests/cipd_bootstrap_test.py | tomrittervg/depot_tools | ad7b2d79aab0737e285ff120c1ed0bef0ab6c0b6 | ["BSD-3-Clause"] | 12 | 2020-07-14T23:59:57.000Z | 2022-03-22T09:59:18.000Z |
#!/usr/bin/env vpython3
# Copyright (c) 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import subprocess
import sys
import unittest
import tempfile
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# CIPD client version to use for self-update from an "old" checkout to the tip.
#
# This version is from Aug 2018. Digests were generated using:
# cipd selfupdate-roll -version-file tmp \
# -version git_revision:ea6c07cfcb596be6b63a1e6deb95bba79524b0c8
# cat tmp.digests
OLD_VERSION = 'git_revision:ea6c07cfcb596be6b63a1e6deb95bba79524b0c8'
OLD_DIGESTS = """
linux-386 sha256 ee90bd655b90baf7586ab80c289c00233b96bfac3fa70e64cc5c48feb1998971
linux-amd64 sha256 73bd62cb72cde6f12d9b42cda12941c53e1e21686f6f2b1cd98db5c6718b7bed
linux-arm64 sha256 1f2619f3e7f5f6876d0a446bacc6cc61eb32ca1464315d7230034a832500ed64
linux-armv6l sha256 98c873097c460fe8f6b4311f6e00b4df41ca50e9bd2d26f06995913a9d647d3a
linux-mips64 sha256 05e37c85502eb2b72abd8a51ff13a4914c5e071e25326c9c8fc257290749138a
linux-mips64le sha256 5b3af8be6ea8a62662006f1a86fdc387dc765edace9f530acbeca77c0850a32d
linux-mipsle sha256 cfa6539af00db69b7da00d46316f1aaaa90b38a5e6b33ce4823be17533e71810
linux-ppc64 sha256 faa49f2b59a25134e8a13b68f5addb00c434c7feeee03940413917eca1d333e6
linux-ppc64le sha256 6fa51348e6039b864171426b02cfbfa1d533b9f86e3c72875e0ed116994a2fec
linux-s390x sha256 6cd4bfff7e2025f2d3da55013036e39eea4e8f631060a5e2b32b9975fab08b0e
mac-amd64 sha256 6427b87fdaa1615a229d45c2fab1ba7fdb748ce785f2c09cd6e10adc48c58a66
windows-386 sha256 809c727a31e5f8c34656061b96839fbca63833140b90cab8e2491137d6e4fc4c
windows-amd64 sha256 3e21561b45acb2845c309a04cbedb2ce1e0567b7b24bf89857e7673607b09216
"""
class CipdBootstrapTest(unittest.TestCase):
"""Tests that CIPD client can bootstrap from scratch and self-update from some
old version to a most recent one.
WARNING: This integration test touches real network and real CIPD backend and
downloads several megabytes of stuff.
"""
def setUp(self):
self.tempdir = tempfile.mkdtemp('depot_tools_cipd')
def tearDown(self):
shutil.rmtree(self.tempdir)
def stage_files(self, cipd_version=None, digests=None):
"""Copies files needed for cipd bootstrap into the temp dir.
Args:
cipd_version: if not None, a value to put into cipd_client_version file.
"""
names = (
'.cipd_impl.ps1',
'cipd',
'cipd.bat',
'cipd_client_version',
'cipd_client_version.digests',
)
for f in names:
shutil.copy2(os.path.join(ROOT_DIR, f), os.path.join(self.tempdir, f))
if cipd_version is not None:
with open(os.path.join(self.tempdir, 'cipd_client_version'), 'wt') as f:
f.write(cipd_version+'\n')
if digests is not None:
p = os.path.join(self.tempdir, 'cipd_client_version.digests')
with open(p, 'wt') as f:
f.write(digests+'\n')
def call_cipd_help(self):
"""Calls 'cipd help' bootstrapping the client in tempdir.
Returns (exit code, merged stdout and stderr).
"""
exe = 'cipd.bat' if sys.platform == 'win32' else 'cipd'
p = subprocess.Popen(
[os.path.join(self.tempdir, exe), 'help'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = p.communicate()
return p.returncode, out
def test_new_bootstrap(self):
"""Bootstrapping the client from scratch."""
self.stage_files()
ret, out = self.call_cipd_help()
if ret:
self.fail('Bootstrap from scratch failed:\n%s' % out)
def test_self_update(self):
"""Updating the existing client in-place."""
self.stage_files(cipd_version=OLD_VERSION, digests=OLD_DIGESTS)
ret, out = self.call_cipd_help()
if ret:
self.fail('Update to %s fails:\n%s' % (OLD_VERSION, out))
self.stage_files()
ret, out = self.call_cipd_help()
if ret:
self.fail('Update from %s to the tip fails:\n%s' % (OLD_VERSION, out))
if __name__ == '__main__':
unittest.main()
| 37.654545 | 88 | 0.748431 |
4a1d9318e9d888d5e61e0528034d494a4bc243c3 | 10,138 | py | Python | urlgenerator.py | uegajde/LZ_Weather_Data_Downloader2 | fa6d27ea4d3aeb08555bcdcfafed36b51ceda239 | ["CNRI-Python"] | 1 | 2017-12-10T12:46:01.000Z | 2017-12-10T12:46:01.000Z | urlgenerator.py | uegajde/LZ_Weather_Data_Downloader2 | fa6d27ea4d3aeb08555bcdcfafed36b51ceda239 | ["CNRI-Python"] | null | null | null | urlgenerator.py | uegajde/LZ_Weather_Data_Downloader2 | fa6d27ea4d3aeb08555bcdcfafed36b51ceda239 | ["CNRI-Python"] | null | null | null |
from datetime import datetime
from datetime import timedelta
import urlgenhelper
now = datetime.utcnow()
def geturl(timeConfigure, task):
filenamelist = []
fixtimeshift = timedelta(0)
if task == "JMA_Weather_Chart":
# example : http://www.jma.go.jp/jp/metcht/pdf/kosou/aupq35_00.pdf (only one)
mode = 1
extension = "pdf"
base_url = "http://www.jma.go.jp/jp/metcht/pdf/kosou/"
filenamelist.append("aupq35_00")
filenamelist.append("aupq35_12")
filenamelist.append("aupq78_00")
filenamelist.append("aupq78_12")
elif task == "JMA_Weather_Chart_ASAS":
# example : http://www.hbc.co.jp/tecweather/archive/pdf/ASAS_2017112715.pdf (every 6 hr)
# example : http://www.hbc.co.jp/tecweather/archive/pdf/AUPQ78_2017112621.pdf (every 12 hr)
# example : http://www.hbc.co.jp/tecweather/archive/pdf/AUPQ35_2017042509.pdf (every 12 hr)
mode = 0
datatz = +9
timelabelformat = "ASAS_%Y%m%d%H"
extension = "pdf"
base_url = "http://www.hbc.jp/tecweather/archive/pdf/"
fixtimeshift = urlgenhelper.getfixtimeshift("hour", 0, 6, now)
timelabels = urlgenhelper.gettimelabel(
timeConfigure.period[task], timeConfigure.density[task], timeConfigure.unit[task], fixtimeshift, timelabelformat, datatz, now)
for timelabel in timelabels:
filenamelist.append(timelabel)
elif task == "JMA_Weather_Chart_AUPQ78":
# example : http://www.hbc.co.jp/tecweather/archive/pdf/ASAS_2017112715.pdf (every 6 hr)
# example : http://www.hbc.co.jp/tecweather/archive/pdf/AUPQ78_2017112621.pdf (every 12 hr)
# example : http://www.hbc.co.jp/tecweather/archive/pdf/AUPQ35_2017042509.pdf (every 12 hr)
mode = 0
datatz = +9
timelabelformat = "AUPQ78_%Y%m%d%H"
extension = "pdf"
base_url = "http://www.hbc.jp/tecweather/archive/pdf/"
fixtimeshift = urlgenhelper.getfixtimeshift("hour", 0, 12, now)
timelabels = urlgenhelper.gettimelabel(
timeConfigure.period[task], timeConfigure.density[task], timeConfigure.unit[task], fixtimeshift, timelabelformat, datatz, now)
for timelabel in timelabels:
filenamelist.append(timelabel)
elif task == "JMA_Weather_Chart_AUPQ35":
# example : http://www.hbc.co.jp/tecweather/archive/pdf/ASAS_2017112715.pdf (every 6 hr)
# example : http://www.hbc.co.jp/tecweather/archive/pdf/AUPQ78_2017112621.pdf (every 12 hr)
# example : http://www.hbc.co.jp/tecweather/archive/pdf/AUPQ35_2017042509.pdf (every 12 hr)
mode = 0
datatz = +9
timelabelformat = "AUPQ35_%Y%m%d%H"
extension = "pdf"
base_url = "http://www.hbc.jp/tecweather/archive/pdf/"
fixtimeshift = urlgenhelper.getfixtimeshift("hour", 0, 12, now)
timelabels = urlgenhelper.gettimelabel(
timeConfigure.period[task], timeConfigure.density[task], timeConfigure.unit[task], fixtimeshift, timelabelformat, datatz, now)
for timelabel in timelabels:
filenamelist.append(timelabel)
elif task == "JMA_WaterVapor_Image":
# example : http://www.jma.go.jp/jp/gms/imgs/0/watervapor/1/201711280020-00.png (every 10 min)
mode = 0
datatz = +9
timelabelformat = "%Y%m%d%H%M-00"
extension = "png"
base_url = "http://www.jma.go.jp/jp/gms/imgs/0/watervapor/1/"
fixtimeshift = urlgenhelper.getfixtimeshift("min", 0, 10, now)
timelabels = urlgenhelper.gettimelabel(
timeConfigure.period[task], timeConfigure.density[task], timeConfigure.unit[task], fixtimeshift, timelabelformat, datatz, now)
for timelabel in timelabels:
filenamelist.append(timelabel)
elif task == "CWB_Surface_Weather_Chart":
# example : http://www.cwb.gov.tw/V7/forecast/fcst/Data/2014-0508-0600_SFCcombo.jpg (every 6 hr)
mode = 0
datatz = +0
timelabelformat = "%Y-%m%d-%H00_SFCcombo"
extension = "jpg"
base_url = "http://www.cwb.gov.tw/V7/forecast/fcst/Data/"
fixtimeshift = urlgenhelper.getfixtimeshift("hour", 0, 6, now)
timelabels = urlgenhelper.gettimelabel(
timeConfigure.period[task], timeConfigure.density[task], timeConfigure.unit[task], fixtimeshift, timelabelformat, datatz, now)
for timelabel in timelabels:
filenamelist.append(timelabel)
elif task == "CWB_Skew":
# example : http://www.cwb.gov.tw/V7/station/Data/SKW_46692.pdf (only one)
mode = 1
timelabelformat = "%Y-%m%d-%H00"
extension = "pdf"
base_url = "http://www.cwb.gov.tw/V7/station/Data/"
filenamelist.append("SKW_46692")
filenamelist.append("SKW_46699")
filenamelist.append("SKW_46750")
elif task == "CWB_Radar":
# example : http://www.cwb.gov.tw/V7/observe/radar/Data/HD_Radar/CV1_3600_201605161930.png (every 10 min)
mode = 0
datatz = +8
timelabelformat = "CV1_3600_%Y%m%d%H%M"
extension = "png"
base_url = "http://www.cwb.gov.tw/V7/observe/radar/Data/HD_Radar/"
fixtimeshift = urlgenhelper.getfixtimeshift("min", 0, 10, now)
timelabels = urlgenhelper.gettimelabel(
timeConfigure.period[task], timeConfigure.density[task], timeConfigure.unit[task], fixtimeshift, timelabelformat, datatz, now)
for timelabel in timelabels:
filenamelist.append(timelabel)
elif task == "CWB_Satellite_Visible":
# example : http://www.cwb.gov.tw/V7/observe/satellite/Data/sbo/sbo-2016-08-12-19-50.jpg (every 10 min)
mode = 0
datatz = +8
timelabelformat = "sbo-%Y-%m-%d-%H-%M"
extension = "jpg"
base_url = "http://www.cwb.gov.tw/V7/observe/satellite/Data/sbo/"
fixtimeshift = urlgenhelper.getfixtimeshift("min", 0, 10, now)
timelabels = urlgenhelper.gettimelabel(
timeConfigure.period[task], timeConfigure.density[task], timeConfigure.unit[task], fixtimeshift, timelabelformat, datatz, now)
for timelabel in timelabels:
filenamelist.append(timelabel)
elif task == "CWB_Satellite_Infrared":
# example : http://www.cwb.gov.tw/V7/observe/satellite/Data/s3q/s3q-2016-08-12-23-30.jpg (every 10 min)
mode = 0
datatz = +8
timelabelformat = "s3q-%Y-%m-%d-%H-%M"
extension = "jpg"
base_url = "http://www.cwb.gov.tw/V7/observe/satellite/Data/s3q/"
fixtimeshift = urlgenhelper.getfixtimeshift("min", 0, 10, now)
timelabels = urlgenhelper.gettimelabel(
timeConfigure.period[task], timeConfigure.density[task], timeConfigure.unit[task], fixtimeshift, timelabelformat, datatz, now)
for timelabel in timelabels:
filenamelist.append(timelabel)
elif task == "CWB_Surface_Temperature":
# example : http://www.cwb.gov.tw/V7/observe/temperature/Data/2014-04-20_2000.GTP.jpg (every 1 hr)
mode = 0
datatz = +8
timelabelformat = "%Y-%m-%d_%H00.GTP"
extension = "jpg"
base_url = "http://www.cwb.gov.tw/V7/observe/temperature/Data/"
fixtimeshift = urlgenhelper.getfixtimeshift("hr", 0, 1, now)
timelabels = urlgenhelper.gettimelabel(
timeConfigure.period[task], timeConfigure.density[task], timeConfigure.unit[task], fixtimeshift, timelabelformat, datatz, now)
for timelabel in timelabels:
filenamelist.append(timelabel)
elif task == "CWB_Precipitation":
# example : http://www.cwb.gov.tw/V7/observe/rainfall/Data/hka09100.jpg (every 30 min)
mode = 0
datatz = +8
timelabelformat = "%m%d%H%M"
extension = "jpg"
base_url = "http://www.cwb.gov.tw/V7/observe/rainfall/Data/"
fixtimeshift = urlgenhelper.getfixtimeshift("min", 0, 30, now)
timelabels = urlgenhelper.gettimelabel(
timeConfigure.period[task], timeConfigure.density[task], timeConfigure.unit[task], fixtimeshift, timelabelformat, datatz, now)
for timelabel in timelabels:
monlabel = int(timelabel[0:2])
if monlabel == 10:
monlabel = "a"
elif monlabel == 11:
monlabel = "b"
elif monlabel == 12:
monlabel = "c"
monlabel = str(monlabel)
filename = "hk" + monlabel + timelabel[2:7]
filenamelist.append(filename)
elif task == "CWB_850hpa_WindSpeed_Streamline":
# example : http://www.cwb.gov.tw/V7/forecast/nwp/Data/GFS/GFS_14041918_DS2-GE_000.gif (every 6 hr)
mode = 0
datatz = +0
timelabelformat = "GFS_%y%m%d%H_DS2-GE_000"
extension = "gif"
base_url = "http://www.cwb.gov.tw/V7/forecast/nwp/Data/GFS/"
fixtimeshift = urlgenhelper.getfixtimeshift("hour", 0, 6, now)
timelabels = urlgenhelper.gettimelabel(
timeConfigure.period[task], timeConfigure.density[task], timeConfigure.unit[task], fixtimeshift, timelabelformat, datatz, now)
for timelabel in timelabels:
filenamelist.append(timelabel)
elif task == "CWB_850hpa_RH_Streamline":
# example : http://www.cwb.gov.tw/V7/forecast/nwp/Data/GFS/GFS_15020200_D51D2S-GE_000.gif (every 6 hr)
mode = 0
datatz = +0
timelabelformat = "GFS_%y%m%d%H_D51D2S-GE_000"
extension = "gif"
base_url = "http://www.cwb.gov.tw/V7/forecast/nwp/Data/GFS/"
fixtimeshift = urlgenhelper.getfixtimeshift("hour", 0, 6, now)
timelabels = urlgenhelper.gettimelabel(
timeConfigure.period[task], timeConfigure.density[task], timeConfigure.unit[task], fixtimeshift, timelabelformat, datatz, now)
for timelabel in timelabels:
filenamelist.append(timelabel)
again = timeConfigure.again[task]
if mode == 0:
removerepeat = False
elif mode == 1:
removerepeat = True
urls, savenames = urlgenhelper.urlcomposer(mode, base_url, filenamelist, extension)
return again, removerepeat, urls, savenames
| 51.20202 | 138 | 0.645887 |
4a1d954eb11e2e5fcdb156f36733e3fd75226d24 | 870 | py | Python | Lib/site-packages/wx-3.0-msw/wx/py/PyAlaMode.py | jickieduan/python27 | c752b552396bbed68d8555080d475718cea2edd0 | ["bzip2-1.0.6"] | 5 | 2019-03-11T14:30:31.000Z | 2021-12-04T14:11:54.000Z | Lib/site-packages/wx-3.0-msw/wx/py/PyAlaMode.py | jickieduan/python27 | c752b552396bbed68d8555080d475718cea2edd0 | ["bzip2-1.0.6"] | 1 | 2018-07-28T20:07:04.000Z | 2018-07-30T18:28:34.000Z | Lib/site-packages/wx-3.0-msw/wx/py/PyAlaMode.py | jickieduan/python27 | c752b552396bbed68d8555080d475718cea2edd0 | ["bzip2-1.0.6"] | 2 | 2019-12-02T01:39:10.000Z | 2021-02-13T22:41:00.000Z |
#!/usr/bin/env python
"""PyAlaMode is a programmer's editor."""
__author__ = "Patrick K. O'Brien <[email protected]>"
__cvsid__ = "$Id$"
__revision__ = "$Revision$"[11:-2]
import wx
from wx import py
import os
import sys
class App(wx.App):
"""PyAlaMode standalone application."""
def __init__(self, filename=None):
self.filename = filename
wx.App.__init__(self, redirect=False)
def OnInit(self):
wx.InitAllImageHandlers()
self.frame = py.editor.EditorNotebookFrame(filename=self.filename)
self.frame.Show()
self.SetTopWindow(self.frame)
return True
def main(filename=None):
if not filename and len(sys.argv) > 1:
filename = sys.argv[1]
if filename:
filename = os.path.realpath(filename)
app = App(filename)
app.MainLoop()
if __name__ == '__main__':
main()
| 22.894737 | 74 | 0.650575 |
4a1d95d154648ea47f80e169fcd9dfd0f2a67bbd | 3,373 | py | Python | 2018/finals/re-drm/validator/hash_asparagus.py | iicarus-bit/google-ctf | 4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b | ["Apache-2.0"] | 2,757 | 2018-04-28T21:41:36.000Z | 2022-03-29T06:33:36.000Z | 2018/finals/re-drm/validator/hash_asparagus.py | iicarus-bit/google-ctf | 4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b | ["Apache-2.0"] | 20 | 2019-07-23T15:29:32.000Z | 2022-01-21T12:53:04.000Z | 2018/finals/re-drm/validator/hash_asparagus.py | iicarus-bit/google-ctf | 4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b | ["Apache-2.0"] | 449 | 2018-05-09T05:54:05.000Z | 2022-03-30T14:54:18.000Z |
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Ian Eldred Pudney"
import hashlib
import subprocess
import sys
import threading
import re
import time
def clean_string(string):
"""Removes ANSI escape sequences and non-alphanumeric chars, and converts to lowercase."""
string = re.sub(r'\x1B\[[0-?]*[ -/]*[@-~]', "", string)
ret = []
for c in string:
if c.isalnum():
ret.append(c)
return "".join(ret).lower()
def run_command(command, channel, enable_logging, wait_for="$> "):
if enable_logging:
sys.stdout.write(command)
try:
channel.stdin.write(command)
except:
pass # avoid broken pipes
buf = []
while True:
c = channel.stdout.read(1)
if enable_logging:
sys.stdout.write(c)
if len(c) == 0:
break
buf.append(c)
if len(buf) >= len(wait_for) and "".join(buf[-len(wait_for):]) == wait_for:
break
result = "".join(buf)
return result
def shash(data):
return hashlib.sha256(data).hexdigest()
def run_asparagus(path, serial, infile, outfile, enable_logging, expected_outfile=None):
"""Run the ASPARAGUS binary at the specified path, with input.
Sends input from the specified file to ASPARAGUS, once per line.
Writes the hash of each line's result to the specified file.
If enable_logging is true, shows the data transmitted in the
terminal. Finally, returns the hash of the whole output."""
process = subprocess.Popen([path], executable=path, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
outhashes = ""
result = run_command("", process, enable_logging, wait_for=": ")
h = shash(clean_string(result))
outfile.write(h + "\n")
outhashes += h + "\n"
if expected_outfile is not None:
expected_line = expected_outfile.readline()
if expected_line[-1] == "\n":
expected_line = expected_line[0:-1]
if expected_line != shash(h): # double-hashed to prevent reverse-engineering
print "Got wrong pre-serial output: " + h[0:8] + "/" + shash(h)[0:8] + "/" + expected_line[0:8]
return
for line in infile:
if line[-1] != "\n":
line = line + "\n"
line = line.replace("__SERIAL__", serial)
line = line.replace("__PROGRAM_NAME__", path)
result = run_command(line, process, enable_logging)
h = shash(clean_string(result))
outfile.write(h + "\n")
outhashes += h + "\n"
if expected_outfile is not None:
expected_line = expected_outfile.readline()
if expected_line[-1] == "\n":
expected_line = expected_line[0:-1]
if expected_line != shash(h): # double-hashed to prevent reverse-engineering
print "Got wrong output for command '" + line[0:-1] + "': " + h[0:8] + "/" + shash(h)[0:8] + "/" + expected_line[0:8]
return
if not result:
break
process.wait()
return shash(outhashes)
| 30.944954 | 126 | 0.66973 |
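run_asparagus drives the target binary line by line, hashing each cleaned response and optionally checking it against a file of expected (double-hashed) digests. A hedged sketch of calling it; the binary path, serial, and file names below are made-up placeholders, not values from the original challenge:

```python
# Hypothetical invocation of run_asparagus defined above; paths and serial are placeholders.
with open("commands.txt") as infile, open("hashes.txt", "w") as outfile:
    digest = run_asparagus("./asparagus", "SERIAL-1234", infile, outfile,
                           enable_logging=False)
print(digest)  # sha256 over all per-line hashes
```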
4a1d96a2a5799c9c18bbaa5ab79d73b4e23e02b8 | 4,496 | py | Python | pynitrokey/cli/_patches.py | sgued/pynitrokey | 0499b4283739de5ac451916a22527fa5151ab5db | ["Apache-2.0", "MIT"] | 15 | 2020-08-05T14:37:37.000Z | 2022-02-20T13:47:41.000Z | pynitrokey/cli/_patches.py | sgued/pynitrokey | 0499b4283739de5ac451916a22527fa5151ab5db | ["Apache-2.0", "MIT"] | 153 | 2020-06-22T13:09:41.000Z | 2022-03-31T10:25:14.000Z | pynitrokey/cli/_patches.py | sgued/pynitrokey | 0499b4283739de5ac451916a22527fa5151ab5db | ["Apache-2.0", "MIT"] | 4 | 2021-04-06T07:08:59.000Z | 2022-02-14T14:26:38.000Z |
# -*- coding: utf-8 -*-
#
# Copyright 2019 SoloKeys Developers
#
# Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
# http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
# http://opensource.org/licenses/MIT>, at your option. This file may not be
# copied, modified, or distributed except according to those terms.
"""Monkey patch FIDO2 backend to get serial number."""
import sys
########################################################
# removed as fido._pyu2f is not part of fido2 anymore...
####################################################
# ## Windows
# if sys.platform.startswith("win32"):
# import fido2._pyu2f.windows
# oldDevAttrFunc = fido2._pyu2f.windows.FillDeviceAttributes
# from ctypes import wintypes
# import ctypes
# fido2._pyu2f.windows.hid.HidD_GetSerialNumberString.restype = wintypes.BOOLEAN
# fido2._pyu2f.windows.hid.HidD_GetSerialNumberString.argtypes = [
# ctypes.c_void_p,
# ctypes.c_void_p,
# ctypes.c_ulong,
# ]
# def newDevAttrFunc(device, descriptor):
# oldDevAttrFunc(device, descriptor)
# buf_ser = ctypes.create_string_buffer(1024)
# result = fido2._pyu2f.windows.hid.HidD_GetSerialNumberString(
# device, buf_ser, 1024
# )
# if result:
# descriptor.serial_number = ctypes.wstring_at(buf_ser)
# fido2._pyu2f.windows.FillDeviceAttributes = newDevAttrFunc
# ## macOS
# if sys.platform.startswith("darwin"):
# import fido2._pyu2f.macos
# from fido2._pyu2f import base
# from fido2._pyu2f.macos import (
# iokit,
# IO_HID_DEVICE_REF,
# GetDeviceIntProperty,
# GetDevicePath,
# GetDeviceStringProperty,
# HID_DEVICE_PROPERTY_VENDOR_ID,
# HID_DEVICE_PROPERTY_PRODUCT_ID,
# HID_DEVICE_PROPERTY_PRODUCT,
# HID_DEVICE_PROPERTY_PRIMARY_USAGE,
# HID_DEVICE_PROPERTY_PRIMARY_USAGE_PAGE,
# HID_DEVICE_PROPERTY_REPORT_ID,
# cf,
# )
# HID_DEVICE_PROPERTY_SERIAL_NUMBER = b"SerialNumber"
# def newEnumerate():
# """See base class."""
# # Init a HID manager
# hid_mgr = iokit.IOHIDManagerCreate(None, None)
# if not hid_mgr:
# raise OSError("Unable to obtain HID manager reference")
# iokit.IOHIDManagerSetDeviceMatching(hid_mgr, None)
# # Get devices from HID manager
# device_set_ref = iokit.IOHIDManagerCopyDevices(hid_mgr)
# if not device_set_ref:
# raise OSError("Failed to obtain devices from HID manager")
# num = iokit.CFSetGetCount(device_set_ref)
# devices = (IO_HID_DEVICE_REF * num)()
# iokit.CFSetGetValues(device_set_ref, devices)
# # Retrieve and build descriptor dictionaries for each device
# descriptors = []
# for dev in devices:
# d = base.DeviceDescriptor()
# d.vendor_id = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_VENDOR_ID)
# d.product_id = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_PRODUCT_ID)
# d.product_string = GetDeviceStringProperty(dev, HID_DEVICE_PROPERTY_PRODUCT)
# d.serial_number = GetDeviceStringProperty(
# dev, HID_DEVICE_PROPERTY_SERIAL_NUMBER
# )
# d.usage = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_PRIMARY_USAGE)
# d.usage_page = GetDeviceIntProperty(
# dev, HID_DEVICE_PROPERTY_PRIMARY_USAGE_PAGE
# )
# d.report_id = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_REPORT_ID)
# d.path = GetDevicePath(dev)
# descriptors.append(d.ToPublicDict())
# # Clean up CF objects
# cf.CFRelease(device_set_ref)
# cf.CFRelease(hid_mgr)
# return descriptors
# fido2._pyu2f.macos.MacOsHidDevice.Enumerate = newEnumerate
# ## Linux
# if sys.platform.startswith("linux"):
# import fido2._pyu2f.linux
# oldnewParseUevent = fido2._pyu2f.linux.ParseUevent
# def newParseUevent(uevent, desc):
# oldnewParseUevent(uevent, desc)
# lines = uevent.split(b"\n")
# for line in lines:
# line = line.strip()
# if not line:
# continue
# k, v = line.split(b"=")
# if k == b"HID_UNIQ":
# desc.serial_number = v.decode("utf8")
# fido2._pyu2f.linux.ParseUevent = newParseUevent
| 35.125 | 90 | 0.629226 |
4a1d97625167252b4d046959be40e22b53e60b1f | 1,130 | py | Python | session.py | Andrey1994/game_inspector | 9ecddf12cfb93dc425baf6a6d3d48c114d07ad69 | ["MIT"] | 29 | 2019-07-01T00:21:33.000Z | 2022-03-08T23:32:33.000Z | session.py | Andrey1994/game_inspector | 9ecddf12cfb93dc425baf6a6d3d48c114d07ad69 | ["MIT"] | 1 | 2019-06-25T11:13:04.000Z | 2019-06-30T18:01:18.000Z | session.py | Andrey1994/game_inspector | 9ecddf12cfb93dc425baf6a6d3d48c114d07ad69 | ["MIT"] | 6 | 2019-07-01T15:25:08.000Z | 2021-07-05T08:51:12.000Z |
import kivy
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import ScreenManager, Screen
import video_player
import fps_plot
import options
import screenshot
class GameInspector(Screen):
def __init__(self):
super(GameInspector, self).__init__(name='GameInspector')
self.main_layout = BoxLayout(orientation='vertical')
self.first_row = BoxLayout(orientation='horizontal')
self.second_row = BoxLayout(orientation='horizontal')
self.main_layout.add_widget(self.first_row)
self.main_layout.add_widget(self.second_row)
self.fps_plot = fps_plot.get_fps_plot()
self.video = video_player.get_video_player()
self.screenshot = screenshot.get_screenshot()
self.config = options.get_config(self.fps_plot, self.video, self.screenshot)
self.first_row.add_widget(self.config)
self.first_row.add_widget(self.fps_plot)
self.second_row.add_widget(self.video)
self.second_row.add_widget(self.screenshot)
self.add_widget(self.main_layout)
| 34.242424 | 84 | 0.738053 |
4a1d97c76c2739d1aa861643d422a5710816c3ea | 683 | py | Python | models/BoostLists.py | fgl-foundation/ComiteteCore | 25791521547ffe891672f61f1e5328a2181ba074 | ["MIT"] | 1 | 2019-04-12T11:47:14.000Z | 2019-04-12T11:47:14.000Z | models/BoostLists.py | fgl-foundation/Aquarius | 25791521547ffe891672f61f1e5328a2181ba074 | ["MIT"] | null | null | null | models/BoostLists.py | fgl-foundation/Aquarius | 25791521547ffe891672f61f1e5328a2181ba074 | ["MIT"] | null | null | null |
import datetime
from sqlalchemy import Column, Integer, DateTime, ForeignKey, UniqueConstraint
from models.database import Base
class BoostList(Base):
__tablename__ = 'BoostLists'
Id = Column(Integer, primary_key=True, autoincrement=True)
ChannelId = Column(Integer)
ServerId = Column(Integer, ForeignKey('Servers.Id', ondelete='CASCADE'))
__table_args__ = (UniqueConstraint('ChannelId', 'ServerId'),)
CreatedTime = Column(DateTime)
def __init__(self, channelId: int, serverId: int):
self.ChannelId = channelId
self.ServerId = serverId
self.CreatedTime = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=3)))
| 32.52381 | 96 | 0.7306 |
4a1d9912e6f9f890e42eb2a34fbeabf7ffddd3ab | 3,039 | py | Python | volttron/platform/vip/agent/errors.py | cloudcomputingabc/volttron | 6495e26e3185a7af8d0d79ad2586bdf8ea83992d | ["Apache-2.0", "BSD-2-Clause"] | 406 | 2015-01-20T03:08:53.000Z | 2022-03-31T20:59:07.000Z | volttron/platform/vip/agent/errors.py | cloudcomputingabc/volttron | 6495e26e3185a7af8d0d79ad2586bdf8ea83992d | ["Apache-2.0", "BSD-2-Clause"] | 2,031 | 2015-01-05T21:35:45.000Z | 2022-03-29T21:44:36.000Z | volttron/platform/vip/agent/errors.py | cloudcomputingabc/volttron | 6495e26e3185a7af8d0d79ad2586bdf8ea83992d | ["Apache-2.0", "BSD-2-Clause"] | 219 | 2015-01-20T14:53:57.000Z | 2022-03-06T00:37:41.000Z |
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import errno
__all__ = ['VIPError', 'Unreachable', 'Again', 'UnknownSubsystem']
class VIPError(Exception):
def __init__(self, errnum, msg, peer, subsystem, *args):
super(VIPError, self).__init__(errnum, msg, peer, subsystem, *args)
self.errno = int(errnum)
self.msg = msg
self.peer = peer
self.subsystem = subsystem
def __str__(self):
return 'VIP error (%d): %s' % (self.errno, self.msg)
def __repr__(self):
return '%s%r' % (type(self).__name__, self.args)
@classmethod
def from_errno(cls, errnum, msg, *args):
errnum = int(errnum)
return {
errno.EHOSTUNREACH: Unreachable,
errno.EAGAIN: Again,
errno.EPROTONOSUPPORT: UnknownSubsystem,
}.get(errnum, cls)(errnum, msg, *args)
class Unreachable(VIPError):
def __str__(self):
return '%s: %s' % (super(Unreachable, self).__str__(), self.peer)
class Again(VIPError):
pass
class UnknownSubsystem(VIPError):
def __str__(self):
return '%s: %s' % (
super(UnknownSubsystem, self).__str__(), self.subsystem)
| 36.178571 | 79 | 0.715367 |
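VIPError.from_errno maps a numeric error code onto the most specific subclass, so callers can catch Unreachable or Again directly instead of inspecting errno values themselves. A brief sketch, assuming the classes above are importable; the peer and subsystem names are made up for illustration:

```python
import errno

err = VIPError.from_errno(errno.EHOSTUNREACH, 'host unreachable', 'peer-1', 'rpc')
assert isinstance(err, Unreachable)
print(err)  # e.g. "VIP error (113): host unreachable: peer-1" (the numeric value is platform dependent)
```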
4a1d9941880d9a0946132944b2a05a3705f78f5d | 9,805 | py | Python | test/test_sanitizer.py | KonstantinKlepikov/SimpleSQLite | 33b600d155e1a3e1a0288a9cbd86cae02ae7303a | ["MIT"] | 1 | 2019-09-27T22:27:32.000Z | 2019-09-27T22:27:32.000Z | test/test_sanitizer.py | KonstantinKlepikov/SimpleSQLite | 33b600d155e1a3e1a0288a9cbd86cae02ae7303a | ["MIT"] | null | null | null | test/test_sanitizer.py | KonstantinKlepikov/SimpleSQLite | 33b600d155e1a3e1a0288a9cbd86cae02ae7303a | ["MIT"] | null | null | null |
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
from __future__ import unicode_literals
import pytest
from simplesqlite import NameValidationError, SQLiteTableDataSanitizer, connect_memdb
from tabledata import TableData
from typepy import String
from ._common import print_test_result
class Test_SQLiteTableDataSanitizer(object):
@pytest.mark.parametrize(
["table_name", "headers", "records", "expected"],
[
[
"normal",
["a", "b_c"],
[[1, 2], [3, 4]],
TableData("normal", ["a", "b_c"], [[1, 2], [3, 4]]),
],
[
"underscore_char",
["data", "_data", "data_", "_data_"],
[[1, 2, 3, 4], [11, 12, 13, 14]],
TableData(
"underscore_char",
["data", "_data", "data_", "_data_"],
[[1, 2, 3, 4], [11, 12, 13, 14]],
),
],
[
"OFFSET",
["abort", "ASC"],
[[1, 2], [3, 4]],
TableData("OFFSET", ["abort", "ASC"], [[1, 2], [3, 4]]),
],
[
"missing_all_header",
[],
[[1, 2], [3, 4]],
TableData("missing_all_header", ["A", "B"], [[1, 2], [3, 4]]),
],
[
"num_header",
[1, 123456789],
[[1, 2], [3, 4]],
TableData("num_header", ["1", "123456789"], [[1, 2], [3, 4]]),
],
[
"missing_part_of_header",
["", "bb", None],
[[1, 2, 3]],
TableData("missing_part_of_header", ["A", "bb", "C"], [[1, 2, 3]]),
],
[
"avoid_duplicate_default_header_0",
["", "a", None],
[[1, 2, 3]],
TableData("avoid_duplicate_default_header_0", ["B", "a", "C"], [[1, 2, 3]]),
],
[
"avoid_duplicate_default_header_1",
["", "A", "B", "c", ""],
[[1, 2, 3, 4, 5]],
TableData(
"avoid_duplicate_default_header_1", ["D", "A", "B", "c", "E"], [[1, 2, 3, 4, 5]]
),
],
[
r"@a!b\c#d$e%f&g'h(i)j_",
[r"a!bc#d$e%f&g'h(i)j", r"k@l[m]n{o}p;q:r,s.t/u", "a b"],
[[1, 2, 3], [11, 12, 13]],
TableData(
"a_b_c_d_e_f_g_h_i_j",
["a!bc#d$e%f&g_h(i)j", "k@l[m]n{o}p;q:r_s.t/u", "a b"],
[[1, 2, 3], [11, 12, 13]],
),
],
[
# SQLite reserved keywords
"ALL",
["and", "Index"],
[[1, 2], [3, 4]],
TableData("rename_ALL", ["and", "Index"], [[1, 2], [3, 4]]),
],
[
"invalid'tn",
["in'valid", "ALL"],
[[1, 2], [3, 4]],
TableData("invalid_tn", ["in_valid", "ALL"], [[1, 2], [3, 4]]),
],
[
"Python (programming language) - Wikipedia, the free encyclopedia.html",
["a b", "c d"],
[[1, 2], [3, 4]],
TableData(
"Python_programming_language_Wikipedia_the_free_encyclopedia_html",
["a b", "c d"],
[[1, 2], [3, 4]],
),
],
[
"multibyte csv",
["姓", "名", "生年月日", "郵便番号", "住所", "電話番号"],
[
["山田", "太郎", "2001/1/1", "100-0002", "東京都千代田区皇居外苑", "03-1234-5678"],
["山田", "次郎", "2001/1/2", "251-0036", "神奈川県藤沢市江の島1丁目", "03-9999-9999"],
],
TableData(
"multibyte_csv",
["姓", "名", "生年月日", "郵便番号", "住所", "電話番号"],
[
["山田", "太郎", "2001/1/1", "100-0002", "東京都千代田区皇居外苑", "03-1234-5678"],
["山田", "次郎", "2001/1/2", "251-0036", "神奈川県藤沢市江の島1丁目", "03-9999-9999"],
],
),
],
],
)
def test_normal(self, table_name, headers, records, expected):
new_tabledata = SQLiteTableDataSanitizer(
TableData(table_name, headers, records)
).normalize()
try:
from pytablewriter import dumps_tabledata
print_test_result(
expected=dumps_tabledata(expected), actual=dumps_tabledata(new_tabledata)
)
except ImportError:
pass
con = connect_memdb()
con.create_table_from_tabledata(new_tabledata)
assert con.select_as_tabledata(new_tabledata.table_name) == expected
assert new_tabledata.equals(expected)
@pytest.mark.parametrize(
["table_name", "headers", "records", "type_hints", "expecte_col_types", "expecte_data"],
[
[
"w/ type inference",
["a", "b_c"],
[[1, 2], [3, 4]],
[String],
["STRING", "INTEGER"],
TableData("w_type_inference", ["a", "b_c"], [["1", 2], ["3", 4]]),
]
],
)
def test_normal_type_hints(
self, table_name, headers, records, type_hints, expecte_col_types, expecte_data
):
new_tabledata = SQLiteTableDataSanitizer(
TableData(table_name, headers, records, type_hints=type_hints)
).normalize()
actual_col_types = [col_dp.typename for col_dp in new_tabledata.column_dp_list]
assert actual_col_types == expecte_col_types
con = connect_memdb()
con.create_table_from_tabledata(new_tabledata)
assert con.select_as_tabledata(new_tabledata.table_name) == expecte_data
@pytest.mark.parametrize(
[
"table_name",
"headers",
"records",
"is_type_inference",
"expecte_col_types",
"expecte_data",
],
[
[
"w/ type inference",
["a", "b_c"],
[["1", "2"], ["3", "4"]],
True,
["INTEGER", "INTEGER"],
TableData("w_type_inference", ["a", "b_c"], [[1, 2], [3, 4]]),
],
[
"w/o type inference",
["a", "b_c"],
[["1", "2"], ["3", "4"]],
False,
["STRING", "STRING"],
TableData("w_o_type_inference", ["a", "b_c"], [["1", "2"], ["3", "4"]]),
],
],
)
def test_normal_type_inference(
self, table_name, headers, records, is_type_inference, expecte_col_types, expecte_data
):
new_tabledata = SQLiteTableDataSanitizer(
TableData(table_name, headers, records), is_type_inference=is_type_inference
).normalize()
actual_col_types = [col_dp.typename for col_dp in new_tabledata.column_dp_list]
print(is_type_inference, expecte_col_types, actual_col_types)
assert actual_col_types == expecte_col_types
con = connect_memdb()
con.create_table_from_tabledata(new_tabledata)
assert con.select_as_tabledata(new_tabledata.table_name) == expecte_data
@pytest.mark.parametrize(
["table_name", "headers", "records", "expected"],
[
["", ["a", "b"], [], NameValidationError],
[None, ["a", "b"], [], NameValidationError],
["dummy", [], [], ValueError],
],
)
def test_exception_invalid_data(self, table_name, headers, records, expected):
with pytest.raises(expected):
SQLiteTableDataSanitizer(TableData(table_name, headers, records)).normalize()
class Test_SQLiteTableDataSanitizer_dup_col_handler(object):
@pytest.mark.parametrize(
["table_name", "headers", "dup_col_handler", "expected"],
[
[
"all attrs are duplicated",
["A", "A", "A", "A", "A"],
"rename",
TableData("all_attrs_are_duplicated", ["A", "A_1", "A_2", "A_3", "A_4"], []),
],
[
"recursively duplicated attrs",
["A", "A", "A_1", "A_1", "A_2", "A_1_1", "A_1_1"],
"recursively_duplicated_attrs",
TableData(
"recursively_duplicated_attrs",
["A", "A_3", "A_1", "A_1_2", "A_2", "A_1_1", "A_1_1_1"],
[],
),
],
],
)
def test_normal_(self, table_name, headers, dup_col_handler, expected):
new_tabledata = SQLiteTableDataSanitizer(
TableData(table_name, headers, []), dup_col_handler=dup_col_handler
).normalize()
try:
from pytablewriter import dumps_tabledata
print_test_result(
expected=dumps_tabledata(expected), actual=dumps_tabledata(new_tabledata)
)
except ImportError:
pass
assert new_tabledata.equals(expected)
@pytest.mark.parametrize(
["table_name", "headers", "expected"],
[
["duplicate columns", ["a", "a"], ValueError],
["duplicate columns", ["AA", "b", "AA"], ValueError],
],
)
def test_exception(self, table_name, headers, expected):
with pytest.raises(expected):
SQLiteTableDataSanitizer(
TableData(table_name, headers, []), dup_col_handler="error"
).normalize()
| 35.143369 | 100 | 0.453544 |
4a1d99e33057a6d447bbf258dd562727477909aa | 3,015 | py | Python | tests/gold_tests/tls_hooks/tls_hooks14.test.py | nozomi1773/trafficserver | 2ee141137545a84584d8047eee70b171b5254c40 | ["Apache-2.0"] | null | null | null | tests/gold_tests/tls_hooks/tls_hooks14.test.py | nozomi1773/trafficserver | 2ee141137545a84584d8047eee70b171b5254c40 | ["Apache-2.0"] | null | null | null | tests/gold_tests/tls_hooks/tls_hooks14.test.py | nozomi1773/trafficserver | 2ee141137545a84584d8047eee70b171b5254c40 | ["Apache-2.0"] | null | null | null |
'''
Test two outbound start delayed hooks
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = '''
Test different combinations of TLS handshake hooks to ensure they are applied consistently.
'''
ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True)
server = Test.MakeOriginServer("server", ssl=True)
request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
# desired response form the origin server
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'ssl_hook_test',
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.verify.server': 0,
'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.remap_config.AddLine(
'map https://example.com:{0} https://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port)
)
Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-out_start_delay=2')
tr = Test.AddTestRun("Test outbound delay start")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = 'curl -k -H \'host:example.com:{0}\' https://127.0.0.1:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
ts.Streams.stderr = "gold/ts-out-delay-start-2.gold"
tr.Processes.Default.TimeOut = 5
tr.TimeOut = 5
| 45 | 332 | 0.748259 |
4a1d9a433a1396668ee188a0e28610083c605fc3 | 1,802 | py | Python | kameleo/local_api_client/models/server.py | kameleo-io/local-api-client-python | e2dcb17898075b76dc2a49d36f9c50af8c9b2b8a | ["MIT"] | 29 | 2021-10-01T10:12:31.000Z | 2022-02-26T21:13:51.000Z | kameleo/local_api_client/models/server.py | kameleo-io/local-api-client-python | e2dcb17898075b76dc2a49d36f9c50af8c9b2b8a | ["MIT"] | 2 | 2021-10-01T10:16:02.000Z | 2021-10-05T14:04:57.000Z | kameleo/local_api_client/models/server.py | kameleo-io/local-api-client-python | e2dcb17898075b76dc2a49d36f9c50af8c9b2b8a | ["MIT"] | 2 | 2021-11-22T01:19:10.000Z | 2021-12-07T03:21:32.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Server(Model):
"""Represents a server connection. It can be used as a proxy server connection
as well.
All required parameters must be populated in order to send to Azure.
:param host: Required. Gets or sets the hostname where the service is
provided from.
:type host: str
:param port: Required. Gets or sets the port where the service is provided
from.
:type port: int
:param id: Gets or sets the identity information provided for the service.
This could be a custom id or username or anything which identifies a
resource on the remote service. Use it as a proxy username. This field is
optional.
:type id: str
:param secret: Gets or sets the shared secret between the client and the
service provider. Use it as a proxy password. This field is optional.
:type secret: str
"""
_validation = {
'host': {'required': True},
'port': {'required': True},
}
_attribute_map = {
'host': {'key': 'host', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'secret': {'key': 'secret', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Server, self).__init__(**kwargs)
self.host = kwargs.get('host', None)
self.port = kwargs.get('port', None)
self.id = kwargs.get('id', None)
self.secret = kwargs.get('secret', None)
| 35.333333 | 82 | 0.579911 |
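Since __init__ simply pulls the mapped attributes out of **kwargs, a Server (for example a proxy entry) can be built directly with keyword arguments. A hedged sketch; the host and credentials below are made-up placeholder values:

```python
# Hypothetical construction of a proxy-style Server; host/credentials are placeholders.
proxy = Server(host='203.0.113.10', port=8080, id='proxy-user', secret='proxy-pass')
print(proxy.host, proxy.port)  # 203.0.113.10 8080
```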