repo_name (stringlengths 7-94) | repo_path (stringlengths 4-237) | repo_head_hexsha (stringlengths 40-40) | content (stringlengths 10-680k) | apis (stringlengths 2-840k) |
---|---|---|---|---|
ryangillard/misc | leetcode/970_powerful_integers/970_powerful_integers.py | d1f9919400636e6b988fa933493b94829a73331e | class Solution(object):
    def powerfulIntegers(self, x, y, bound):
        """
        :type x: int
        :type y: int
        :type bound: int
        :rtype: List[int]
        """
        # Find max exponent
        base = max(x, y) if x == 1 or y == 1 else min(x, y)
        exponent = 1
        if base != 1:
            while base ** exponent <= bound:
                exponent += 1
        # Brute force all of the exponent trials
        hashset = set()
        for i in range(exponent):
            for j in range(exponent):
                z = x ** i + y ** j
                if z <= bound:
                    hashset.add(z)
        return list(hashset) | [] |
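A hypothetical driver for the solution above (not part of the repository), using LeetCode 970's first example, where x=2, y=3, bound=10 should yield the powerful integers 2, 3, 4, 5, 7, 9 and 10:

```python
# Hypothetical usage sketch for the Solution class above.
sol = Solution()
result = sol.powerfulIntegers(2, 3, 10)
# A set is used internally, so the order of the returned list is unspecified.
print(sorted(result))  # [2, 3, 4, 5, 7, 9, 10]
```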
jSkrod/djangae-react-browser-games-app | src/project/api/rankings/urls.py | 28c5064f0a126021afb08b195839305aba6b35a2 | from django.conf.urls import url, include
from project.api.rankings.api import AddRanking, AddScore, GetScoresUser, GetScoresGame
urlpatterns = [
    url(r'add_ranking$', AddRanking.as_view()),
    url(r'add_score$', AddScore.as_view()),
    url(r'get_scores_game$', GetScoresGame.as_view()),
    url(r'get_scores_user$', GetScoresUser.as_view())
] | [((5, 25, 5, 45), 'project.api.rankings.api.AddRanking.as_view', 'AddRanking.as_view', ({}, {}), '()', False, 'from project.api.rankings.api import AddRanking, AddScore, GetScoresUser, GetScoresGame\n'), ((6, 23, 6, 41), 'project.api.rankings.api.AddScore.as_view', 'AddScore.as_view', ({}, {}), '()', False, 'from project.api.rankings.api import AddRanking, AddScore, GetScoresUser, GetScoresGame\n'), ((7, 29, 7, 52), 'project.api.rankings.api.GetScoresGame.as_view', 'GetScoresGame.as_view', ({}, {}), '()', False, 'from project.api.rankings.api import AddRanking, AddScore, GetScoresUser, GetScoresGame\n'), ((8, 29, 8, 52), 'project.api.rankings.api.GetScoresUser.as_view', 'GetScoresUser.as_view', ({}, {}), '()', False, 'from project.api.rankings.api import AddRanking, AddScore, GetScoresUser, GetScoresGame\n')] |
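A hedged sketch of how this URLconf would typically be mounted from a project-level `urls.py`; the `^rankings/` prefix is illustrative and not taken from the repository:

```python
# Hypothetical project-level urls.py (Django 1.x-style url()/include(), as used above).
from django.conf.urls import url, include

urlpatterns = [
    url(r'^rankings/', include('project.api.rankings.urls')),
]
```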
wdczdj/qiskit-metal | qiskit_metal/qlibrary/lumped/cap_n_interdigital.py | c77805f66da60021ef8d10d668715c1dc2ebcd1d | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core import QComponent
import numpy as np
class CapNInterdigital(QComponent):
"""Generates a two pin (+) structure comprised of a north CPW transmission
line, and a south transmission line, coupled together via a finger
capacitor. Such a structure can be used, as an example, for generating CPW
resonators. (0,0) represents the center position of the component. Setting
finger length to 0 gives a simple gap capacitor. The width of the gap
capacitor is found via
(cap_width * finger_count + cap_gap * (finger_count - 1)).
Inherits QComponent class.
::
(0,0) N
+ ^
| |
|
|
--|-----|--
| | | | |
|-----|-----|
|
|
|
|
+
Options:
* north_width: '10um' -- The width of the 'north' portion of the CPW transmission line
* north_gap: '6um' -- The dielectric gap of the 'north' portion of the CPW transmission line
* south_width: '10um' -- The width of the 'south' portion of the CPW transmission line
* south_gap: '6um' -- The dielectric gap of the 'south' portion of the CPW transmission line
(also for the capacitor gap to ground)
* cap_width: '10um' -- The width of the finger capacitor metal (and islands)
* cap_gap: '6um' -- The width of dielectric for the capacitive coupling/fingers
* cap_gap_ground: '6um' -- Width of the dielectric between the capacitor and ground
* finger_length: '20um' -- The depth of the finger islands of the capacitor
* finger_count: '5' -- Number of fingers in the capacitor
* cap_distance: '50um' -- Distance of the north point of the capacitor from the north pin
* pos_x/_y: '0um' -- The x/y position of the north pin
* orientation: '0' -- The direction of the transmission line. 0 degrees is -y, following a
counter-clockwise rotation (e.g. 90 is +x)
* chip: 'main' -- The chip the capacitor should be on.
* layer: '1' -- Layer the capacitor is on.
"""
component_metadata = Dict(short_name='cpw',
_qgeometry_table_poly='True',
_qgeometry_table_path='True')
"""Component metadata"""
#Currently setting the primary CPW length based on the coupling_length
#May want it to be its own value that the user can control?
default_options = Dict(north_width='10um',
north_gap='6um',
south_width='10um',
south_gap='6um',
cap_width='10um',
cap_gap='6um',
cap_gap_ground='6um',
finger_length='20um',
finger_count='5',
cap_distance='50um',
pos_x='0um',
pos_y='0um',
orientation='0',
chip='main',
layer='1')
"""Default connector options"""
def make(self):
"""Build the component."""
p = self.p
N = int(p.finger_count)
#Finger Capacitor
cap_box = draw.rectangle(N * p.cap_width + (N - 1) * p.cap_gap,
p.cap_gap + 2 * p.cap_width + p.finger_length,
0, 0)
make_cut_list = []
make_cut_list.append([0, (p.finger_length) / 2])
make_cut_list.append([(p.cap_width) + (p.cap_gap / 2),
(p.finger_length) / 2])
flip = -1
for i in range(1, N):
make_cut_list.append([
i * (p.cap_width) + (2 * i - 1) * (p.cap_gap / 2),
flip * (p.finger_length) / 2
])
make_cut_list.append([
(i + 1) * (p.cap_width) + (2 * i + 1) * (p.cap_gap / 2),
flip * (p.finger_length) / 2
])
flip = flip * -1
cap_cut = draw.LineString(make_cut_list).buffer(p.cap_gap / 2,
cap_style=2,
join_style=2)
cap_cut = draw.translate(cap_cut,
-(N * p.cap_width + (N - 1) * p.cap_gap) / 2,
0)
cap_body = draw.subtract(cap_box, cap_cut)
cap_body = draw.translate(
cap_body, 0, -p.cap_distance -
(p.cap_gap + 2 * p.cap_width + p.finger_length) / 2)
cap_etch = draw.rectangle(
N * p.cap_width + (N - 1) * p.cap_gap + 2 * p.cap_gap_ground,
p.cap_gap + 2 * p.cap_width + p.finger_length +
2 * p.cap_gap_ground, 0, -p.cap_distance -
(p.cap_gap + 2 * p.cap_width + p.finger_length) / 2)
#CPW
north_cpw = draw.LineString([[0, 0], [0, -p.cap_distance]])
south_cpw = draw.LineString(
[[
0, -p.cap_distance -
(p.cap_gap + 2 * p.cap_width + p.finger_length)
],
[
0, -2 * p.cap_distance -
(p.cap_gap + 2 * p.cap_width + p.finger_length)
]])
#Rotate and Translate
c_items = [north_cpw, south_cpw, cap_body, cap_etch]
c_items = draw.rotate(c_items, p.orientation, origin=(0, 0))
c_items = draw.translate(c_items, p.pos_x, p.pos_y)
[north_cpw, south_cpw, cap_body, cap_etch] = c_items
#Add to qgeometry tables
self.add_qgeometry('path', {'north_cpw': north_cpw},
width=p.north_width,
layer=p.layer)
self.add_qgeometry('path', {'north_cpw_sub': north_cpw},
width=p.north_width + 2 * p.north_gap,
layer=p.layer,
subtract=True)
self.add_qgeometry('path', {'south_cpw': south_cpw},
width=p.south_width,
layer=p.layer)
self.add_qgeometry('path', {'south_cpw_sub': south_cpw},
width=p.south_width + 2 * p.south_gap,
layer=p.layer,
subtract=True)
self.add_qgeometry('poly', {'cap_body': cap_body}, layer=p.layer)
self.add_qgeometry('poly', {'cap_etch': cap_etch},
layer=p.layer,
subtract=True)
#Add pins
north_pin_list = north_cpw.coords
south_pin_list = south_cpw.coords
self.add_pin('north_end',
points=np.array(north_pin_list[::-1]),
width=p.north_width,
input_as_norm=True)
self.add_pin('south_end',
points=np.array(south_pin_list),
width=p.south_width,
input_as_norm=True)
| [((66, 25, 68, 59), 'qiskit_metal.Dict', 'Dict', (), '', False, 'from qiskit_metal import draw, Dict\n'), ((73, 22, 87, 37), 'qiskit_metal.Dict', 'Dict', (), '', False, 'from qiskit_metal import draw, Dict\n'), ((96, 18, 98, 38), 'qiskit_metal.draw.rectangle', 'draw.rectangle', ({(96, 33, 96, 70): 'N * p.cap_width + (N - 1) * p.cap_gap', (97, 33, 97, 78): 'p.cap_gap + 2 * p.cap_width + p.finger_length', (98, 33, 98, 34): '0', (98, 36, 98, 37): '0'}, {}), '(N * p.cap_width + (N - 1) * p.cap_gap, p.cap_gap + 2 * p.\n cap_width + p.finger_length, 0, 0)', False, 'from qiskit_metal import draw, Dict\n'), ((119, 18, 121, 35), 'qiskit_metal.draw.translate', 'draw.translate', ({(119, 33, 119, 40): 'cap_cut', (120, 33, 120, 77): '-(N * p.cap_width + (N - 1) * p.cap_gap) / 2', (121, 33, 121, 34): '0'}, {}), '(cap_cut, -(N * p.cap_width + (N - 1) * p.cap_gap) / 2, 0)', False, 'from qiskit_metal import draw, Dict\n'), ((123, 19, 123, 50), 'qiskit_metal.draw.subtract', 'draw.subtract', ({(123, 33, 123, 40): 'cap_box', (123, 42, 123, 49): 'cap_cut'}, {}), '(cap_box, cap_cut)', False, 'from qiskit_metal import draw, Dict\n'), ((124, 19, 126, 64), 'qiskit_metal.draw.translate', 'draw.translate', ({(125, 12, 125, 20): 'cap_body', (125, 22, 125, 23): '0', (125, 25, 126, 63): '-p.cap_distance - (p.cap_gap + 2 * p.cap_width + p.finger_length) / 2'}, {}), '(cap_body, 0, -p.cap_distance - (p.cap_gap + 2 * p.cap_width +\n p.finger_length) / 2)', False, 'from qiskit_metal import draw, Dict\n'), ((128, 19, 132, 64), 'qiskit_metal.draw.rectangle', 'draw.rectangle', ({(129, 12, 129, 72): 'N * p.cap_width + (N - 1) * p.cap_gap + 2 * p.cap_gap_ground', (130, 12, 131, 32): 'p.cap_gap + 2 * p.cap_width + p.finger_length + 2 * p.cap_gap_ground', (131, 34, 131, 35): '0', (131, 37, 132, 63): '-p.cap_distance - (p.cap_gap + 2 * p.cap_width + p.finger_length) / 2'}, {}), '(N * p.cap_width + (N - 1) * p.cap_gap + 2 * p.cap_gap_ground,\n p.cap_gap + 2 * p.cap_width + p.finger_length + 2 * p.cap_gap_ground, 0,\n -p.cap_distance - (p.cap_gap + 2 * p.cap_width + p.finger_length) / 2)', False, 'from qiskit_metal import draw, Dict\n'), ((135, 20, 135, 67), 'qiskit_metal.draw.LineString', 'draw.LineString', ({(135, 36, 135, 66): '[[0, 0], [0, -p.cap_distance]]'}, {}), '([[0, 0], [0, -p.cap_distance]])', False, 'from qiskit_metal import draw, Dict\n'), ((137, 20, 145, 16), 'qiskit_metal.draw.LineString', 'draw.LineString', ({(138, 12, 145, 15): '[[0, -p.cap_distance - (p.cap_gap + 2 * p.cap_width + p.finger_length)], [0,\n -2 * p.cap_distance - (p.cap_gap + 2 * p.cap_width + p.finger_length)]]'}, {}), '([[0, -p.cap_distance - (p.cap_gap + 2 * p.cap_width + p.\n finger_length)], [0, -2 * p.cap_distance - (p.cap_gap + 2 * p.cap_width +\n p.finger_length)]])', False, 'from qiskit_metal import draw, Dict\n'), ((149, 18, 149, 68), 'qiskit_metal.draw.rotate', 'draw.rotate', (), '', False, 'from qiskit_metal import draw, Dict\n'), ((150, 18, 150, 59), 'qiskit_metal.draw.translate', 'draw.translate', ({(150, 33, 150, 40): 'c_items', (150, 42, 150, 49): 'p.pos_x', (150, 51, 150, 58): 'p.pos_y'}, {}), '(c_items, p.pos_x, p.pos_y)', False, 'from qiskit_metal import draw, Dict\n'), ((116, 18, 116, 48), 'qiskit_metal.draw.LineString', 'draw.LineString', ({(116, 34, 116, 47): 'make_cut_list'}, {}), '(make_cut_list)', False, 'from qiskit_metal import draw, Dict\n'), ((180, 28, 180, 58), 'numpy.array', 'np.array', ({(180, 37, 180, 57): 'north_pin_list[::-1]'}, {}), '(north_pin_list[::-1])', True, 'import numpy as np\n'), ((184, 28, 184, 52), 
'numpy.array', 'np.array', ({(184, 37, 184, 51): 'south_pin_list'}, {}), '(south_pin_list)', True, 'import numpy as np\n')] |
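The class docstring above lists the component's options and the pins it exposes; below is a minimal, hedged sketch of instantiating it inside a qiskit-metal planar design. The `DesignPlanar`/`rebuild` workflow is the standard qiskit-metal pattern, and the option values are purely illustrative:

```python
# Minimal usage sketch (assumed qiskit-metal workflow; option values are illustrative).
from qiskit_metal import designs

design = designs.DesignPlanar()
cap = CapNInterdigital(design, 'cap_1',
                       options=dict(pos_x='1mm',
                                    pos_y='0mm',
                                    finger_count='7',
                                    finger_length='30um'))
design.rebuild()  # regenerates qgeometry, exposing the 'north_end' and 'south_end' pins
```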
getcircle/luno-ios | ThirdParty/protobuf-registry/python/protobufs/services/feature/actions/get_flags_pb2.py | d18260abb537496d86cf607c170dd5e91c406f0f | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: protobufs/services/feature/actions/get_flags.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='protobufs/services/feature/actions/get_flags.proto',
package='services.feature.actions.get_flags',
syntax='proto3',
serialized_pb=b'\n2protobufs/services/feature/actions/get_flags.proto\x12\"services.feature.actions.get_flags\"\x0b\n\tRequestV1\"\x84\x01\n\nResponseV1\x12H\n\x05\x66lags\x18\x01 \x03(\x0b\x32\x39.services.feature.actions.get_flags.ResponseV1.FlagsEntry\x1a,\n\nFlagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01\x62\x06proto3'
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_REQUESTV1 = _descriptor.Descriptor(
name='RequestV1',
full_name='services.feature.actions.get_flags.RequestV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=90,
serialized_end=101,
)
_RESPONSEV1_FLAGSENTRY = _descriptor.Descriptor(
name='FlagsEntry',
full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.value', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=192,
serialized_end=236,
)
_RESPONSEV1 = _descriptor.Descriptor(
name='ResponseV1',
full_name='services.feature.actions.get_flags.ResponseV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='flags', full_name='services.feature.actions.get_flags.ResponseV1.flags', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_RESPONSEV1_FLAGSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=104,
serialized_end=236,
)
_RESPONSEV1_FLAGSENTRY.containing_type = _RESPONSEV1
_RESPONSEV1.fields_by_name['flags'].message_type = _RESPONSEV1_FLAGSENTRY
DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1
DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1
RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict(
DESCRIPTOR = _REQUESTV1,
__module__ = 'protobufs.services.feature.actions.get_flags_pb2'
# @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.RequestV1)
))
_sym_db.RegisterMessage(RequestV1)
ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict(
FlagsEntry = _reflection.GeneratedProtocolMessageType('FlagsEntry', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEV1_FLAGSENTRY,
__module__ = 'protobufs.services.feature.actions.get_flags_pb2'
# @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1.FlagsEntry)
))
,
DESCRIPTOR = _RESPONSEV1,
__module__ = 'protobufs.services.feature.actions.get_flags_pb2'
# @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1)
))
_sym_db.RegisterMessage(ResponseV1)
_sym_db.RegisterMessage(ResponseV1.FlagsEntry)
_RESPONSEV1_FLAGSENTRY.has_options = True
_RESPONSEV1_FLAGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001')
# @@protoc_insertion_point(module_scope)
| [((11, 10, 11, 36), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ({}, {}), '()', True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((16, 13, 21, 1), 'google.protobuf.descriptor.FileDescriptor', '_descriptor.FileDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((27, 13, 48, 1), 'google.protobuf.descriptor.Descriptor', '_descriptor.Descriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((147, 60, 147, 91), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((65, 4, 71, 19), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((78, 36, 78, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((95, 4, 101, 19), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n')] |
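The generated module above only declares the message descriptors; a hedged sketch of using the resulting `ResponseV1` class with the standard protobuf Python runtime is shown below (the flag names are made up for illustration):

```python
# Hedged usage sketch for the generated messages (standard protobuf Python API).
response = ResponseV1()
response.flags['new_dashboard'] = True   # 'flags' is a map<string, bool> field
response.flags['beta_search'] = False

payload = response.SerializeToString()   # wire-format bytes

decoded = ResponseV1()
decoded.ParseFromString(payload)
print(dict(decoded.flags))  # {'new_dashboard': True, 'beta_search': False} (order may vary)
```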
Hellofafar/Leetcode | Medium/102_2.py | 7a459e9742958e63be8886874904e5ab2489411a | # ------------------------------
# Binary Tree Level Order Traversal
#
# Description:
# Given a binary tree, return the level order traversal of its nodes' values. (ie, from
# left to right, level by level).
#
# For example:
# Given binary tree [3,9,20,null,null,15,7],
#     3
#    / \
#   9  20
#      / \
#     15  7
# return its level order traversal as:
# [
#   [3],
#   [9,20],
#   [15,7]
# ]
#
# Version: 2.0
# 11/11/19 by Jianfa
# ------------------------------
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None
class Solution:
    def levelOrder(self, root: TreeNode) -> List[List[int]]:
        if not root:
            return []
        # BFS
        res = []
        queue = [root]
        while queue:
            temp = []  # values of this level of nodes
            children = []  # next level of nodes
            for node in queue:
                temp.append(node.val)
                if node.left:
                    children.append(node.left)
                if node.right:
                    children.append(node.right)
            res.append(temp[:])  # actually here can be res.append(temp), res will not change as temp changes
            queue = children[:]  # here must be children[:] otherwise queue will change as children changes
        return res
# Used for testing
if __name__ == "__main__":
    test = Solution()
# ------------------------------
# Summary:
# Similar BFS solution but use a little more spaces.
# On 102.py, using list.pop(0) actually takes O(n) time because it needs to remap the index
# of values. Use collections.deque instead.
#
# O(N) time O(N) space | [] |
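The header comment above shows the expected level-order output for [3,9,20,null,null,15,7]; a hypothetical standalone driver is sketched below. As given, the file relies on the LeetCode judge to supply `TreeNode` and `List` (`from typing import List`), so for a standalone run those definitions must be in scope before `class Solution`:

```python
# Hypothetical test driver; TreeNode is defined here because the file above
# only includes it as a comment.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(7)

print(Solution().levelOrder(root))  # [[3], [9, 20], [15, 7]]
```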
qiaone/GIF | mturk/comparison_among_different_models/sample_from_models_for_comparison.py | 2c551e844748c72395fc91fb080c7a2f9c8d5285 | import sys
sys.path.append('../../')
import constants as cnst
import os
os.environ['PYTHONHASHSEED'] = '2'
import tqdm
from model.stg2_generator import StyledGenerator
import numpy as np
from my_utils.visualize_flame_overlay import OverLayViz
from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp
from my_utils.generate_gif import generate_from_flame_sequence
from my_utils.generic_utils import save_set_of_images
from my_utils import compute_fid
import constants
from dataset_loaders import fast_image_reshape
import torch
from my_utils import generic_utils
from my_utils.eye_centering import position_to_given_location
def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond):
if normal_map_cond and texture_cond:
return torch.cat((textured_rndr, norm_map), dim=1)
elif normal_map_cond:
return norm_map
elif texture_cond:
return textured_rndr
else:
return flm_params
# General settings
save_images = True
code_size = 236
use_inst_norm = True
core_tensor_res = 4
resolution = 256
alpha = 1
step_max = int(np.log2(resolution) - 2)
root_out_dir = f'{cnst.output_root}sample/'
num_smpl_to_eval_on = 1000
use_styled_conv_stylegan2 = True
flength = 5000
cam_t = np.array([0., 0., 0])
camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)
run_ids_1 = [29, ] # with sqrt(2)
# run_ids_1 = [7, 24, 8, 3]
# run_ids_1 = [7, 8, 3]
settings_for_runs = \
{24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},
7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},
3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},}
overlay_visualizer = OverLayViz()
# overlay_visualizer.setup_renderer(mesh_file=None)
flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')
fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()
for i, key in enumerate(fl_param_dict):
flame_param = fl_param_dict[key]
flame_param = np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'],
flame_param['tex'], flame_param['lit'].flatten()))
# tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157])
# flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1)
# import ipdb; ipdb.set_trace()
flm_params[i, :] = flame_param.astype('float32')
if i == num_smpl_to_eval_on - 1:
break
batch_size = 64
flame_decoder = overlay_visualizer.deca.flame.eval()
for run_idx in run_ids_1:
# import ipdb; ipdb.set_trace()
generator_1 = torch.nn.DataParallel(
StyledGenerator(embedding_vocab_size=69158,
rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'],
normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],
core_tensor_res=core_tensor_res,
w_truncation_factor=1.0,
apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'],
n_mlp=8)).cuda()
model_idx = settings_for_runs[run_idx]['model_idx']
ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')
generator_1.load_state_dict(ckpt1['generator_running'])
generator_1 = generator_1.eval()
# images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))
pbar.set_description('Generating_images')
flame_mesh_imgs = None
mdl_id = 'mdl2_'
if settings_for_runs[run_idx]['name'] == 'full_model':
mdl_id = 'mdl1_'
for batch_idx in pbar:
flm_batch = flm_params[batch_idx:batch_idx+batch_size, :]
flm_batch = torch.from_numpy(flm_batch).cuda()
flm_batch = position_to_given_location(flame_decoder, flm_batch)
batch_size_true = flm_batch.shape[0]
if settings_for_runs[run_idx]['normal_maps_as_cond'] or \
settings_for_runs[run_idx]['rendered_flame_as_condition']:
cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]
shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]
exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]
pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]
# import ipdb; ipdb.set_trace()
light_code = \
flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3))
texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]
norma_map_img, _, _, _, rend_flm = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam)
rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1
norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear')
norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
else:
rend_flm = None
norma_map_img = None
gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'],
settings_for_runs[run_idx]['rendered_flame_as_condition'])
# torch.manual_seed(2)
identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long,
device='cuda')
mdl_1_gen_images = generic_utils.get_images_from_flame_params(
flame_params=gen_1_in.cpu().numpy(), pose=None,
model=generator_1,
step=step_max, alpha=alpha,
input_indices=identity_embeddings.cpu().numpy())
# import ipdb; ipdb.set_trace()
images = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy()
flame_mesh_imgs = torch.clamp(rend_flm, -1, 1).cpu().numpy()
save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name'])
save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}',
images=(images + 1) / 2, show_prog_bar=True)
#save flam rndr
save_path_current_id_flm_rndr = os.path.join(root_out_dir, 'inter_model_comparison',
settings_for_runs[run_idx]['name'])
save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}',
images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True)
# save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl2_', images=((mdl_2_gen_images + 1) / 2).cpu().numpy()) | [((2, 0, 2, 25), 'sys.path.append', 'sys.path.append', ({(2, 16, 2, 24): '"""../../"""'}, {}), "('../../')", False, 'import sys\n'), ((45, 8, 45, 29), 'numpy.array', 'np.array', ({(45, 17, 45, 28): '[0.0, 0.0, 0]'}, {}), '([0.0, 0.0, 0])', True, 'import numpy as np\n'), ((46, 16, 46, 72), 'my_utils.flm_dynamic_fit_overlay.camera_ringnetpp', 'camera_ringnetpp', (), '', False, 'from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp\n'), ((65, 21, 65, 33), 'my_utils.visualize_flame_overlay.OverLayViz', 'OverLayViz', ({}, {}), '()', False, 'from my_utils.visualize_flame_overlay import OverLayViz\n'), ((96, 12, 96, 83), 'torch.load', 'torch.load', ({(96, 23, 96, 82): 'f"""{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model"""'}, {}), "(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')", False, 'import torch\n'), ((23, 15, 23, 58), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((39, 15, 39, 34), 'numpy.log2', 'np.log2', ({(39, 23, 39, 33): 'resolution'}, {}), '(resolution)', True, 'import numpy as np\n'), ((68, 13, 68, 55), 'numpy.zeros', 'np.zeros', ({(68, 22, 68, 54): '(num_smpl_to_eval_on, code_size)'}, {}), '((num_smpl_to_eval_on, code_size))', True, 'import numpy as np\n'), ((69, 16, 69, 70), 'numpy.load', 'np.load', (), '', True, 'import numpy as np\n'), ((111, 20, 111, 72), 'my_utils.eye_centering.position_to_given_location', 'position_to_given_location', ({(111, 47, 111, 60): 'flame_decoder', (111, 62, 111, 71): 'flm_batch'}, {}), '(flame_decoder, flm_batch)', False, 'from my_utils.eye_centering import position_to_given_location\n'), ((140, 30, 141, 58), 'torch.randint', 'torch.randint', (), '', False, 'import torch\n'), ((153, 31, 153, 119), 'os.path.join', 'os.path.join', ({(153, 44, 153, 56): 'root_out_dir', (153, 58, 153, 82): '"""inter_model_comparison"""', (153, 84, 153, 118): "settings_for_runs[run_idx]['name']"}, {}), "(root_out_dir, 'inter_model_comparison', settings_for_runs[\n run_idx]['name'])", False, 'import os\n'), ((154, 8, 155, 71), 'my_utils.generic_utils.save_set_of_images', 'save_set_of_images', (), '', False, 'from my_utils.generic_utils import save_set_of_images\n'), ((158, 40, 159, 88), 'os.path.join', 'os.path.join', ({(158, 53, 158, 65): 'root_out_dir', (158, 67, 158, 91): '"""inter_model_comparison"""', (159, 53, 159, 87): "settings_for_runs[run_idx]['name']"}, {}), "(root_out_dir, 'inter_model_comparison', settings_for_runs[\n run_idx]['name'])", False, 'import os\n'), ((160, 8, 161, 80), 'my_utils.generic_utils.save_set_of_images', 'save_set_of_images', (), '', False, 'from my_utils.generic_utils import save_set_of_images\n'), ((129, 23, 129, 99), 'dataset_loaders.fast_image_reshape', 'fast_image_reshape', (), '', False, 'from dataset_loaders import fast_image_reshape\n'), ((130, 28, 130, 109), 'dataset_loaders.fast_image_reshape', 'fast_image_reshape', (), '', False, 'from dataset_loaders import fast_image_reshape\n'), ((88, 8, 94, 32), 'model.stg2_generator.StyledGenerator', 'StyledGenerator', (), '', False, 'from model.stg2_generator import StyledGenerator\n'), ((110, 20, 110, 47), 'torch.from_numpy', 'torch.from_numpy', ({(110, 37, 110, 46): 'flm_batch'}, {}), '(flm_batch)', False, 'import torch\n'), ((127, 23, 127, 50), 'torch.clamp', 'torch.clamp', ({(127, 35, 127, 43): 'rend_flm', (127, 45, 127, 46): '(0)', (127, 48, 127, 49): '(1)'}, {}), '(rend_flm, 0, 1)', False, 'import torch\n'), ((128, 28, 128, 
60), 'torch.clamp', 'torch.clamp', ({(128, 40, 128, 53): 'norma_map_img', (128, 55, 128, 56): '(0)', (128, 58, 128, 59): '(1)'}, {}), '(norma_map_img, 0, 1)', False, 'import torch\n'), ((149, 17, 149, 53), 'torch.clamp', 'torch.clamp', ({(149, 29, 149, 45): 'mdl_1_gen_images', (149, 47, 149, 49): '-1', (149, 51, 149, 52): '1'}, {}), '(mdl_1_gen_images, -1, 1)', False, 'import torch\n'), ((150, 26, 150, 54), 'torch.clamp', 'torch.clamp', ({(150, 38, 150, 46): 'rend_flm', (150, 48, 150, 50): '-1', (150, 52, 150, 53): '1'}, {}), '(rend_flm, -1, 1)', False, 'import torch\n')] |
philippWassibauer/django-activity-stream | setup.py | 766a372aea4803ef5fe051a5de16dde5b5efcc72 |
from distutils.core import setup
""" django-activity-stream instalation script """
setup(
    name = 'activity_stream',
    description = 'generic activity feed system for users',
    author = 'Philipp Wassibauer',
    author_email = '[email protected]',
    url='http://github.com/philippWassibauer/django-activity-stream',
    download_url='http://github.com/philippWassibauer/django-activity-stream/tarball/master',
    license='MIT',
    version = __import__('activity_stream').__version__,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| [] |
study-machine-learning/dongheon.shin | 02/selenium.02.py | 6103ef9c73b162603bc39a27e4ecca0f1ac35e57 | from selenium import webdriver
username = "henlix"
password = "my_password"
browser = webdriver.PhantomJS()
browser.implicitly_wait(5)
url_login = "https://nid.naver.com/nidlogin.login"
browser.get(url_login)
el = browser.find_element_by_id("id")
el.clear()
el.send_keys(username)
el = browser.find_element_by_id("pw")
el.clear()
el.send_keys(password)
form = browser.find_element_by_css_selector("input.btn_global[type=submit]")
form.submit()
url_shopping_list = "https://order.pay.naver.com/home?tabMenu=SHOPPING"
browser.get(url_shopping_list)
products = browser.find_elements_by_css_selector(".p_info span")
for product in products:
print("- ", product.text)
# PYTHONIOENCODING=utf-8:surrogateescape python3 selenium.02.py
| [((6, 10, 6, 31), 'selenium.webdriver.PhantomJS', 'webdriver.PhantomJS', ({}, {}), '()', False, 'from selenium import webdriver\n')] |
Licas/datascienceexamples | visualization/matplotlib/barwitherror.py | cbb1293dbae875cb3f166dbde00b2ab629a43ece | from matplotlib import pyplot as plt
drinks = ["cappuccino", "latte", "chai", "americano", "mocha", "espresso"]
ounces_of_milk = [6, 9, 4, 0, 9, 0]
error = [0.6, 0.9, 0.4, 0, 0.9, 0]
# yerr -> the element at index i draws a +/- error[i] error bar on bar[i]'s value
plt.bar( range(len(drinks)),ounces_of_milk, yerr=error, capsize=15)
plt.show() | [((9, 0, 9, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n')] |
matan-xmcyber/content | Packs/mnemonicMDR/Integrations/ArgusManagedDefence/ArgusManagedDefence.py | 7f02301c140b35956af3cd20cb8dfc64f34afb3e | import demistomock as demisto
from CommonServerPython import *
""" IMPORTS """
import json
import urllib3
import dateparser
import traceback
from typing import Any, Dict, List, Union
import logging
from argus_api import session as argus_session
from argus_api.api.currentuser.v1.user import get_current_user
from argus_api.api.cases.v2.case import (
add_case_tag,
add_comment,
advanced_case_search,
close_case,
create_case,
delete_case,
delete_comment,
download_attachment,
edit_comment,
get_attachment,
get_case_metadata_by_id,
list_case_attachments,
list_case_tags,
list_case_comments,
remove_case_tag_by_id,
remove_case_tag_by_key_value,
update_case,
)
from argus_api.api.events.v1 import get_event_by_path
from argus_api.api.events.v1.case.case import get_events_for_case
from argus_api.api.events.v1.aggregated import (
find_aggregated_events,
list_aggregated_events,
)
from argus_api.api.events.v1.payload import get_payload
from argus_api.api.events.v1.pcap import get_pcap
from argus_api.api.events.v1.nids import find_n_i_d_s_events, list_n_i_d_s_events
from argus_api.api.pdns.v3.search import search_records
from argus_api.api.reputation.v1.observation import (
fetch_observations_for_domain,
fetch_observations_for_i_p,
)
# Disable insecure warnings
urllib3.disable_warnings()
""" CONSTANTS """
DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
PRETTY_DATE_FORMAT = "%b %d, %Y, %H:%M:%S"
FETCH_TAG = demisto.params().get("fetch_tag")
""" HELPER FUNCTIONS """
def set_argus_settings(
api_key: str, base_url: str = None, proxies: dict = None, verify: bool = None
):
argus_session.api_key = api_key
argus_session.base_url = base_url
argus_session.proxies = proxies
argus_session.verify = verify
def argus_priority_to_demisto_severity(priority: str) -> int:
mapping = {"low": 1, "medium": 2, "high": 3, "critical": 4}
return mapping.get(priority, 0)
def argus_status_to_demisto_status(status: str) -> int:
mapping = {
"pendingCustomer": 0,
"pendingSoc": 0,
"pendingVendor": 0,
"pendingClose": 0,
"workingSoc": 1,
"workingCustomer": 1,
"closed": 2,
}
return mapping.get(status, 0)
def build_argus_priority_from_min_severity(min_severity: str) -> List[str]:
severities = ["low", "medium", "high", "critical"]
min_severity_list = []
for severity in severities:
if argus_priority_to_demisto_severity(
min_severity.lower()
) <= argus_priority_to_demisto_severity(severity):
min_severity_list.append(severity)
return min_severity_list
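# For illustration (behaviour of the helper above): min_severity="high" returns
# ["high", "critical"], while min_severity="low" returns the full
# ["low", "medium", "high", "critical"] list used when filtering Argus cases by priority.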
def parse_first_fetch(first_fetch: Any) -> Any:
if isinstance(first_fetch, str):
if first_fetch[0] != "-":
first_fetch = f"-{first_fetch}"
return first_fetch
def build_tags_from_list(lst: list) -> List[Dict]:
if not lst:
return []
if len(lst) % 2 != 0:
return []
tags = []
for i in range(0, len(lst), 2):
tags.append({"key": lst[i], "value": lst[i + 1]})
return tags
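# Example (behaviour of the helper above): build_tags_from_list(["env", "prod", "team", "soc"])
# returns [{"key": "env", "value": "prod"}, {"key": "team", "value": "soc"}];
# an empty or odd-length list yields [].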
def str_to_dict(string: str) -> dict:
if not string:
return {}
lst = argToList(string)
if len(lst) % 2 != 0:
return {}
return {lst[i]: lst[i + 1] for i in range(0, len(lst), 2)}
def date_time_to_epoch_milliseconds(date_time: Union[datetime, str] = None) -> int:
if isinstance(date_time, datetime):
return int(date_time.timestamp() * 1000)
if isinstance(date_time, str):
return date_time_to_epoch_milliseconds(dateparser.parse(date_time))
return int(datetime.now().timestamp() * 1000)
def pretty_print_date(date_time: Union[datetime, str] = None) -> str:
if isinstance(date_time, datetime):
return date_time.strftime(PRETTY_DATE_FORMAT)
if isinstance(date_time, str):
return pretty_print_date(dateparser.parse(date_time))
return datetime.now().strftime(PRETTY_DATE_FORMAT)
def pretty_print_case_metadata(result: dict, title: str = None) -> str:
data = result["data"]
string = title if title else f"# #{data['id']}: {data['subject']}\n"
string += "_Priority: {}, status: {}, last updated: {}_\n".format(
data["priority"], data["status"], pretty_print_date(data["lastUpdatedTime"])
)
string += "Reported by {} at {}\n\n".format(
data["publishedByUser"]["name"], pretty_print_date(data["publishedTime"])
)
string += data["description"]
return string
def pretty_print_comment(comment: dict, title: str = None) -> str:
string = title if title else ""
string += f"#### *{comment['addedByUser']['userName']} - {pretty_print_date(comment['addedTime'])}*\n"
string += (
f"_Last updated {pretty_print_date(comment['lastUpdatedTime'])}_\n"
if comment["lastUpdatedTime"]
else ""
)
string += f"{comment['comment']}\n\n"
string += f"_id: {comment['id']}_\n"
string += f"_Flags: {str(comment['flags'])}_\n" if comment["flags"] else ""
string += "* * *\n"
return string
def pretty_print_comments(comments: list, title: str = None) -> str:
string = title if title else ""
for comment in comments:
string += pretty_print_comment(comment)
return string
def pretty_print_events(result: dict, title: str = None) -> str:
string = title if title else ""
string += "_Count: {}, showing {} events, from {} to {}_\n".format(
result["count"], result["size"], result["offset"], result["limit"]
)
string += tableToMarkdown("Events", result["data"])
return string
""" COMMAND FUNCTIONS """
def test_module_command() -> str:
response = get_current_user()
if response["responseCode"] == 200:
return "ok"
return (
f"Unable to communicate with Argus API {response['responseCode']}, {response}"
)
def fetch_incidents(
last_run: dict, first_fetch_period: str, limit: int = 25, min_severity: str = "low"
):
start_timestamp = last_run.get("start_time", None) if last_run else None
# noinspection PyTypeChecker
result = advanced_case_search(
startTimestamp=start_timestamp if start_timestamp else first_fetch_period,
endTimestamp="now",
limit=limit,
sortBy=["createdTimestamp"],
priority=build_argus_priority_from_min_severity(min_severity),
subCriteria=[
{"exclude": True, "status": ["closed"]},
],
timeFieldStrategy=["createdTimestamp"],
)
incidents = []
for case in result["data"]:
incidents.append(
{
"name": f"#{case['id']}: {case['subject']}",
"occurred": case["createdTime"],
"severity": argus_priority_to_demisto_severity(case["priority"]),
"status": argus_status_to_demisto_status(case["status"]),
"details": case["description"],
"customFields": {
"argus_id": str(case["id"]),
"type": case["type"],
"category": case["category"]["name"] if case["category"] else None,
"service": case["service"]["name"],
"lastUpdatedTime": case["lastUpdatedTime"],
"createdTimestamp": case["createdTimestamp"],
"customer": case["customer"]["shortName"],
},
"rawJSON": json.dumps(case),
}
)
if result["data"]:
last_run["start_time"] = str(result["data"][-1]["createdTimestamp"] + 1)
return last_run, incidents
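# Note on the fetch cursor (assumed to be epoch milliseconds, as used elsewhere in Argus):
# "start_time" stores the createdTimestamp of the newest case returned plus one, so the next
# fetch_incidents call only picks up cases created after those already ingested.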
def add_case_tag_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
key = args.get("key", None)
value = args.get("value", None)
if not case_id:
raise ValueError("case_id not specified")
if not key:
raise ValueError("key not specified")
if not value:
raise ValueError("value not specified")
tag = {"key": key, "value": value}
result = add_case_tag(caseID=case_id, tags=tag)
headers = ["key", "value", "addedTime"]
readable_output = tableToMarkdown(
f"#{case_id}: Tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def add_comment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
comment = args.get("comment", None)
if not case_id:
raise ValueError("case_id not specified")
if not comment:
raise ValueError("comment not specified")
result = add_comment(
caseID=case_id,
comment=comment,
asReplyTo=args.get("as_reply_to", None),
internal=args.get("internal", None),
originEmailAddress=args.get("origin_email_address", None),
associatedAttachmentID=args.get("associated_attachment_id", None),
)
return CommandResults(
readable_output=pretty_print_comment(
result["data"], f"# #{case_id}: Added comment\n"
),
outputs_prefix="Argus.Comment",
outputs=result,
raw_response=result,
)
def advanced_case_search_command(args: Dict[str, Any]) -> CommandResults:
# noinspection PyTypeChecker
result = advanced_case_search(
startTimestamp=args.get("start_timestamp", None),
endTimestamp=args.get("end_timestamp", None),
limit=args.get("limit", None),
offset=args.get("offset", None),
includeDeleted=args.get("include_deleted", None),
subCriteria=argToList(args.get("sub_criteria", None)),
exclude=args.get("exclude", None),
required=args.get("required", None),
customerID=argToList(args.get("customer_id", None)),
caseID=argToList(args.get("case_id", None)),
customer=argToList(args.get("customer", None)),
type=argToList(args.get("case_type", None)),
service=argToList(args.get("service", None)),
category=argToList(args.get("category", None)),
status=argToList(args.get("status", None)),
priority=argToList(args.get("priority", None)),
assetID=argToList(args.get("asset_id", None)),
tag=argToList(args.get("tag", None)),
workflow=argToList(args.get("workflow", None)),
field=argToList(args.get("field", None)),
keywords=argToList(args.get("keywords", None)),
timeFieldStrategy=argToList(args.get("time_field_strategy", None)),
timeMatchStrategy=args.get("time_match_strategy", None),
keywordFieldStrategy=argToList(args.get("keyword_field_strategy", None)),
keywordMatchStrategy=args.get("keyword_match_strategy", None),
user=argToList(args.get("user", None)),
userFieldStrategy=argToList(args.get("user_field_strategy", None)),
userAssigned=args.get("user_assigned", None),
techAssigned=args.get("tech_assigned", None),
includeWorkflows=args.get("include_workflows", None),
includeDescription=args.get("include_description", None),
accessMode=argToList(args.get("access_mode", None)),
explicitAccess=argToList(args.get("explicit_access", None)),
sortBy=argToList(args.get("sort_by", None)),
includeFlags=argToList(args.get("include_flags", None)),
excludeFlags=argToList(args.get("exclude_flags", None)),
)
readable_output = f"Advanced Case Search: {result['count']} result(s)\n"
readable_output += tableToMarkdown(
"Output not suitable for playground", result["data"]
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Cases",
outputs=result,
raw_response=result,
)
def close_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case_id not specified")
result = close_case(
caseID=case_id,
comment=args.get("comment", None),
)
readable_output = f"# #{case_id}: close case\n"
readable_output += (
f"_Status: {result['data']['status']}, at: {result['data']['closedTime']}_"
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def create_case_command(args: Dict[str, Any]) -> CommandResults:
subject = args.get("subject", None)
description = args.get("description", None)
service = args.get("service", None)
case_type = args.get("type", None)
tags = args.get("tags", None)
if not subject:
raise ValueError("subject not specified")
if not description:
raise ValueError("description not specified")
if not service:
raise ValueError("service not specified")
if not case_type:
raise ValueError("case_type not specified")
if tags:
tags = str(tags).split(",")
if len(tags) % 2 != 0:
raise ValueError("tags list must be of even number", tags)
tags = build_tags_from_list(tags)
result = create_case(
customer=args.get("customer", None),
service=service,
category=args.get("category", None),
type=case_type,
status=args.get("status", None),
tags=tags,
subject=subject,
description=description,
customerReference=args.get("customer_reference", None),
priority=args.get("priority", None),
accessMode=args.get("access_mode", None),
originEmailAddress=args.get("origin_email_address", None),
publish=args.get("publish", None),
defaultWatchers=args.get("default_watchers", None),
)
return CommandResults(
readable_output=pretty_print_case_metadata(result),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def delete_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = delete_case(caseID=case_id)
return CommandResults(
readable_output=pretty_print_case_metadata(result, "Case deleted"),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def delete_comment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
comment_id = args.get("comment_id", None)
if not case_id:
raise ValueError("case id not specified")
if not comment_id:
raise ValueError("comment id not specified")
result = delete_comment(caseID=case_id, commentID=comment_id)
return CommandResults(
readable_output=pretty_print_comment(
result["data"], f"# #{case_id}: Deleted comment\n"
),
outputs_prefix="Argus.Comment",
outputs=result,
raw_response=result,
)
def download_attachment_command(args: Dict[str, Any]) -> Any:
case_id = args.get("case_id", None)
attachment_id = args.get("attachment_id", None)
if not case_id:
raise ValueError("case id not specified")
if not attachment_id:
raise ValueError("attachment id not specified")
result = download_attachment(caseID=case_id, attachmentID=attachment_id)
return fileResult(attachment_id, result.content)
def edit_comment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
comment_id = args.get("comment_id", None)
comment = args.get("comment", None)
if not case_id:
raise ValueError("case id not specified")
if not comment_id:
raise ValueError("comment id not specified")
if not comment:
raise ValueError("comment not specified")
result = edit_comment(caseID=case_id, commentID=comment_id, comment=comment)
return CommandResults(
readable_output=pretty_print_comment(
result["data"], f"# #{case_id}: Updated comment\n"
),
outputs_prefix="Argus.Comment",
outputs=result,
raw_response=result,
)
def get_attachment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
attachment_id = args.get("attachment_id", None)
if not case_id:
raise ValueError("case id not specified")
if not attachment_id:
raise ValueError("attachment id not specified")
result = get_attachment(caseID=case_id, attachmentID=attachment_id)
readable_output = f"# #{case_id}: attachment metadata\n"
readable_output += f"#### *{result['data']['addedByUser']['userName']} - {result['data']['addedTime']}*\n"
readable_output += f"{result['data']['name']} ({result['data']['mimeType']}, {result['data']['size']} bytes)\n\n"
readable_output += f"_id: {result['data']['id']}_\n"
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Attachments",
outputs=result,
raw_response=result,
)
def get_case_metadata_by_id_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = get_case_metadata_by_id(
id=case_id, skipRedirect=args.get("skip_redirect", None)
)
return CommandResults(
readable_output=pretty_print_case_metadata(result),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def list_case_attachments_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case_id not specified")
result = list_case_attachments(
caseID=case_id, limit=args.get("limit", None), offset=args.get("offset", None)
)
readable_output = f"# #{case_id}: Case attachments\n"
for attachment in result["data"]:
readable_output += f"#### *{attachment['addedByUser']['userName']} - {attachment['addedTime']}*\n"
readable_output += f"{attachment['name']} ({attachment['mimeType']}, {attachment['size']} kb)\n\n"
readable_output += f"_id: {attachment['id']}_\n"
readable_output += "* * *\n"
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Attachments",
outputs=result,
raw_response=result,
)
def list_case_tags_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case_id not specified")
result = list_case_tags(
caseID=case_id, limit=args.get("limit", None), offset=args.get("offset", None)
)
headers = ["key", "value", "addedTime", "id"]
readable_output = tableToMarkdown(
f"#{case_id}: Tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def list_case_comments_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
sort_by = args.get("sort_by", None)
if not case_id:
raise ValueError("case_id not specified")
if sort_by:
sort_by = ["addedTimestamp"] if sort_by == "ascending" else ["-addedTimestamp"]
result = list_case_comments(
caseID=case_id,
beforeComment=args.get("before_comment", None),
afterComment=args.get("after_comment", None),
offset=args.get("offset", None),
limit=args.get("limit", None),
sortBy=sort_by,
)
return CommandResults(
readable_output=pretty_print_comments(
result["data"], f"# #{case_id}: Comments\n"
),
outputs_prefix="Argus.Comments",
outputs=result,
raw_response=result,
)
def remove_case_tag_by_id_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
tag_id = args.get("tag_id", None)
if not case_id:
raise ValueError("case id not specified")
if not tag_id:
raise ValueError("tag id not specified")
result = remove_case_tag_by_id(caseID=case_id, tagID=tag_id)
headers = ["key", "value", "addedTime", "id", "flags"]
readable_output = tableToMarkdown(
f"#{case_id}: Delete tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def remove_case_tag_by_key_value_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
key = args.get("key", None)
value = args.get("value", None)
if not case_id:
raise ValueError("case id not specified")
if not key:
raise ValueError("key not specified")
if not value:
raise ValueError("value not specified")
result = remove_case_tag_by_key_value(caseID=case_id, tagKey=key, tagValue=value)
headers = ["key", "value", "addedTime", "id", "flags"]
readable_output = tableToMarkdown(
f"#{case_id}: Delete tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def update_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = update_case(
id=case_id,
subject=args.get("subject", None),
description=args.get("description", None),
status=args.get("status", None),
priority=args.get("priority", None),
category=args.get("category", None),
reporter=args.get("reporter", None),
assignedUser=args.get("assigned_user", None),
assignedTech=args.get("assigned_tech", None),
customerReference=args.get("customer_reference", None),
comment=args.get("comment", None),
originEmailAddress=args.get("origin_email_address", None),
hasEvents=args.get("has_events", None),
internalComment=args.get("internal_comment", None),
)
return CommandResults(
readable_output=pretty_print_case_metadata(result),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def get_event_command(args: Dict[str, Any]) -> CommandResults:
event_type = args.get("type", None)
timestamp = args.get("timestamp", None)
customer_id = args.get("customer_id", None)
event_id = args.get("event_id", None)
if not event_type:
raise ValueError("event type not specified")
if not timestamp:
raise ValueError("timestamp not specified")
if not customer_id:
raise ValueError("customer id not specified")
if not event_id:
raise ValueError("event id not specified")
result = get_event_by_path(
type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id
)
return CommandResults(
readable_output=tableToMarkdown(f"Event: {event_id}", result["data"]),
outputs_prefix="Argus.Event",
outputs=result,
raw_response=result,
)
def get_events_for_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = get_events_for_case(
caseID=case_id, limit=args.get("limit", None), offset=args.get("offset", None)
)
return CommandResults(
readable_output=pretty_print_events(
dict(result), f"# #{case_id}: Associated Events\n"
),
outputs_prefix="Argus.Events",
outputs=result,
raw_response=result,
)
def find_aggregated_events_command(args: Dict[str, Any]) -> CommandResults:
# noinspection PyTypeChecker
result = find_aggregated_events(
skipFutureEvents=args.get("skip_future_events", None),
exclude=args.get("exclude", None),
locationID=argToList(args.get("location_id", None)),
severity=argToList(args.get("severity", None)),
customer=argToList(args.get("customer", None)),
alarmID=argToList(args.get("alarm_id", None)),
attackCategoryID=argToList(args.get("attack_category_id", None)),
sourceGeoCountry=argToList(args.get("source_geo_country", None)),
destinationGeoCountry=argToList(args.get("destination_geo_country", None)),
geoCountry=argToList(args.get("geo_country", None)),
properties=str_to_dict(args.get("properties", None)),
exactMatchProperties=args.get("exact_match_properties", None),
subCriteria=argToList(args.get("sub_criteria", None)),
signature=argToList(args.get("signature", None)),
lastUpdatedTimestamp=args.get("last_updated_timestamp", None),
indexStartTime=args.get("index_start_time", None),
indexEndTime=args.get("index_end_time", None),
destinationIP=argToList(args.get("destination_ip", None)),
sourceIP=argToList(args.get("source_ip", None)),
ip=argToList(args.get("ip", None)),
destinationPort=argToList(args.get("destination_port", None)),
sourcePort=argToList(args.get("source_port", None)),
port=argToList(args.get("port", None)),
minSeverity=args.get("min_severity", None),
maxSeverity=args.get("max_severity", None),
limit=args.get("limit", 25),
offset=args.get("offset", None),
includeDeleted=args.get("include_deleted", None),
minCount=args.get("min_count", None),
associatedCaseID=argToList(args.get("associated_case_id", None)),
sourceIPMinBits=args.get("source_ip_min_bits", None),
destinationIPMinBits=args.get("destination_ip_min_bits", None),
startTimestamp=args.get("start_timestamp", "-24hours"),
endTimestamp=args.get("end_timestamp", "now"),
sortBy=argToList(args.get("sort_by", None)),
includeFlags=argToList(args.get("include_flags", None)),
excludeFlags=argToList(args.get("exclude_flags", None)),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# Find events\n"),
outputs_prefix="Argus.Events",
outputs=result,
raw_response=result,
)
def list_aggregated_events_command(args: Dict[str, Any]) -> CommandResults:
result = list_aggregated_events(
customerID=args.get("customer_id", None),
signature=args.get("signature", None),
ip=args.get("ip", None),
startTimestamp=args.get("start_timestamp", None),
endTimestamp=args.get("end_timestamp", None),
limit=args.get("limit", None),
offset=args.get("offset", None),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# List Events\n"),
outputs_prefix="Argus.Events",
outputs=result,
raw_response=result,
)
def get_payload_command(args: Dict[str, Any]) -> CommandResults:
event_type = args.get("type", None)
timestamp = args.get("timestamp", None)
customer_id = args.get("customer_id", None)
event_id = args.get("event_id", None)
if not event_type:
raise ValueError("event type not specified")
if not timestamp:
raise ValueError("timestamp not specified")
if not customer_id:
raise ValueError("customer id not specified")
if not event_id:
raise ValueError("event id not specified")
result = get_payload(
type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id
)
readable_output = "# Event payload\n"
readable_output += f"Event: {event_id}, type: {result['data']['type']}\n"
readable_output += result["data"]["payload"]
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Payload",
outputs=result,
raw_response=result,
)
def get_pcap_command(args: Dict[str, Any]) -> Any:
event_type = args.get("type", None)
timestamp = args.get("timestamp", None)
customer_id = args.get("customer_id", None)
event_id = args.get("event_id", None)
if not event_type:
raise ValueError("event type not specified")
if not timestamp:
raise ValueError("timestamp not specified")
if not customer_id:
raise ValueError("customer id not specified")
if not event_id:
raise ValueError("event id not specified")
result = get_pcap(
type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id
)
return fileResult(f"{event_id}_pcap", result.content)
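# Searches NIDS (network intrusion detection) events with the supplied filter criteria.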
def find_nids_events_command(args: Dict[str, Any]) -> CommandResults:
# noinspection PyTypeChecker
result = find_n_i_d_s_events(
skipFutureEvents=args.get("skip_future_events", None),
exclude=args.get("exclude", None),
eventIdentifier=argToList(args.get("event_identifier", None)),
locationID=argToList(args.get("location_id", None)),
severity=argToList(args.get("severity", None)),
customer=argToList(args.get("customer", None)),
alarmID=argToList(args.get("alarm_id", None)),
attackCategoryID=argToList(args.get("attack_category_id", None)),
sourceGeoCountry=argToList(args.get("source_geo_country", None)),
destinationGeoCountry=argToList(args.get("destination_geo_country", None)),
geoCountry=argToList(args.get("geo_country", None)),
properties=str_to_dict(args.get("properties", None)),
exactMatchProperties=args.get("exact_match_properties", None),
sensorID=argToList(args.get("sensor_id", None)),
subCriteria=argToList(args.get("sub_criteria", None)),
signature=argToList(args.get("signature", None)),
lastUpdatedTimestamp=args.get("last_updated_timestamp", None),
indexStartTime=args.get("index_start_time", None),
indexEndTime=args.get("index_end_time", None),
destinationIP=argToList(args.get("destination_ip", None)),
sourceIP=argToList(args.get("source_ip", None)),
ip=argToList(args.get("ip", None)),
destinationPort=argToList(args.get("destination_port", None)),
sourcePort=argToList(args.get("source_port", None)),
port=argToList(args.get("port", None)),
minSeverity=args.get("min_severity", None),
maxSeverity=args.get("max_severity", None),
limit=args.get("limit", 25),
offset=args.get("offset", None),
includeDeleted=args.get("include_deleted", None),
startTimestamp=args.get("start_timestamp", "-24hours"),
endTimestamp=args.get("end_timestamp", "now"),
sortBy=argToList(args.get("sort_by", None)),
includeFlags=argToList(args.get("include_flags", None)),
excludeFlags=argToList(args.get("exclude_flags", None)),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# Find NIDS Events\n"),
outputs_prefix="Argus.NIDS",
outputs=result,
raw_response=result,
)
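# Lists NIDS events using a small set of simple filters (customer, signature, IP, time range).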
def list_nids_events_command(args: Dict[str, Any]) -> CommandResults:
result = list_n_i_d_s_events(
customerID=args.get("customer_id", None),
signature=args.get("signature", None),
ip=args.get("ip", None),
startTimestamp=args.get("start_timestamp", None),
endTimestamp=args.get("end_timestamp", None),
limit=args.get("limit", None),
offset=args.get("offset", None),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# List NIDS Events\n"),
outputs_prefix="Argus.NIDS",
outputs=result,
raw_response=result,
)
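# Passive DNS search; 'query' is required, the remaining arguments narrow the record set.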
def search_records_command(args: Dict[str, Any]) -> CommandResults:
query = args.get("query", None)
if not query:
raise ValueError("query not specified")
# noinspection PyTypeChecker
result = search_records(
query=query,
aggregateResult=args.get("aggregate_result", None),
includeAnonymousResults=args.get("include_anonymous_results", None),
rrClass=argToList(args.get("rr_class", None)),
rrType=argToList(args.get("rr_type", None)),
customerID=argToList(args.get("customer_id", None)),
        tlp=argToList(args.get("tlp", None)),
limit=args.get("limit", 25),
offset=args.get("offset", None),
)
return CommandResults(
readable_output=tableToMarkdown("PDNS records", result["data"]),
outputs_prefix="Argus.PDNS",
outputs=result,
raw_response=result,
)
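# Fetches reputation observations for a fully qualified domain name.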
def fetch_observations_for_domain_command(args: Dict[str, Any]) -> CommandResults:
fqdn = args.get("fqdn", None)
if not fqdn:
raise ValueError("fqdn not specified")
result = fetch_observations_for_domain(fqdn=fqdn)
return CommandResults(
readable_output=tableToMarkdown(
f'Domain observations for "{fqdn}"', result["data"]
),
outputs_prefix="Argus.ObservationsDomain",
outputs=result,
raw_response=result,
)
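# Fetches reputation observations for an IP address.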
def fetch_observations_for_i_p_command(args: Dict[str, Any]) -> CommandResults:
ip = args.get("ip", None)
if not ip:
raise ValueError("ip not specified")
result = fetch_observations_for_i_p(ip=ip)
return CommandResults(
readable_output=tableToMarkdown(f'IP observations for "{ip}"', result["data"]),
outputs_prefix="Argus.ObservationsIP",
outputs=result,
raw_response=result,
)
""" MAIN FUNCTION """
def main() -> None:
logging.getLogger("argus_cli").setLevel("WARNING")
first_fetch_period = parse_first_fetch(
demisto.params().get("first_fetch", "-1 day")
)
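    # Configure the Argus API client: API key, base URL, proxy settings and the 'insecure' flag.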
set_argus_settings(
demisto.params().get("api_key"),
demisto.params().get("api_url"),
handle_proxy(),
demisto.params().get("insecure", None),
)
demisto.debug(f"Command being called is {demisto.command()}")
try:
if demisto.command() == "test-module":
# This is the call made when pressing the integration Test button.
return_results(test_module_command())
elif demisto.command() == "fetch-incidents":
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, incidents = fetch_incidents(
last_run=demisto.getLastRun(),
first_fetch_period=first_fetch_period,
limit=demisto.params().get("max_fetch", 25),
min_severity=demisto.params().get("min_severity", "low"),
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == "argus-add-case-tag":
return_results(add_case_tag_command(demisto.args()))
elif demisto.command() == "argus-add-comment":
return_results(add_comment_command(demisto.args()))
elif demisto.command() == "argus-advanced-case-search":
return_results(advanced_case_search_command(demisto.args()))
elif demisto.command() == "argus-close-case":
return_results(close_case_command(demisto.args()))
elif demisto.command() == "argus-create-case":
return_results(create_case_command(demisto.args()))
elif demisto.command() == "argus-delete-case":
return_results(delete_case_command(demisto.args()))
elif demisto.command() == "argus-delete-comment":
return_results(delete_comment_command(demisto.args()))
elif demisto.command() == "argus-download-attachment":
return_results(download_attachment_command(demisto.args()))
elif demisto.command() == "argus-edit-comment":
return_results(edit_comment_command(demisto.args()))
elif demisto.command() == "argus-get-attachment":
return_results(get_attachment_command(demisto.args()))
elif demisto.command() == "argus-get-case-metadata-by-id":
return_results(get_case_metadata_by_id_command(demisto.args()))
elif demisto.command() == "argus-list-case-attachments":
return_results(list_case_attachments_command(demisto.args()))
elif demisto.command() == "argus-list-case-tags":
return_results(list_case_tags_command(demisto.args()))
elif demisto.command() == "argus-list-case-comments":
return_results(list_case_comments_command(demisto.args()))
elif demisto.command() == "argus-remove-case-tag-by-id":
return_results(remove_case_tag_by_id_command(demisto.args()))
elif demisto.command() == "argus-remove-case-tag-by-key-value":
return_results(remove_case_tag_by_key_value_command(demisto.args()))
elif demisto.command() == "argus-update-case":
return_results(update_case_command(demisto.args()))
elif demisto.command() == "argus-get-event":
return_results(get_event_command(demisto.args()))
elif demisto.command() == "argus-get-events-for-case":
return_results(get_events_for_case_command(demisto.args()))
elif demisto.command() == "argus-find-aggregated-events":
return_results(find_aggregated_events_command(demisto.args()))
elif demisto.command() == "argus-list-aggregated-events":
return_results(list_aggregated_events_command(demisto.args()))
elif demisto.command() == "argus-get-payload":
return_results(get_payload_command(demisto.args()))
elif demisto.command() == "argus-get-pcap":
return_results(get_pcap_command(demisto.args()))
elif demisto.command() == "argus-find-nids-events":
return_results(find_nids_events_command(demisto.args()))
elif demisto.command() == "argus-list-nids-events":
return_results(list_nids_events_command(demisto.args()))
elif demisto.command() == "argus-pdns-search-records":
return_results(search_records_command(demisto.args()))
elif demisto.command() == "argus-fetch-observations-for-domain":
return_results(fetch_observations_for_domain_command(demisto.args()))
elif demisto.command() == "argus-fetch-observations-for-ip":
return_results(fetch_observations_for_i_p_command(demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(
f"Failed to execute {demisto.command()} command.\nError:\n{str(e)}"
)
""" ENTRY POINT """
if __name__ in ("__main__", "__builtin__", "builtins"):
main()
| [((57, 0, 57, 26), 'urllib3.disable_warnings', 'urllib3.disable_warnings', ({}, {}), '()', False, 'import urllib3\n'), ((198, 15, 198, 33), 'argus_api.api.currentuser.v1.user.get_current_user', 'get_current_user', ({}, {}), '()', False, 'from argus_api.api.currentuser.v1.user import get_current_user\n'), ((261, 13, 261, 51), 'argus_api.api.cases.v2.case.add_case_tag', 'add_case_tag', (), '', False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((424, 13, 424, 40), 'argus_api.api.cases.v2.case.delete_case', 'delete_case', (), '', False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((442, 13, 442, 65), 'argus_api.api.cases.v2.case.delete_comment', 'delete_comment', (), '', False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((462, 13, 462, 76), 'argus_api.api.cases.v2.case.download_attachment', 'download_attachment', (), '', False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((478, 13, 478, 80), 'argus_api.api.cases.v2.case.edit_comment', 'edit_comment', (), '', False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((498, 13, 498, 71), 'argus_api.api.cases.v2.case.get_attachment', 'get_attachment', (), '', False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((608, 13, 608, 64), 'argus_api.api.cases.v2.case.remove_case_tag_by_id', 'remove_case_tag_by_id', (), '', False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((633, 13, 633, 85), 'argus_api.api.cases.v2.case.remove_case_tag_by_key_value', 
'remove_case_tag_by_key_value', (), '', False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((691, 13, 693, 5), 'argus_api.api.events.v1.get_event_by_path', 'get_event_by_path', (), '', False, 'from argus_api.api.events.v1 import get_event_by_path\n'), ((804, 13, 806, 5), 'argus_api.api.events.v1.payload.get_payload', 'get_payload', (), '', False, 'from argus_api.api.events.v1.payload import get_payload\n'), ((832, 13, 834, 5), 'argus_api.api.events.v1.pcap.get_pcap', 'get_pcap', (), '', False, 'from argus_api.api.events.v1.pcap import get_pcap\n'), ((935, 13, 935, 53), 'argus_api.api.reputation.v1.observation.fetch_observations_for_domain', 'fetch_observations_for_domain', (), '', False, 'from argus_api.api.reputation.v1.observation import fetch_observations_for_domain, fetch_observations_for_i_p\n'), ((951, 13, 951, 46), 'argus_api.api.reputation.v1.observation.fetch_observations_for_i_p', 'fetch_observations_for_i_p', (), '', False, 'from argus_api.api.reputation.v1.observation import fetch_observations_for_domain, fetch_observations_for_i_p\n'), ((63, 12, 63, 28), 'demistomock.params', 'demisto.params', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((138, 47, 138, 74), 'dateparser.parse', 'dateparser.parse', ({(138, 64, 138, 73): 'date_time'}, {}), '(date_time)', False, 'import dateparser\n'), ((146, 33, 146, 60), 'dateparser.parse', 'dateparser.parse', ({(146, 50, 146, 59): 'date_time'}, {}), '(date_time)', False, 'import dateparser\n'), ((964, 4, 964, 34), 'logging.getLogger', 'logging.getLogger', ({(964, 22, 964, 33): '"""argus_cli"""'}, {}), "('argus_cli')", False, 'import logging\n'), ((979, 11, 979, 28), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((240, 27, 240, 43), 'json.dumps', 'json.dumps', ({(240, 38, 240, 42): 'case'}, {}), '(case)', False, 'import json\n'), ((967, 8, 967, 24), 'demistomock.params', 'demisto.params', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((971, 8, 971, 24), 'demistomock.params', 'demisto.params', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((972, 8, 972, 24), 'demistomock.params', 'demisto.params', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((974, 8, 974, 24), 'demistomock.params', 'demisto.params', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((977, 45, 977, 62), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((983, 13, 983, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((992, 12, 992, 40), 'demistomock.setLastRun', 'demisto.setLastRun', ({(992, 31, 992, 39): 'next_run'}, {}), '(next_run)', True, 'import demistomock as demisto\n'), ((993, 12, 993, 40), 'demistomock.incidents', 'demisto.incidents', ({(993, 30, 993, 39): 'incidents'}, {}), '(incidents)', True, 'import demistomock as demisto\n'), ((1081, 22, 1081, 44), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n'), ((995, 13, 995, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((986, 25, 986, 45), 'demistomock.getLastRun', 'demisto.getLastRun', ({}, {}), '()', True, 
'import demistomock as demisto\n'), ((998, 13, 998, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1083, 33, 1083, 50), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((996, 48, 996, 62), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1001, 13, 1001, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((988, 22, 988, 38), 'demistomock.params', 'demisto.params', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((989, 29, 989, 45), 'demistomock.params', 'demisto.params', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((999, 47, 999, 61), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1004, 13, 1004, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1002, 56, 1002, 70), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1007, 13, 1007, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1005, 46, 1005, 60), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1010, 13, 1010, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1008, 47, 1008, 61), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1013, 13, 1013, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1011, 47, 1011, 61), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1016, 13, 1016, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1014, 50, 1014, 64), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1019, 13, 1019, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1017, 55, 1017, 69), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1022, 13, 1022, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1020, 48, 1020, 62), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1025, 13, 1025, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1023, 50, 1023, 64), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1028, 13, 1028, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1026, 59, 1026, 73), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1031, 13, 1031, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1029, 57, 1029, 71), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1034, 13, 1034, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1032, 50, 1032, 64), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1037, 13, 1037, 30), 
'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1035, 54, 1035, 68), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1040, 13, 1040, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1038, 57, 1038, 71), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1043, 13, 1043, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1041, 64, 1041, 78), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1046, 13, 1046, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1044, 47, 1044, 61), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1049, 13, 1049, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1047, 45, 1047, 59), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1052, 13, 1052, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1050, 55, 1050, 69), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1055, 13, 1055, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1053, 58, 1053, 72), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1058, 13, 1058, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1056, 58, 1056, 72), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1061, 13, 1061, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1059, 47, 1059, 61), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1064, 13, 1064, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1062, 44, 1062, 58), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1067, 13, 1067, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1065, 52, 1065, 66), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1070, 13, 1070, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1068, 52, 1068, 66), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1073, 13, 1073, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1071, 50, 1071, 64), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1076, 13, 1076, 30), 'demistomock.command', 'demisto.command', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1074, 65, 1074, 79), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((1077, 62, 1077, 76), 'demistomock.args', 'demisto.args', ({}, {}), '()', True, 'import demistomock as demisto\n')] |
SnowWolf75/aoc-2020 | 03.py | 1745a6cf46dac097869e5af99194b710e78bed28 | #!/usr/bin/env python3
import sys, os
import unittest
from lib.common import *
filename = "inputs/2020_12_03_input.txt"
class day03:
def __init__(self):
pass
class day03part1(day03):
def solve(self, args):
pass
class day03part2(day03):
def solve(self, args):
pass
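# The solve() methods are empty scaffolding. Below is a minimal sketch of the usual
# Advent of Code 2020 day 3 helper (hypothetical, not part of the original repo):
# count trees ('#') hit while stepping right/down across a horizontally repeating grid.
def count_trees(grid, right=3, down=1):
    hits = 0
    for i, row in enumerate(grid[::down]):
        if row[(i * right) % len(row)] == '#':
            hits += 1
    return hits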
class examples(unittest.TestCase):
def test_examples_part1(self):
day3 = day03part1()
        # self.assertTrue()
def test_examples_part2(self):
day3 = day03part2()
        # self.assertTrue()
class solutions(unittest.TestCase):
def test_part1(self):
day3 = day03part1()
def test_part2(self):
day3 = day03part2()
| [] |
fortinet/ips-bph-framework | scripts/examples/tools/capturebat.py | 145e14cced2181f388ade07d78b4f0e9452143dd | # Tool Imports
from bph.tools.windows.capturebat import BphCaptureBat as CaptureBat
# Core Imports
from bph.core.server.template import BphTemplateServer as TemplateServer
from bph.core.sample import BphSample as Sample
from bph.core.sample import BphLabFile as LabFile
from bph.core.session import BphSession as Session
session = Session(project_name='blackhat_arsenal_2019')
session.start()
templateserver = TemplateServer()
templateserver.start()
capturebat = CaptureBat()
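# Example workflow: clean up any previous run, start the capture, let it run briefly, stop it, then collect the generated files.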
capturebat.cleanup()
capturebat.execute()
capturebat.start()
capturebat.execute(delay=15)
capturebat.stop()
capturebat.execute()
capturebat.collect()
capturebat.execute()
capturebat.files() | [((10, 10, 10, 55), 'bph.core.session.BphSession', 'Session', (), '', True, 'from bph.core.session import BphSession as Session\n'), ((13, 17, 13, 33), 'bph.core.server.template.BphTemplateServer', 'TemplateServer', ({}, {}), '()', True, 'from bph.core.server.template import BphTemplateServer as TemplateServer\n'), ((16, 13, 16, 25), 'bph.tools.windows.capturebat.BphCaptureBat', 'CaptureBat', ({}, {}), '()', True, 'from bph.tools.windows.capturebat import BphCaptureBat as CaptureBat\n')] |
kaushikponnapalli/dymos | dymos/utils/test/test_hermite.py | 3fba91d0fc2c0e8460717b1bec80774676287739 | import unittest
import numpy as np
from numpy.testing import assert_almost_equal
from dymos.utils.hermite import hermite_matrices
class TestHermiteMatrices(unittest.TestCase):
def test_quadratic(self):
# Interpolate with values and rates provided at [-1, 1] in tau space
tau_given = [-1.0, 1.0]
tau_eval = np.linspace(-1, 1, 100)
# In time space use the boundaries [-2, 2]
dt_dtau = 4.0 / 2.0
# Provide values for y = t**2 and its time-derivative
y_given = [4.0, 4.0]
ydot_given = [-4.0, 4.0]
# Get the hermite matrices.
Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval)
# Interpolate y and ydot at tau_eval points in tau space.
y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given)
ydot_i = (1.0 / dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given)
# Compute our function as a point of comparison.
y_computed = (tau_eval * dt_dtau)**2
ydot_computed = 2.0 * (tau_eval * dt_dtau)
# Check results
assert_almost_equal(y_i, y_computed)
assert_almost_equal(ydot_i, ydot_computed)
def test_cubic(self):
# Interpolate with values and rates provided at [-1, 1] in tau space
tau_given = [-1.0, 0.0, 1.0]
tau_eval = np.linspace(-1, 1, 101)
# In time space use the boundaries [-2, 2]
dt_dtau = 4.0 / 2.0
        # Provide values for y = t**3 and its time-derivative
y_given = [-8.0, 0.0, 8.0]
ydot_given = [12.0, 0.0, 12.0]
# Get the hermite matrices.
Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval)
# Interpolate y and ydot at tau_eval points in tau space.
y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given)
ydot_i = (1.0 / dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given)
# Compute our function as a point of comparison.
y_computed = (tau_eval * dt_dtau)**3
ydot_computed = 3.0 * (tau_eval * dt_dtau)**2
# Check results
assert_almost_equal(y_i, y_computed)
assert_almost_equal(ydot_i, ydot_computed)
if __name__ == '__main__': # pragma: no cover
unittest.main()
| [((69, 4, 69, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((15, 19, 15, 42), 'numpy.linspace', 'np.linspace', ({(15, 31, 15, 33): '-1', (15, 35, 15, 36): '1', (15, 38, 15, 41): '100'}, {}), '(-1, 1, 100)', True, 'import numpy as np\n'), ((25, 25, 25, 62), 'dymos.utils.hermite.hermite_matrices', 'hermite_matrices', ({(25, 42, 25, 51): 'tau_given', (25, 53, 25, 61): 'tau_eval'}, {}), '(tau_given, tau_eval)', False, 'from dymos.utils.hermite import hermite_matrices\n'), ((36, 8, 36, 44), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', ({(36, 28, 36, 31): 'y_i', (36, 33, 36, 43): 'y_computed'}, {}), '(y_i, y_computed)', False, 'from numpy.testing import assert_almost_equal\n'), ((37, 8, 37, 50), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', ({(37, 28, 37, 34): 'ydot_i', (37, 36, 37, 49): 'ydot_computed'}, {}), '(ydot_i, ydot_computed)', False, 'from numpy.testing import assert_almost_equal\n'), ((43, 19, 43, 42), 'numpy.linspace', 'np.linspace', ({(43, 31, 43, 33): '-1', (43, 35, 43, 36): '1', (43, 38, 43, 41): '101'}, {}), '(-1, 1, 101)', True, 'import numpy as np\n'), ((53, 25, 53, 62), 'dymos.utils.hermite.hermite_matrices', 'hermite_matrices', ({(53, 42, 53, 51): 'tau_given', (53, 53, 53, 61): 'tau_eval'}, {}), '(tau_given, tau_eval)', False, 'from dymos.utils.hermite import hermite_matrices\n'), ((64, 8, 64, 44), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', ({(64, 28, 64, 31): 'y_i', (64, 33, 64, 43): 'y_computed'}, {}), '(y_i, y_computed)', False, 'from numpy.testing import assert_almost_equal\n'), ((65, 8, 65, 50), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', ({(65, 28, 65, 34): 'ydot_i', (65, 36, 65, 49): 'ydot_computed'}, {}), '(ydot_i, ydot_computed)', False, 'from numpy.testing import assert_almost_equal\n'), ((28, 14, 28, 33), 'numpy.dot', 'np.dot', ({(28, 21, 28, 23): 'Ai', (28, 25, 28, 32): 'y_given'}, {}), '(Ai, y_given)', True, 'import numpy as np\n'), ((29, 57, 29, 79), 'numpy.dot', 'np.dot', ({(29, 64, 29, 66): 'Bd', (29, 68, 29, 78): 'ydot_given'}, {}), '(Bd, ydot_given)', True, 'import numpy as np\n'), ((56, 14, 56, 33), 'numpy.dot', 'np.dot', ({(56, 21, 56, 23): 'Ai', (56, 25, 56, 32): 'y_given'}, {}), '(Ai, y_given)', True, 'import numpy as np\n'), ((57, 57, 57, 79), 'numpy.dot', 'np.dot', ({(57, 64, 57, 66): 'Bd', (57, 68, 57, 78): 'ydot_given'}, {}), '(Bd, ydot_given)', True, 'import numpy as np\n'), ((28, 46, 28, 68), 'numpy.dot', 'np.dot', ({(28, 53, 28, 55): 'Bi', (28, 57, 28, 67): 'ydot_given'}, {}), '(Bi, ydot_given)', True, 'import numpy as np\n'), ((29, 35, 29, 54), 'numpy.dot', 'np.dot', ({(29, 42, 29, 44): 'Ad', (29, 46, 29, 53): 'y_given'}, {}), '(Ad, y_given)', True, 'import numpy as np\n'), ((56, 46, 56, 68), 'numpy.dot', 'np.dot', ({(56, 53, 56, 55): 'Bi', (56, 57, 56, 67): 'ydot_given'}, {}), '(Bi, ydot_given)', True, 'import numpy as np\n'), ((57, 35, 57, 54), 'numpy.dot', 'np.dot', ({(57, 42, 57, 44): 'Ad', (57, 46, 57, 53): 'y_given'}, {}), '(Ad, y_given)', True, 'import numpy as np\n')] |
libris/xl_auth | xl_auth/settings.py | 33d705c287d2ecd81920d37c3751d947cd52588c | # -*- coding: utf-8 -*-
"""Application configuration."""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from . import __author__, __name__, __version__
class Config(object):
"""Base configuration."""
SERVER_NAME = os.environ.get('SERVER_NAME', None)
PREFERRED_URL_SCHEME = os.environ.get('PREFERRED_URL_SCHEME', 'http')
APP_NAME = __name__
APP_VERSION = __version__
APP_AUTHOR = __author__
JSON_AS_ASCII = False
SECRET_KEY = os.environ.get('XL_AUTH_SECRET', 'secret-key') # TODO: Change me
APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory.
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
BCRYPT_LOG_ROUNDS = 13
DEBUG_TB_ENABLED = False # Disable Debug toolbar.
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
SQLALCHEMY_TRACK_MODIFICATIONS = False
WEBPACK_MANIFEST_PATH = 'webpack/manifest.json'
BABEL_DEFAULT_LOCALE = os.environ.get('BABEL_DEFAULT_LOCALE', 'sv')
BABEL_DEFAULT_TIMEZONE = 'utc'
EMAIL_DEFAULT_FROM = os.environ.get('EMAIL_DEFAULT_FROM', '[email protected]')
EMAIL_HOST = os.environ.get('EMAIL_HOST', 'smtp.kb.se')
EMAIL_PORT = int(os.environ.get('EMAIL_PORT', '25'))
EMAIL_TIMEOUT = int(os.environ.get('EMAIL_TIMEOUT', '5'))
OAUTH2_PROVIDER_TOKEN_EXPIRES_IN = 36000
XL_AUTH_MAX_ACTIVE_PASSWORD_RESETS = 2
XL_AUTH_FAILED_LOGIN_TIMEFRAME = 60 * 60
XL_AUTH_FAILED_LOGIN_MAX_ATTEMPTS = 7
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI',
'postgresql://localhost/example')
DEBUG_TB_ENABLED = False # Disable Debug toolbar.
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
DB_NAME = 'dev.db'
# Put the db file in project root
DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH)
DEBUG_TB_ENABLED = True
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
class TestConfig(Config):
"""Test configuration."""
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
# For faster tests; needs at least 4 to avoid "ValueError: Invalid rounds".
BCRYPT_LOG_ROUNDS = 4
WTF_CSRF_ENABLED = False # Allows form testing.
EMAIL_BACKEND = 'flask_emails.backends.DummyBackend'
| [((14, 18, 14, 53), 'os.environ.get', 'os.environ.get', ({(14, 33, 14, 46): '"""SERVER_NAME"""', (14, 48, 14, 52): 'None'}, {}), "('SERVER_NAME', None)", False, 'import os\n'), ((15, 27, 15, 73), 'os.environ.get', 'os.environ.get', ({(15, 42, 15, 64): '"""PREFERRED_URL_SCHEME"""', (15, 66, 15, 72): '"""http"""'}, {}), "('PREFERRED_URL_SCHEME', 'http')", False, 'import os\n'), ((20, 17, 20, 63), 'os.environ.get', 'os.environ.get', ({(20, 32, 20, 48): '"""XL_AUTH_SECRET"""', (20, 50, 20, 62): '"""secret-key"""'}, {}), "('XL_AUTH_SECRET', 'secret-key')", False, 'import os\n'), ((29, 27, 29, 71), 'os.environ.get', 'os.environ.get', ({(29, 42, 29, 64): '"""BABEL_DEFAULT_LOCALE"""', (29, 66, 29, 70): '"""sv"""'}, {}), "('BABEL_DEFAULT_LOCALE', 'sv')", False, 'import os\n'), ((31, 25, 31, 78), 'os.environ.get', 'os.environ.get', ({(31, 40, 31, 60): '"""EMAIL_DEFAULT_FROM"""', (31, 62, 31, 77): '"""[email protected]"""'}, {}), "('EMAIL_DEFAULT_FROM', '[email protected]')", False, 'import os\n'), ((32, 17, 32, 59), 'os.environ.get', 'os.environ.get', ({(32, 32, 32, 44): '"""EMAIL_HOST"""', (32, 46, 32, 58): '"""smtp.kb.se"""'}, {}), "('EMAIL_HOST', 'smtp.kb.se')", False, 'import os\n'), ((46, 30, 47, 78), 'os.environ.get', 'os.environ.get', ({(46, 45, 46, 70): '"""SQLALCHEMY_DATABASE_URI"""', (47, 45, 47, 77): '"""postgresql://localhost/example"""'}, {}), "('SQLALCHEMY_DATABASE_URI', 'postgresql://localhost/example')", False, 'import os\n'), ((58, 14, 58, 56), 'os.path.join', 'os.path.join', ({(58, 27, 58, 46): 'Config.PROJECT_ROOT', (58, 48, 58, 55): 'DB_NAME'}, {}), '(Config.PROJECT_ROOT, DB_NAME)', False, 'import os\n'), ((21, 30, 21, 55), 'os.path.dirname', 'os.path.dirname', ({(21, 46, 21, 54): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((22, 35, 22, 67), 'os.path.join', 'os.path.join', ({(22, 48, 22, 55): 'APP_DIR', (22, 57, 22, 66): 'os.pardir'}, {}), '(APP_DIR, os.pardir)', False, 'import os\n'), ((33, 21, 33, 55), 'os.environ.get', 'os.environ.get', ({(33, 36, 33, 48): '"""EMAIL_PORT"""', (33, 50, 33, 54): '"""25"""'}, {}), "('EMAIL_PORT', '25')", False, 'import os\n'), ((34, 24, 34, 60), 'os.environ.get', 'os.environ.get', ({(34, 39, 34, 54): '"""EMAIL_TIMEOUT"""', (34, 56, 34, 59): '"""5"""'}, {}), "('EMAIL_TIMEOUT', '5')", False, 'import os\n')] |
evanreichard/pyatv | tests/mrp/test_mrp_auth.py | d41bd749bbf8f8a9365e7fd36c1164543e334565 | """Functional authentication tests with fake MRP Apple TV."""
import inspect
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
import pyatv
from pyatv import exceptions
from pyatv.const import Protocol
from pyatv.conf import MrpService, AppleTV
from pyatv.mrp.server_auth import PIN_CODE, CLIENT_IDENTIFIER, CLIENT_CREDENTIALS
from tests.fake_device import FakeAppleTV
class MrpAuthFunctionalTest(AioHTTPTestCase):
def setUp(self):
AioHTTPTestCase.setUp(self)
self.service = MrpService(
CLIENT_IDENTIFIER, self.fake_atv.get_port(Protocol.MRP)
)
self.conf = AppleTV("127.0.0.1", "Apple TV")
self.conf.add_service(self.service)
async def tearDownAsync(self):
if inspect.iscoroutinefunction(self.handle.close):
await self.handle.close()
else:
self.handle.close()
await super().tearDownAsync()
async def get_application(self, loop=None):
self.fake_atv = FakeAppleTV(self.loop)
self.state, self.usecase = self.fake_atv.add_service(Protocol.MRP)
return self.fake_atv.app
@unittest_run_loop
async def test_pairing_with_device(self):
self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop)
self.assertIsNone(self.service.credentials)
self.assertTrue(self.handle.device_provides_pin)
await self.handle.begin()
self.handle.pin(PIN_CODE)
await self.handle.finish()
self.assertTrue(self.handle.has_paired)
self.assertTrue(self.state.has_paired)
self.assertIsNotNone(self.service.credentials)
@unittest_run_loop
async def test_pairing_with_existing_credentials(self):
self.service.credentials = CLIENT_CREDENTIALS
self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop)
self.assertFalse(self.handle.has_paired)
self.assertIsNotNone(self.service.credentials)
self.assertTrue(self.handle.device_provides_pin)
await self.handle.begin()
self.handle.pin(PIN_CODE)
await self.handle.finish()
self.assertTrue(self.handle.has_paired)
self.assertTrue(self.state.has_paired)
self.assertIsNotNone(self.service.credentials)
@unittest_run_loop
async def test_pairing_with_bad_pin(self):
self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop)
self.assertIsNone(self.service.credentials)
self.assertTrue(self.handle.device_provides_pin)
await self.handle.begin()
self.handle.pin(PIN_CODE + 1)
with self.assertRaises(exceptions.PairingError):
await self.handle.finish()
self.assertFalse(self.handle.has_paired)
self.assertFalse(self.state.has_paired)
self.assertIsNone(self.service.credentials)
@unittest_run_loop
async def test_authentication(self):
self.service.credentials = CLIENT_CREDENTIALS
self.handle = await pyatv.connect(self.conf, self.loop)
self.assertTrue(self.state.has_authenticated)
| [((17, 8, 17, 35), 'aiohttp.test_utils.AioHTTPTestCase.setUp', 'AioHTTPTestCase.setUp', ({(17, 30, 17, 34): 'self'}, {}), '(self)', False, 'from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop\n'), ((22, 20, 22, 52), 'pyatv.conf.AppleTV', 'AppleTV', ({(22, 28, 22, 39): '"""127.0.0.1"""', (22, 41, 22, 51): '"""Apple TV"""'}, {}), "('127.0.0.1', 'Apple TV')", False, 'from pyatv.conf import MrpService, AppleTV\n'), ((26, 11, 26, 57), 'inspect.iscoroutinefunction', 'inspect.iscoroutinefunction', ({(26, 39, 26, 56): 'self.handle.close'}, {}), '(self.handle.close)', False, 'import inspect\n'), ((33, 24, 33, 46), 'tests.fake_device.FakeAppleTV', 'FakeAppleTV', ({(33, 36, 33, 45): 'self.loop'}, {}), '(self.loop)', False, 'from tests.fake_device import FakeAppleTV\n'), ((39, 28, 39, 74), 'pyatv.pair', 'pyatv.pair', ({(39, 39, 39, 48): 'self.conf', (39, 50, 39, 62): 'Protocol.MRP', (39, 64, 39, 73): 'self.loop'}, {}), '(self.conf, Protocol.MRP, self.loop)', False, 'import pyatv\n'), ((57, 28, 57, 74), 'pyatv.pair', 'pyatv.pair', ({(57, 39, 57, 48): 'self.conf', (57, 50, 57, 62): 'Protocol.MRP', (57, 64, 57, 73): 'self.loop'}, {}), '(self.conf, Protocol.MRP, self.loop)', False, 'import pyatv\n'), ((74, 28, 74, 74), 'pyatv.pair', 'pyatv.pair', ({(74, 39, 74, 48): 'self.conf', (74, 50, 74, 62): 'Protocol.MRP', (74, 64, 74, 73): 'self.loop'}, {}), '(self.conf, Protocol.MRP, self.loop)', False, 'import pyatv\n'), ((93, 28, 93, 63), 'pyatv.connect', 'pyatv.connect', ({(93, 42, 93, 51): 'self.conf', (93, 53, 93, 62): 'self.loop'}, {}), '(self.conf, self.loop)', False, 'import pyatv\n')] |
maryokhin/drf-extensions | tests_app/tests/functional/key_constructor/bits/models.py | 8223db2bdddaf3cd99f951b2291210c5fd5b0e6f | # -*- coding: utf-8 -*-
from django.db import models
class KeyConstructorUserProperty(models.Model):
name = models.CharField(max_length=100)
class Meta:
app_label = 'tests_app'
class KeyConstructorUserModel(models.Model):
property = models.ForeignKey(KeyConstructorUserProperty)
class Meta:
app_label = 'tests_app' | [((6, 11, 6, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((13, 15, 13, 60), 'django.db.models.ForeignKey', 'models.ForeignKey', ({(13, 33, 13, 59): 'KeyConstructorUserProperty'}, {}), '(KeyConstructorUserProperty)', False, 'from django.db import models\n')] |
cliveseldon/ngraph-onnx | ngraph_onnx/onnx_importer/utils/numeric_limits.py | a2d20afdc7acd5064e4717612ad372d864d03d3d | # ******************************************************************************
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import numbers
from typing import Union
class NumericLimits(object):
"""Class providing interface to extract numerical limits for given data type."""
@staticmethod
def _get_number_limits_class(dtype):
# type: (np.dtype) -> Union[IntegralLimits, FloatingPointLimits]
"""Return specialized class instance with limits set for given data type.
:param dtype: The data type we want to check limits for.
:return: The specialized class instance providing numeric limits.
"""
data_type = dtype.type
value = data_type(1)
if isinstance(value, numbers.Integral):
return IntegralLimits(data_type)
elif isinstance(value, numbers.Real):
return FloatingPointLimits(data_type)
else:
raise ValueError('NumericLimits: unsupported data type: <{}>.'.format(dtype.type))
@staticmethod
def _get_dtype(dtype): # type: (Union[np.dtype, int, float]) -> np.dtype
"""Return numpy dtype object wrapping provided data type.
:param dtype: The data type to be wrapped.
:return: The numpy dtype object.
"""
return dtype if isinstance(dtype, np.dtype) else np.dtype(dtype)
@classmethod
def max(cls, dtype): # type: (np.dtype) -> Union[int, float]
"""Return maximum value that can be represented in given data type.
:param dtype: The data type we want to check maximum value for.
:return: The maximum value.
"""
return cls._get_number_limits_class(cls._get_dtype(dtype)).max
@classmethod
def min(cls, dtype): # type: (np.dtype) -> Union[int, float]
"""Return minimum value that can be represented in given data type.
:param dtype: The data type we want to check minimum value for.
:return: The minimum value.
"""
return cls._get_number_limits_class(cls._get_dtype(dtype)).min
class FloatingPointLimits(object):
"""Class providing access to numeric limits for floating point data types."""
def __init__(self, data_type): # type: (type) -> None
self.data_type = data_type
@property
def max(self): # type: () -> float
"""Provide maximum representable value by stored data type.
:return: The maximum value.
"""
return np.finfo(self.data_type).max
@property
def min(self): # type: () -> float
"""Provide minimum representable value by stored data type.
:return: The minimum value.
"""
return np.finfo(self.data_type).min
class IntegralLimits(object):
"""Class providing access to numeric limits for integral data types."""
def __init__(self, data_type): # type: (type) -> None
self.data_type = data_type
@property
def max(self): # type: () -> int
"""Provide maximum representable value by stored data type.
:return: The maximum value.
"""
return np.iinfo(self.data_type).max
@property
def min(self): # type: () -> int
"""Provide minimum representable value by stored data type.
:return: The minimum value.
"""
return np.iinfo(self.data_type).min
| [((54, 57, 54, 72), 'numpy.dtype', 'np.dtype', ({(54, 66, 54, 71): 'dtype'}, {}), '(dtype)', True, 'import numpy as np\n'), ((87, 15, 87, 39), 'numpy.finfo', 'np.finfo', ({(87, 24, 87, 38): 'self.data_type'}, {}), '(self.data_type)', True, 'import numpy as np\n'), ((95, 15, 95, 39), 'numpy.finfo', 'np.finfo', ({(95, 24, 95, 38): 'self.data_type'}, {}), '(self.data_type)', True, 'import numpy as np\n'), ((110, 15, 110, 39), 'numpy.iinfo', 'np.iinfo', ({(110, 24, 110, 38): 'self.data_type'}, {}), '(self.data_type)', True, 'import numpy as np\n'), ((118, 15, 118, 39), 'numpy.iinfo', 'np.iinfo', ({(118, 24, 118, 38): 'self.data_type'}, {}), '(self.data_type)', True, 'import numpy as np\n')] |
AbhilashReddyM/curvpack | curvpack/utils.py | 74351624ec9ec50ec4445c7be85a48a4eabb029a | import numpy as np
# The first two functions are modified from MNE surface project. License follows
# This software is OSI Certified Open Source Software. OSI Certified is a certification mark of the Open Source Initiative.
#
# Copyright (c) 2011-2019, authors of MNE-Python. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# Neither the names of MNE-Python authors nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# This software is provided by the copyright holders and contributors "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall the copyright owner or contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage.
def triangle_neighbors(tris, npts):
"""Efficiently compute vertex neighboring triangles.
Returns the triangles in the 1-ring of a given vertex
"""
# this code replaces the following, but is faster (vectorized):
#
# this['neighbor_tri'] = [list() for _ in xrange(this['np'])]
# for p in xrange(this['ntri']):
# verts = this['tris'][p]
# this['neighbor_tri'][verts[0]].append(p)
# this['neighbor_tri'][verts[1]].append(p)
# this['neighbor_tri'][verts[2]].append(p)
# this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']]
#
verts = tris.ravel()
counts = np.bincount(verts, minlength=npts)
reord = np.argsort(verts)
tri_idx = np.unravel_index(reord, (len(tris), 3))[0]
idx = np.cumsum(np.r_[0, counts])
# the sort below slows it down a bit, but is needed for equivalence
neighbor_tri = np.array([np.sort(tri_idx[v1:v2])
for v1, v2 in zip(idx[:-1], idx[1:])])
return neighbor_tri
def get_surf_neighbors(tris,neighbor_tri, k):
"""Get vertices of 1-ring
"""
verts = tris[neighbor_tri[k]]
verts = np.setdiff1d(verts, [k], assume_unique=False)
nneighbors = len(verts)
return verts
def GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2):
"""
INPUT:
Vertices : vertices
Faces : vertex connectivity
FaceNormals : Outer Normal per face, having magnitude equal to area of face
e0,e1,e2 : edge vectors
OUTPUT:
VertNormals : Unit normal at the vertex
"""
VertNormals =np.zeros(vertices.shape)
#edge lengths
de0=np.sqrt(e0[:,0]**2+e0[:,1]**2+e0[:,2]**2)
de1=np.sqrt(e1[:,0]**2+e1[:,1]**2+e1[:,2]**2)
de2=np.sqrt(e2[:,0]**2+e2[:,1]**2+e2[:,2]**2)
L2=np.c_[de0**2,de1**2,de2**2]
#Calculate weights according to N.Max [1999] for normals
wfv1=FaceNormals/(L2[:,1]*L2[:,2])[:,np.newaxis]
wfv2=FaceNormals/(L2[:,2]*L2[:,0])[:,np.newaxis]
wfv3=FaceNormals/(L2[:,0]*L2[:,1])[:,np.newaxis]
# #Calculate the weights according to MWA for normals
# wfv1=FaceNormals*np.arcsin(2*Af/(de1*de2))[:,np.newaxis]
# wfv2=FaceNormals*np.arcsin(2*Af/(de2*de0))[:,np.newaxis]
# wfv3=FaceNormals*np.arcsin(2*Af/(de0*de1))[:,np.newaxis]
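    # Accumulate each face's weighted normal onto its three vertices (one bincount pass per corner).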
verts=faces.T[0]
for j in [0,1,2]:
VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv1[:,j])
verts=faces.T[1]
for j in [0,1,2]:
VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv2[:,j])
verts=faces.T[2]
for j in [0,1,2]:
VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv3[:,j])
VertNormals=normr(VertNormals)
return VertNormals
def fastcross(x, y):
"""Compute cross product between list of 3D vectors
Input
x : Mx3 array
y : Mx3 array
Output
z : Mx3 array Cross product of x and y.
"""
if max([x.shape[0], y.shape[0]]) >= 500:
return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
else:
return np.cross(x, y)
def normr(vec):
"""
Normalizes an array of vectors. e.g. to convert a np array of vectors to unit vectors
"""
return vec/np.sqrt((vec**2).sum(axis=1))[:,np.newaxis]
| [((31, 13, 31, 47), 'numpy.bincount', 'np.bincount', (), '', True, 'import numpy as np\n'), ((32, 12, 32, 29), 'numpy.argsort', 'np.argsort', ({(32, 23, 32, 28): 'verts'}, {}), '(verts)', True, 'import numpy as np\n'), ((34, 10, 34, 37), 'numpy.cumsum', 'np.cumsum', ({(34, 20, 34, 36): 'np.r_[0, counts]'}, {}), '(np.r_[0, counts])', True, 'import numpy as np\n'), ((44, 12, 44, 57), 'numpy.setdiff1d', 'np.setdiff1d', (), '', True, 'import numpy as np\n'), ((60, 19, 60, 43), 'numpy.zeros', 'np.zeros', ({(60, 28, 60, 42): 'vertices.shape'}, {}), '(vertices.shape)', True, 'import numpy as np\n'), ((63, 8, 63, 49), 'numpy.sqrt', 'np.sqrt', ({(63, 16, 63, 48): 'e0[:, (0)] ** 2 + e0[:, (1)] ** 2 + e0[:, (2)] ** 2'}, {}), '(e0[:, (0)] ** 2 + e0[:, (1)] ** 2 + e0[:, (2)] ** 2)', True, 'import numpy as np\n'), ((64, 8, 64, 49), 'numpy.sqrt', 'np.sqrt', ({(64, 16, 64, 48): 'e1[:, (0)] ** 2 + e1[:, (1)] ** 2 + e1[:, (2)] ** 2'}, {}), '(e1[:, (0)] ** 2 + e1[:, (1)] ** 2 + e1[:, (2)] ** 2)', True, 'import numpy as np\n'), ((65, 8, 65, 49), 'numpy.sqrt', 'np.sqrt', ({(65, 16, 65, 48): 'e2[:, (0)] ** 2 + e2[:, (1)] ** 2 + e2[:, (2)] ** 2'}, {}), '(e2[:, (0)] ** 2 + e2[:, (1)] ** 2 + e2[:, (2)] ** 2)', True, 'import numpy as np\n'), ((82, 24, 82, 88), 'numpy.bincount', 'np.bincount', (), '', True, 'import numpy as np\n'), ((85, 24, 85, 88), 'numpy.bincount', 'np.bincount', (), '', True, 'import numpy as np\n'), ((88, 24, 88, 88), 'numpy.bincount', 'np.bincount', (), '', True, 'import numpy as np\n'), ((108, 15, 108, 29), 'numpy.cross', 'np.cross', ({(108, 24, 108, 25): 'x', (108, 27, 108, 28): 'y'}, {}), '(x, y)', True, 'import numpy as np\n'), ((36, 29, 36, 52), 'numpy.sort', 'np.sort', ({(36, 37, 36, 51): 'tri_idx[v1:v2]'}, {}), '(tri_idx[v1:v2])', True, 'import numpy as np\n')] |
tethys-platform/tethys | tests/unit/core/streams/test_stream_zero.py | c27daf5a832b05f9d771b04355001c331bc08766 | # Copyright 2020 Konstruktor, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import platform
import time
from unittest import mock
from unittest.mock import patch, call
from pytest import fixture
from tethys.core.pipes.pipe_zero import ZeroPipe
from tethys.core.sessions.sess_zero import ZeroSession
from tethys.core.stations.station_zero import ZeroStation
from tethys.core.streams.stream_zero import ZeroStream
from tethys.core.transports.transport_zero import ZeroTransport
class MockTransport(ZeroTransport):
def __init__(self):
pass
connect = mock.MagicMock()
disconnect = mock.MagicMock()
class MockSession(ZeroSession):
closing_mode = None
def __init__(self):
self._closed = False
@property
def closed(self):
return self._closed
class MockStation(ZeroStation):
def __init__(self):
pass
class TestZeroStream:
@staticmethod
def teardown_method():
MockTransport.connect.reset_mock()
MockTransport.disconnect.reset_mock()
@fixture
def pipe(self):
pipe = mock.MagicMock(spec=ZeroPipe)
return pipe
@fixture
def session(self):
session = MockSession()
return session
@fixture
def transport(self):
return MockTransport()
@fixture
def station(self):
return MockStation()
@fixture
def stream(self, pipe, session, transport):
return ZeroStream(pipe, session, transport)
# init
def test_init_with_transport_cb(self, pipe, session, transport):
def get_transport(_):
return transport
get_transport = mock.MagicMock(side_effect=get_transport)
stream = ZeroStream(pipe, session, get_transport)
assert stream.transport == transport
# conn context
def test_new_connection_context(self, stream):
with stream.connection_context():
MockTransport.connect.assert_called_once_with(stream)
MockTransport.disconnect.assert_not_called()
MockTransport.disconnect.assert_called_once_with(stream)
def test_old_connection_context(self, stream):
MockTransport._connections[stream.id] = stream
with stream.connection_context():
MockTransport.connect.assert_not_called()
MockTransport.disconnect.assert_not_called()
# heartbeat
def test_heartbeat_fail_delay(self, stream):
assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY
stream.station = mock.MagicMock(spec=ZeroStation)
stream.station.heartbeat_fail_delay = 0
assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY
stream.station.heartbeat_fail_delay = 12345
assert stream.heartbeat_fail_delay == 12345
def test_busy_false(self, stream):
stream.refresh = mock.MagicMock()
stream.station = mock.MagicMock(spec=ZeroStation)
stream.station.heartbeat_fail_delay = 1
stream.heartbeat_ts = time.time() - 10
assert stream.is_busy is False
assert stream.refresh.call_count == 1
def test_busy_true(self, stream):
stream.refresh = mock.MagicMock()
stream.station = mock.MagicMock(spec=ZeroStation)
stream.station.heartbeat_fail_delay = 1000
stream.heartbeat_ts = time.time()
assert stream.is_busy is True
assert stream.refresh.call_count == 1
def test_heartbeat(self, stream):
stream.save = mock.MagicMock()
with patch("time.time", lambda: 12345):
stream.heartbeat()
assert stream.heartbeat_ts == 12345
stream.save.assert_called_once_with(save_dependency=False)
# open
def test_open(self, stream):
stream.save = mock.MagicMock()
stream.closed = True
assert stream.open() is stream
assert stream.closed is False
stream.save.assert_called_once_with(save_dependency=False)
def test_open_no_commit(self, stream):
stream.save = mock.MagicMock()
stream.closed = True
assert stream.open(save=False) is stream
assert stream.closed is False
stream.save.assert_not_called()
# close
def test_close(self, stream):
stream.save = mock.MagicMock()
assert stream.close() is stream
assert stream.closed is True
stream.save.assert_called_once_with(save_dependency=False)
def test_close_no_commit(self, stream):
stream.save = mock.MagicMock()
assert stream.close(save=False) is stream
assert stream.closed is True
stream.save.assert_not_called()
# read
def test_read(self, stream):
data = ["packet", 0, {}, "", None] + [None, "packet"] * 5
result_data = list(filter(lambda x: x is not None, data))
iter_data = iter(data)
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item is ...:
break
result.append(item)
if platform.python_implementation().lower() == "pypy":
gc.collect()
assert result == result_data
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in data]
)
def test_read_n_packets(self, stream):
iter_data = iter([None, "packet"] + ["packet"] * 10)
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(count=5, test_kw=1):
if item is ...:
break
result.append(item)
assert result == ["packet"] * 5
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(6)]
)
def test_read_while_stream_open(self, stream):
iter_data = iter(range(10))
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item == 4:
stream.closed = True
if item is ...:
break
result.append(item)
assert result == list(range(5))
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(5)]
)
def test_read_while_sess_open(self, stream):
stream.session._closed = True
iter_data = iter([0, 1, 2, 3, None, 4])
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item is ...:
break
result.append(item)
assert result == list(range(4))
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(5)]
)
def test_read_when_station_changed(self, stream, station):
iter_data = iter(range(10))
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item == 4:
stream.station = station
if item is ...:
break
result.append(item)
assert result == list(range(5))
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(5)]
)
def test_read_none(self, stream):
iter_data = iter([None, "packet"] + ["packet"] * 10)
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(wait_timeout=1, test_kw=1):
if item is ...:
break
result.append(item)
assert result == []
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_called_once_with(stream, wait_timeout=1, test_kw=1)
# write
def test_write(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.write("packet", test_kw=1)
stream.transport.send.assert_called_once_with(stream, "packet", test_kw=1)
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
def test_write_many(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.write("packet", many=True, test_kw=1)
stream.transport.send.assert_has_calls(
[call(stream, i, test_kw=1) for i in "packet"]
)
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
def test_write_when_closed(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.closed = True
stream.write("packet", test_kw=1)
stream.transport.send.assert_not_called()
stream.connection_context.assert_not_called()
connection_context.__enter__.assert_not_called()
connection_context.__exit__.assert_not_called()
def test_write_out(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.closed = True
stream.pipe.node_b = "<out>"
stream.write("packet", test_kw=1)
stream.transport.send.assert_called_once_with(stream, "packet", test_kw=1)
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
# ack
def test_ack(self, stream):
stream.transport.ack = mock.MagicMock()
stream.ack("message", test_kw=1)
stream.transport.ack.assert_called_once_with(stream, "message", test_kw=1)
def test_ack_closed(self, stream):
stream.closed = True
stream.transport.ack = mock.MagicMock()
stream.ack("message", test_kw=1)
stream.transport.ack.assert_not_called()
# redirect
def test_redirect(self, stream, station):
station.save = mock.MagicMock()
station.stream_lock_ttl = 0
stream.save = mock.MagicMock()
stream.redirect_to(station)
assert stream.station == station
station.save.assert_called_once_with(save_dependency=False)
stream.save.assert_called_once_with(save_dependency=False)
# open/close context
def test_context(self, stream):
stream.open = mock.MagicMock()
stream.close = mock.MagicMock()
with stream:
stream.open.assert_called_once_with(save=False)
stream.close.assert_not_called()
stream.close.assert_called_once_with(save=False)
| [((33, 14, 33, 30), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((34, 17, 34, 33), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((61, 15, 61, 44), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((79, 15, 79, 51), 'tethys.core.streams.stream_zero.ZeroStream', 'ZeroStream', ({(79, 26, 79, 30): 'pipe', (79, 32, 79, 39): 'session', (79, 41, 79, 50): 'transport'}, {}), '(pipe, session, transport)', False, 'from tethys.core.streams.stream_zero import ZeroStream\n'), ((87, 24, 87, 65), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((89, 17, 89, 57), 'tethys.core.streams.stream_zero.ZeroStream', 'ZeroStream', ({(89, 28, 89, 32): 'pipe', (89, 34, 89, 41): 'session', (89, 43, 89, 56): 'get_transport'}, {}), '(pipe, session, get_transport)', False, 'from tethys.core.streams.stream_zero import ZeroStream\n'), ((113, 25, 113, 57), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((121, 25, 121, 41), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((122, 25, 122, 57), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((130, 25, 130, 41), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((131, 25, 131, 57), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((133, 30, 133, 41), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((139, 22, 139, 38), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((150, 22, 150, 38), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((158, 22, 158, 38), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((168, 22, 168, 38), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((175, 22, 175, 38), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((194, 29, 194, 45), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((195, 36, 197, 9), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((198, 32, 198, 67), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((227, 29, 227, 45), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((228, 36, 230, 9), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((231, 32, 231, 67), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((257, 29, 257, 45), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((258, 36, 260, 9), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((261, 32, 261, 67), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((293, 29, 293, 45), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((294, 36, 296, 9), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import 
mock\n'), ((297, 32, 297, 67), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((324, 29, 324, 45), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((325, 36, 327, 9), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((328, 32, 328, 67), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((358, 29, 358, 45), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((359, 36, 361, 9), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((362, 32, 362, 67), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((380, 29, 380, 45), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((381, 36, 383, 9), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((384, 32, 384, 48), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((394, 29, 394, 45), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((395, 36, 397, 9), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((398, 32, 398, 48), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((410, 29, 410, 45), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((411, 36, 413, 9), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((414, 32, 414, 48), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((425, 29, 425, 45), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((426, 36, 428, 9), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((429, 32, 429, 48), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((443, 31, 443, 47), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((451, 31, 451, 47), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((460, 23, 460, 39), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((463, 22, 463, 38), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((474, 22, 474, 38), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((475, 23, 475, 39), 'unittest.mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'from unittest import mock\n'), ((124, 30, 124, 41), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((141, 13, 141, 46), 'unittest.mock.patch', 'patch', ({(141, 19, 141, 30): '"""time.time"""', (141, 32, 141, 45): '(lambda : 12345)'}, {}), "('time.time', lambda : 12345)", False, 'from unittest.mock import patch, call\n'), ((208, 12, 208, 24), 'gc.collect', 'gc.collect', ({}, {}), '()', False, 'import gc\n'), ((215, 13, 215, 55), 'unittest.mock.call', 'call', (), '', False, 'from unittest.mock import patch, call\n'), ((245, 13, 245, 55), 'unittest.mock.call', 'call', (), '', False, 'from unittest.mock 
import patch, call\n'), ((279, 13, 279, 55), 'unittest.mock.call', 'call', (), '', False, 'from unittest.mock import patch, call\n'), ((312, 13, 312, 55), 'unittest.mock.call', 'call', (), '', False, 'from unittest.mock import patch, call\n'), ((346, 13, 346, 55), 'unittest.mock.call', 'call', (), '', False, 'from unittest.mock import patch, call\n'), ((403, 13, 403, 39), 'unittest.mock.call', 'call', (), '', False, 'from unittest.mock import patch, call\n'), ((207, 11, 207, 43), 'platform.python_implementation', 'platform.python_implementation', ({}, {}), '()', False, 'import platform\n')] |
qiyancos/Simics-3.0.31 | amd64-linux/lib/ppc64_simple_components.py | 9bd52d5abad023ee87a37306382a338abf7885f1 | ## Copyright 2005-2007 Virtutech AB
##
## The contents herein are Source Code which are a subset of Licensed
## Software pursuant to the terms of the Virtutech Simics Software
## License Agreement (the "Agreement"), and are being distributed under
## the Agreement. You should have received a copy of the Agreement with
## this Licensed Software; if not, please contact Virtutech for a copy
## of the Agreement prior to using this Licensed Software.
##
## By using this Source Code, you agree to be bound by all of the terms
## of the Agreement, and use of this Source Code is subject to the terms
## of the Agreement.
##
## This Source Code and any derivatives thereof are provided on an "as
## is" basis. Virtutech makes no warranties with respect to the Source
## Code or any derivatives thereof and disclaims all implied warranties,
## including, without limitation, warranties of merchantability and
## fitness for a particular purpose and non-infringement.
from sim_core import *
from components import *
import time
# Generic Simple System for PPC64 Processors
class ppc64_simple_base_component(component_object):
basename = 'system'
connectors = {
'uart0' : {'type' : 'serial', 'direction' : 'down',
'empty_ok' : True, 'hotplug' : True, 'multi' : False},
'uart1' : {'type' : 'serial', 'direction' : 'down',
'empty_ok' : True, 'hotplug' : True, 'multi' : False}}
def __init__(self, parse_obj):
component_object.__init__(self, parse_obj)
self.o.cpu = []
self.map_offset = 0xf0000000
self.time_of_day = "2006-06-06 06:06:06 UTC"
def get_cpu_frequency(self, idx):
return self.freq_mhz
def set_cpu_frequency(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
self.freq_mhz = val
return Sim_Set_Ok
def get_memory_megs(self, idx):
return self.memory_megs
def set_memory_megs(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
self.memory_megs = val
return Sim_Set_Ok
def get_map_offset(self, idx):
return self.map_offset
def set_map_offset(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
self.map_offset = val
return Sim_Set_Ok
def get_time_of_day(self, idx):
return self.time_of_day
def set_time_of_day(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
try:
time.strptime(val, "%Y-%m-%d %H:%M:%S %Z")
except Exception, msg:
SIM_attribute_error(str(msg))
return Sim_Set_Illegal_Value
self.time_of_day = val
return Sim_Set_Ok
def add_objects(self, cpu):
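        # Create the RAM, interrupt controllers, UARTs, OpenFirmware and
        # support objects, then wire them into the physical memory map below.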
self.o.phys_mem = pre_obj('phys_mem', 'memory-space')
self.o.ram_image = pre_obj('memory_image', 'image')
self.o.ram_image.size = self.memory_megs * 0x100000
self.o.ram = pre_obj('memory', 'ram')
self.o.ram.image = self.o.ram_image
self.o.pic = pre_obj('pic$', 'open-pic')
self.o.pic.irq_devs = [cpu]
self.o.irq = pre_obj('irq$', 'i8259x2')
self.o.irq.irq_dev = self.o.pic
self.o.uart0 = pre_obj('uart0', 'NS16550')
self.o.uart0.irq_dev = self.o.irq
self.o.uart0.irq_level = 4
self.o.uart0.xmit_time = 1000
self.o.uart1 = pre_obj('uart1', 'NS16550')
self.o.uart1.irq_dev = self.o.irq
self.o.uart1.irq_level = 3
self.o.uart1.xmit_time = 1000
self.o.of = pre_obj('of', 'ppc-of')
self.o.of.cpu = self.o.cpu[0]
self.o.of.memory_megs = self.memory_megs
self.o.of.entry_point = 0x7000000
self.o.of.map_offset = self.map_offset
self.o.of.time_of_day = self.time_of_day
self.o.broadcast_bus = pre_obj('broadcast_bus', 'ppc-broadcast-bus')
self.o.empty = pre_obj('empty', 'empty-device')
self.o.pci_io = pre_obj('pci_io', 'memory-space')
self.o.hfs = pre_obj('hfs$', 'hostfs')
self.o.phys_mem.map = [
[0x00000000, self.o.ram, 0, 0x0, self.memory_megs * 0x100000],
[self.map_offset + 0x08000000, self.o.pci_io, 0, 0x0, 0x100000],
[self.map_offset + 0x0f660000, self.o.hfs, 0, 0, 0x10],
[self.map_offset + 0x0fc00000, self.o.pic, 0, 0x0, 0x100000]]
self.o.pci_io.map = [
[0x020, self.o.irq, 0, 0x20, 0x1],
[0x021, self.o.irq, 0, 0x21, 0x1],
[0x0a0, self.o.irq, 0, 0xa0, 0x1],
[0x0a1, self.o.irq, 0, 0xa1, 0x1],
# Linux probes for UARTs at 0x2e8 and 0x3e8 too, so provide
# empty mappings there
[0x2e8, self.o.empty, 0, 0x0, 0x8],
# two NS16550, at the traditional addresses
[0x2f8, self.o.uart1, 0, 0x0, 0x8, None, 0, 1],
[0x3e8, self.o.empty, 0, 0x0, 0x8],
[0x3f8, self.o.uart0, 0, 0x0, 0x8, None, 0, 1],
# no UARTs here either
[0x890, self.o.empty, 0, 0x0, 0x8],
[0x898, self.o.empty, 0, 0x0, 0x8]]
def add_connector_info(self):
self.connector_info['uart0'] = [None, self.o.uart0, self.o.uart0.name]
self.connector_info['uart1'] = [None, self.o.uart1, self.o.uart1.name]
def connect_serial(self, connector, link, console):
if connector == 'uart0':
if link:
self.o.uart0.link = link
else:
self.o.uart0.console = console
elif connector == 'uart1':
if link:
self.o.uart1.link = link
else:
self.o.uart1.console = console
def disconnect_serial(self, connector):
if connector == 'uart0':
self.o.uart0.link = None
self.o.uart0.console = None
elif connector == 'uart1':
self.o.uart1.link = None
self.o.uart1.console = None
def get_clock(self):
return self.o.cpu[0]
def get_processors(self):
return self.o.cpu
ppc64_simple_attributes = [
['cpu_frequency', Sim_Attr_Required, 'f',
'Processor frequency in MHz.'],
['memory_megs', Sim_Attr_Required, 'i',
'The amount of RAM in megabytes.'],
['map_offset', Sim_Attr_Optional, 'i',
'Base address for device mappings. ' \
'Offsets at 4 GB and above will not work'],
['time_of_day', Sim_Attr_Optional, 's',
'Date and time to initialize the OpenFirmware RTC to']]
| [] |
shadow0403bsr/AutomatedGradingSoftware | Front-end (Django)/course/migrations/0002_subject_number_of_questions.py | 5031d22683a05f937615b3b8997152c285a2f930 | # Generated by Django 3.0.1 on 2020-02-15 06:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='subject',
name='Number_Of_Questions',
field=models.IntegerField(default=0),
),
]
| [((16, 18, 16, 48), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import migrations, models\n')] |
pchtsp/corn | cornflow/tests/unit/test_dags.py | 2811ad400f3f3681a159984eabf4fee1fc99b433 | """
Unit test for the DAG endpoints
"""
# Import from libraries
import json
# Import from internal modules
from cornflow.shared.const import EXEC_STATE_CORRECT, EXEC_STATE_MANUAL
from cornflow.tests.const import (
DAG_URL,
EXECUTION_URL_NORUN,
CASE_PATH,
INSTANCE_URL,
)
from cornflow.tests.unit.test_executions import TestExecutionsDetailEndpointMock
class TestDagEndpoint(TestExecutionsDetailEndpointMock):
def test_manual_dag_service_user(self):
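        # A service user posts solution data with state EXEC_STATE_MANUAL
        # directly to the DAG endpoint to create the execution.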
with open(CASE_PATH) as f:
payload = json.load(f)
data = dict(
data=payload["data"],
state=EXEC_STATE_MANUAL,
)
payload_to_send = {**self.payload, **data}
token = self.create_service_user()
self.items_to_check = [
"config",
"name",
"description",
"schema",
"instance_id",
"state",
]
idx = self.create_new_row(
url=DAG_URL,
model=self.model,
payload=payload_to_send,
check_payload=True,
token=token,
)
def test_manual_dag_planner_user(self):
with open(CASE_PATH) as f:
payload = json.load(f)
data = dict(
data=payload["data"],
state=EXEC_STATE_MANUAL,
)
payload_to_send = {**self.payload, **data}
token = self.create_planner()
self.items_to_check = [
"config",
"name",
"description",
"schema",
"instance_id",
"state",
]
idx = self.create_new_row(
url=DAG_URL,
model=self.model,
payload=payload_to_send,
check_payload=True,
token=token,
)
class TestDagDetailEndpoint(TestExecutionsDetailEndpointMock):
def test_put_dag(self):
idx = self.create_new_row(EXECUTION_URL_NORUN, self.model, self.payload)
with open(CASE_PATH) as f:
payload = json.load(f)
data = dict(
data=payload["data"],
state=EXEC_STATE_CORRECT,
)
payload_to_check = {**self.payload, **data}
token = self.create_service_user()
data = self.update_row(
url=DAG_URL + idx + "/",
payload_to_check=payload_to_check,
change=data,
token=token,
check_payload=False,
)
def test_get_dag(self):
idx = self.create_new_row(EXECUTION_URL_NORUN, self.model, self.payload)
token = self.create_service_user()
data = self.get_one_row(
url=DAG_URL + idx + "/",
token=token,
check_payload=False,
payload=self.payload,
)
instance_data = self.get_one_row(
url=INSTANCE_URL + self.payload["instance_id"] + "/data/",
payload=dict(),
check_payload=False,
)
self.assertEqual(data["data"], instance_data["data"])
self.assertEqual(data["config"], self.payload["config"])
return
| [((22, 22, 22, 34), 'json.load', 'json.load', ({(22, 32, 22, 33): 'f'}, {}), '(f)', False, 'import json\n'), ((49, 22, 49, 34), 'json.load', 'json.load', ({(49, 32, 49, 33): 'f'}, {}), '(f)', False, 'import json\n'), ((79, 22, 79, 34), 'json.load', 'json.load', ({(79, 32, 79, 33): 'f'}, {}), '(f)', False, 'import json\n')] |
xwshi/faster-rcnn-keras | nets/resnet.py | bfd99e3d0e786ada75a212c007111364b2c86312 | #-------------------------------------------------------------#
#   The ResNet50 network portion
#-------------------------------------------------------------#
from keras import backend as K
from keras import initializers, layers, regularizers
from keras.engine import InputSpec, Layer
from keras.initializers import random_normal
from keras.layers import (Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed,
ZeroPadding2D)
class BatchNormalization(Layer):
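    """Frozen batch normalization layer.

    gamma, beta and the running statistics are all created with
    trainable=False, so the layer always normalizes with its stored
    moments; this is the usual setup when reusing a pre-trained ResNet
    backbone inside Faster R-CNN.
    """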
def __init__(self, epsilon=1e-3, axis=-1,
weights=None, beta_init='zero', gamma_init='one',
gamma_regularizer=None, beta_regularizer=None, **kwargs):
self.supports_masking = True
self.beta_init = initializers.get(beta_init)
self.gamma_init = initializers.get(gamma_init)
self.epsilon = epsilon
self.axis = axis
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.initial_weights = weights
super(BatchNormalization, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
shape = (input_shape[self.axis],)
self.gamma = self.add_weight(shape,
initializer=self.gamma_init,
regularizer=self.gamma_regularizer,
name='{}_gamma'.format(self.name),
trainable=False)
self.beta = self.add_weight(shape,
initializer=self.beta_init,
regularizer=self.beta_regularizer,
name='{}_beta'.format(self.name),
trainable=False)
self.running_mean = self.add_weight(shape, initializer='zero',
name='{}_running_mean'.format(self.name),
trainable=False)
self.running_std = self.add_weight(shape, initializer='one',
name='{}_running_std'.format(self.name),
trainable=False)
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def call(self, x, mask=None):
assert self.built, 'Layer must be built before being called'
input_shape = K.int_shape(x)
reduction_axes = list(range(len(input_shape)))
del reduction_axes[self.axis]
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis]
if sorted(reduction_axes) == range(K.ndim(x))[:-1]:
x_normed = K.batch_normalization(
x, self.running_mean, self.running_std,
self.beta, self.gamma,
epsilon=self.epsilon)
else:
broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape)
broadcast_running_std = K.reshape(self.running_std, broadcast_shape)
broadcast_beta = K.reshape(self.beta, broadcast_shape)
broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
x_normed = K.batch_normalization(
x, broadcast_running_mean, broadcast_running_std,
broadcast_beta, broadcast_gamma,
epsilon=self.epsilon)
return x_normed
def get_config(self):
config = {'epsilon': self.epsilon,
'axis': self.axis,
'gamma_regularizer': self.gamma_regularizer.get_config() if self.gamma_regularizer else None,
'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer else None}
base_config = super(BatchNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def identity_block(input_tensor, kernel_size, filters, stage, block):
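    # Bottleneck residual block (1x1 -> kxk -> 1x1 convolutions) whose shortcut
    # is the identity, so the input must already have filters3 channels.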
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2b')(x)
x = BatchNormalization(name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2c')(x)
x = BatchNormalization(name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = Activation('relu')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
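    # Bottleneck residual block with a strided 1x1 convolution on the shortcut
    # branch, used wherever the spatial size or channel count changes.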
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '2b')(x)
x = BatchNormalization(name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '2c')(x)
x = BatchNormalization(name=bn_name_base + '2c')(x)
shortcut = Conv2D(filters3, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(name=bn_name_base + '1')(shortcut)
x = layers.add([x, shortcut])
x = Activation('relu')(x)
return x
def ResNet50(inputs):
#-----------------------------------#
    #   Assume the incoming image is 600,600,3
#-----------------------------------#
img_input = inputs
# 600,600,3 -> 300,300,64
x = ZeroPadding2D((3, 3))(img_input)
x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
x = BatchNormalization(name='bn_conv1')(x)
x = Activation('relu')(x)
# 300,300,64 -> 150,150,64
x = MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# 150,150,64 -> 150,150,256
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
# 150,150,256 -> 75,75,512
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
# 75,75,512 -> 38,38,1024
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
    # Finally we obtain a 38,38,1024 shared feature layer
return x
def identity_block_td(input_tensor, kernel_size, filters, stage, block):
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = TimeDistributed(Conv2D(nb_filter1, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), kernel_initializer='normal',padding='same'), name=conv_name_base + '2b')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2c')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x)
x = Add()([x, input_tensor])
x = Activation('relu')(x)
return x
def conv_block_td(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = TimeDistributed(Conv2D(nb_filter1, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', kernel_initializer='normal'), name=conv_name_base + '2b')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2c')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x)
shortcut = TimeDistributed(Conv2D(nb_filter3, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '1')(input_tensor)
shortcut = TimeDistributed(BatchNormalization(), name=bn_name_base + '1')(shortcut)
x = Add()([x, shortcut])
x = Activation('relu')(x)
return x
def classifier_layers(x):
# num_rois, 14, 14, 1024 -> num_rois, 7, 7, 2048
x = conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a', strides=(2, 2))
# num_rois, 7, 7, 2048 -> num_rois, 7, 7, 2048
x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='b')
# num_rois, 7, 7, 2048 -> num_rois, 7, 7, 2048
x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='c')
# num_rois, 7, 7, 2048 -> num_rois, 1, 1, 2048
x = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(x)
return x
| [((109, 8, 109, 37), 'keras.layers.add', 'layers.add', ({(109, 19, 109, 36): '[x, input_tensor]'}, {}), '([x, input_tensor])', False, 'from keras import initializers, layers, regularizers\n'), ((139, 8, 139, 33), 'keras.layers.add', 'layers.add', ({(139, 19, 139, 32): '[x, shortcut]'}, {}), '([x, shortcut])', False, 'from keras import initializers, layers, regularizers\n'), ((19, 25, 19, 52), 'keras.initializers.get', 'initializers.get', ({(19, 42, 19, 51): 'beta_init'}, {}), '(beta_init)', False, 'from keras import initializers, layers, regularizers\n'), ((20, 26, 20, 54), 'keras.initializers.get', 'initializers.get', ({(20, 43, 20, 53): 'gamma_init'}, {}), '(gamma_init)', False, 'from keras import initializers, layers, regularizers\n'), ((23, 33, 23, 68), 'keras.regularizers.get', 'regularizers.get', ({(23, 50, 23, 67): 'gamma_regularizer'}, {}), '(gamma_regularizer)', False, 'from keras import initializers, layers, regularizers\n'), ((24, 32, 24, 66), 'keras.regularizers.get', 'regularizers.get', ({(24, 49, 24, 65): 'beta_regularizer'}, {}), '(beta_regularizer)', False, 'from keras import initializers, layers, regularizers\n'), ((58, 22, 58, 36), 'keras.backend.int_shape', 'K.int_shape', ({(58, 34, 58, 35): 'x'}, {}), '(x)', True, 'from keras import backend as K\n'), ((100, 8, 100, 26), 'keras.layers.Activation', 'Activation', ({(100, 19, 100, 25): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((104, 8, 104, 26), 'keras.layers.Activation', 'Activation', ({(104, 19, 104, 25): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((110, 8, 110, 26), 'keras.layers.Activation', 'Activation', ({(110, 19, 110, 25): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((124, 8, 124, 26), 'keras.layers.Activation', 'Activation', ({(124, 19, 124, 25): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((129, 8, 129, 26), 'keras.layers.Activation', 'Activation', ({(129, 19, 129, 25): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((140, 8, 140, 26), 'keras.layers.Activation', 'Activation', ({(140, 19, 140, 25): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((150, 8, 150, 29), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', ({(150, 22, 150, 28): '(3, 3)'}, {}), '((3, 3))', False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((151, 8, 151, 56), 'keras.layers.Conv2D', 'Conv2D', (), '', False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((153, 8, 153, 26), 'keras.layers.Activation', 'Activation', ({(153, 19, 153, 25): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((156, 8, 156, 60), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (), '', False, 'from keras.layers import Activation, Add, AveragePooling2D, 
Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((187, 8, 187, 26), 'keras.layers.Activation', 'Activation', ({(187, 19, 187, 25): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((191, 8, 191, 26), 'keras.layers.Activation', 'Activation', ({(191, 19, 191, 25): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((196, 8, 196, 13), 'keras.layers.Add', 'Add', ({}, {}), '()', False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((197, 8, 197, 26), 'keras.layers.Activation', 'Activation', ({(197, 19, 197, 25): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((208, 8, 208, 26), 'keras.layers.Activation', 'Activation', ({(208, 19, 208, 25): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((212, 8, 212, 26), 'keras.layers.Activation', 'Activation', ({(212, 19, 212, 25): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((220, 8, 220, 13), 'keras.layers.Add', 'Add', ({}, {}), '()', False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((221, 8, 221, 26), 'keras.layers.Activation', 'Activation', ({(221, 19, 221, 25): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((29, 27, 29, 55), 'keras.engine.InputSpec', 'InputSpec', (), '', False, 'from keras.engine import InputSpec, Layer\n'), ((66, 23, 69, 37), 'keras.backend.batch_normalization', 'K.batch_normalization', (), '', True, 'from keras import backend as K\n'), ((71, 37, 71, 82), 'keras.backend.reshape', 'K.reshape', ({(71, 47, 71, 64): 'self.running_mean', (71, 66, 71, 81): 'broadcast_shape'}, {}), '(self.running_mean, broadcast_shape)', True, 'from keras import backend as K\n'), ((72, 36, 72, 80), 'keras.backend.reshape', 'K.reshape', ({(72, 46, 72, 62): 'self.running_std', (72, 64, 72, 79): 'broadcast_shape'}, {}), '(self.running_std, broadcast_shape)', True, 'from keras import backend as K\n'), ((73, 29, 73, 66), 'keras.backend.reshape', 'K.reshape', ({(73, 39, 73, 48): 'self.beta', (73, 50, 73, 65): 'broadcast_shape'}, {}), '(self.beta, broadcast_shape)', True, 'from keras import backend as K\n'), ((74, 30, 74, 68), 'keras.backend.reshape', 'K.reshape', ({(74, 40, 74, 50): 'self.gamma', (74, 52, 74, 67): 'broadcast_shape'}, {}), '(self.gamma, broadcast_shape)', True, 'from keras import backend as K\n'), ((75, 23, 78, 37), 'keras.backend.batch_normalization', 'K.batch_normalization', (), '', True, 'from keras import backend as K\n'), ((185, 24, 185, 79), 'keras.layers.Conv2D', 'Conv2D', (), '', False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((189, 24, 189, 114), 'keras.layers.Conv2D', 'Conv2D', (), '', False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((193, 24, 193, 79), 'keras.layers.Conv2D', 
'Conv2D', (), '', False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((206, 24, 206, 96), 'keras.layers.Conv2D', 'Conv2D', (), '', False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((210, 24, 210, 115), 'keras.layers.Conv2D', 'Conv2D', (), '', False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((214, 24, 214, 79), 'keras.layers.Conv2D', 'Conv2D', (), '', False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((217, 31, 217, 103), 'keras.layers.Conv2D', 'Conv2D', (), '', False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((232, 24, 232, 48), 'keras.layers.AveragePooling2D', 'AveragePooling2D', ({(232, 41, 232, 47): '(7, 7)'}, {}), '((7, 7))', False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((98, 52, 98, 78), 'keras.initializers.random_normal', 'random_normal', (), '', False, 'from keras.initializers import random_normal\n'), ((102, 73, 102, 99), 'keras.initializers.random_normal', 'random_normal', (), '', False, 'from keras.initializers import random_normal\n'), ((106, 52, 106, 78), 'keras.initializers.random_normal', 'random_normal', (), '', False, 'from keras.initializers import random_normal\n'), ((121, 69, 121, 95), 'keras.initializers.random_normal', 'random_normal', (), '', False, 'from keras.initializers import random_normal\n'), ((126, 73, 126, 99), 'keras.initializers.random_normal', 'random_normal', (), '', False, 'from keras.initializers import random_normal\n'), ((131, 52, 131, 78), 'keras.initializers.random_normal', 'random_normal', (), '', False, 'from keras.initializers import random_normal\n'), ((135, 76, 135, 102), 'keras.initializers.random_normal', 'random_normal', (), '', False, 'from keras.initializers import random_normal\n'), ((65, 43, 65, 52), 'keras.backend.ndim', 'K.ndim', ({(65, 50, 65, 51): 'x'}, {}), '(x)', True, 'from keras import backend as K\n')] |
congdh/fastapi-realworld | app/api/deps.py | 42c8630aedf594b69bc96a327b04dfe636a785fe | from typing import Generator
from fastapi import Depends, HTTPException
from fastapi.security import APIKeyHeader
from sqlalchemy.orm import Session
from starlette import status
from app import crud, models
from app.core import security
from app.db.session import SessionLocal
JWT_TOKEN_PREFIX = "Token" # noqa: S105
def get_db() -> Generator:
db = SessionLocal()
try:
yield db
finally:
db.close()
def authrization_heder_token(
api_key: str = Depends(APIKeyHeader(name="Authorization")),
) -> str:
try:
token_prefix, token = api_key.split(" ")
except ValueError:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="unsupported authorization type",
)
if token_prefix != JWT_TOKEN_PREFIX:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="unsupported authorization type",
)
return token
async def get_current_user(
token: str = Depends(authrization_heder_token), db: Session = Depends(get_db)
) -> models.User:
user_id = security.get_user_id_from_token(token=token)
user = crud.user.get_user_by_id(db, int(user_id))
if not user:
raise HTTPException(status_code=404, detail="User not found")
return user
| [((16, 9, 16, 23), 'app.db.session.SessionLocal', 'SessionLocal', ({}, {}), '()', False, 'from app.db.session import SessionLocal\n'), ((42, 17, 42, 50), 'fastapi.Depends', 'Depends', ({(42, 25, 42, 49): 'authrization_heder_token'}, {}), '(authrization_heder_token)', False, 'from fastapi import Depends, HTTPException\n'), ((42, 66, 42, 81), 'fastapi.Depends', 'Depends', ({(42, 74, 42, 80): 'get_db'}, {}), '(get_db)', False, 'from fastapi import Depends, HTTPException\n'), ((44, 14, 44, 58), 'app.core.security.get_user_id_from_token', 'security.get_user_id_from_token', (), '', False, 'from app.core import security\n'), ((24, 27, 24, 61), 'fastapi.security.APIKeyHeader', 'APIKeyHeader', (), '', False, 'from fastapi.security import APIKeyHeader\n'), ((34, 14, 37, 9), 'fastapi.HTTPException', 'HTTPException', (), '', False, 'from fastapi import Depends, HTTPException\n'), ((47, 14, 47, 69), 'fastapi.HTTPException', 'HTTPException', (), '', False, 'from fastapi import Depends, HTTPException\n'), ((29, 14, 32, 9), 'fastapi.HTTPException', 'HTTPException', (), '', False, 'from fastapi import Depends, HTTPException\n')] |
netcriptus/raiden-services | src/raiden_libs/contract_info.py | 3955d91852c616f6ba0a3a979757edbd852b2c6d | import sys
from typing import Dict, List, Tuple
import structlog
from eth_utils import to_canonical_address
from raiden.utils.typing import Address, BlockNumber, ChainID, Optional
from raiden_contracts.contract_manager import (
ContractDevEnvironment,
ContractManager,
contracts_precompiled_path,
get_contracts_deployment_info,
)
log = structlog.get_logger(__name__)
CONTRACT_MANAGER = ContractManager(contracts_precompiled_path())
def get_contract_addresses_and_start_block(
chain_id: ChainID,
contracts: List[str],
address_overwrites: Dict[str, Address],
development_environment: ContractDevEnvironment = ContractDevEnvironment.DEMO,
contracts_version: Optional[str] = None,
) -> Tuple[Dict[str, Address], BlockNumber]:
"""Returns contract addresses and start query block for a given chain and contracts version.
The default contracts can be overwritten by the additional parameters.
Args:
chain_id: The chain id to look for deployed contracts.
contracts: The list of contracts which should be considered
address_overwrites: Dict of addresses which should be used instead of
the ones in the requested deployment.
contracts_version: The version of the contracts to use.
Returns: A dictionary with the contract addresses and start block for the given information
"""
contract_data = get_contracts_deployment_info(
chain_id=chain_id,
version=contracts_version,
development_environment=development_environment,
)
if not contract_data:
log.error(
"No deployed contracts were found at the default registry",
contracts_version=contracts_version,
)
sys.exit(1)
# Get deployed addresses for those contracts which have no overwrites
addresses = {
c: (
address_overwrites.get(c)
or to_canonical_address(contract_data["contracts"][c]["address"])
)
for c in contracts
}
# Set start block to zero if any contract addresses are overwritten
if any(address_overwrites.values()):
start_block = BlockNumber(0)
else:
start_block = BlockNumber(
max(0, min(contract_data["contracts"][c]["block_number"] for c in contracts))
)
return addresses, start_block
| [((15, 6, 15, 36), 'structlog.get_logger', 'structlog.get_logger', ({(15, 27, 15, 35): '__name__'}, {}), '(__name__)', False, 'import structlog\n'), ((16, 35, 16, 63), 'raiden_contracts.contract_manager.contracts_precompiled_path', 'contracts_precompiled_path', ({}, {}), '()', False, 'from raiden_contracts.contract_manager import ContractDevEnvironment, ContractManager, contracts_precompiled_path, get_contracts_deployment_info\n'), ((39, 20, 43, 5), 'raiden_contracts.contract_manager.get_contracts_deployment_info', 'get_contracts_deployment_info', (), '', False, 'from raiden_contracts.contract_manager import ContractDevEnvironment, ContractManager, contracts_precompiled_path, get_contracts_deployment_info\n'), ((49, 8, 49, 19), 'sys.exit', 'sys.exit', ({(49, 17, 49, 18): '(1)'}, {}), '(1)', False, 'import sys\n'), ((62, 22, 62, 36), 'raiden.utils.typing.BlockNumber', 'BlockNumber', ({(62, 34, 62, 35): '0'}, {}), '(0)', False, 'from raiden.utils.typing import Address, BlockNumber, ChainID, Optional\n'), ((55, 15, 55, 77), 'eth_utils.to_canonical_address', 'to_canonical_address', ({(55, 36, 55, 76): "contract_data['contracts'][c]['address']"}, {}), "(contract_data['contracts'][c]['address'])", False, 'from eth_utils import to_canonical_address\n')] |
letyrodridc/meta-dataset | meta_dataset/models/functional_classifiers.py | d868ea1c767cce46fa6723f6f77c29552754fcc9 | # coding=utf-8
# Copyright 2022 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2,python3
"""Classifier-related code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin.tf
from meta_dataset.models import functional_backbones
import tensorflow.compat.v1 as tf
def linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier,
cosine_logits_multiplier, use_weight_norm):
"""Passes embeddings through the linear layer defined by w_fc and b_fc.
Args:
embeddings: A Tensor of size [batch size, embedding dim].
w_fc: A Tensor of size [embedding dim, num outputs].
b_fc: Either None, or a Tensor of size [num outputs] or []. If
cosine_classifier is False, it can not be None.
cosine_classifier: A bool. If true, a cosine classifier is used which does
not require the bias b_fc.
cosine_logits_multiplier: A float. Only used if cosine_classifier is True,
and multiplies the resulting logits.
use_weight_norm: A bool. Whether weight norm was used. If so, then if using
cosine classifier, normalize only the embeddings but not the weights.
Returns:
logits: A Tensor of size [batch size, num outputs].
"""
if cosine_classifier:
# Each column of the weight matrix may be interpreted as a class
# representation (of the same dimenionality as the embedding space). The
# logit for an embedding vector belonging to that class is the cosine
# similarity between that embedding and that class representation.
embeddings = tf.nn.l2_normalize(embeddings, axis=1, epsilon=1e-3)
if not use_weight_norm:
# Only normalize the weights if weight norm was not used.
w_fc = tf.nn.l2_normalize(w_fc, axis=0, epsilon=1e-3)
logits = tf.matmul(embeddings, w_fc)
# Scale the logits as passing numbers in [-1, 1] to softmax is not very
# expressive.
logits *= cosine_logits_multiplier
else:
assert b_fc is not None
logits = tf.matmul(embeddings, w_fc) + b_fc
return logits
@gin.configurable
def linear_classifier(embeddings, num_classes, cosine_classifier,
cosine_logits_multiplier, use_weight_norm, weight_decay):
"""Forward pass through a linear classifier, or possibly a cosine classifier.
Args:
embeddings: A Tensor of size [batch size, embedding dim].
num_classes: An integer; the dimension of the classification.
cosine_classifier: A bool. If true, a cosine classifier is used, which does
not require a bias.
cosine_logits_multiplier: A float. Only used if cosine_classifier is True,
and multiplies the resulting logits.
use_weight_norm: A bool. Whether weight norm was used. If so, then if using
cosine classifier, normalize only the embeddings but not the weights.
weight_decay: A float; the scalar multiple on the L2 regularization of the
weight matrix.
Returns:
logits: A Tensor of size [batch size, num outputs].
"""
embedding_dims = embeddings.get_shape().as_list()[-1]
if use_weight_norm:
# A variable to keep track of whether the initialization has already
# happened.
data_dependent_init_done = tf.get_variable(
'data_dependent_init_done',
initializer=0,
dtype=tf.int32,
trainable=False)
w_fc = tf.get_variable(
'w_fc', [embedding_dims, num_classes],
initializer=tf.random_normal_initializer(0, 0.05),
trainable=True)
# This init is temporary as it needs to be done in a data-dependent way.
# It will be overwritten during the first forward pass through this layer.
g = tf.get_variable(
'g',
dtype=tf.float32,
initializer=tf.ones([num_classes]),
trainable=True)
b_fc = None
if not cosine_classifier:
# Also initialize a bias.
b_fc = tf.get_variable(
'b_fc', initializer=tf.zeros([num_classes]), trainable=True)
def _do_data_dependent_init():
"""Returns ops for the data-dependent init of g and maybe b_fc."""
w_fc_normalized = tf.nn.l2_normalize(w_fc.read_value(), [0])
output_init = tf.matmul(embeddings, w_fc_normalized)
mean_init, var_init = tf.nn.moments(output_init, [0])
# Data-dependent init values.
g_init_value = 1. / tf.sqrt(var_init + 1e-10)
ops = [tf.assign(g, g_init_value)]
if not cosine_classifier:
# Also initialize a bias in a data-dependent way.
b_fc_init_value = -mean_init * g_init_value
ops.append(tf.assign(b_fc, b_fc_init_value))
# Mark that the data-dependent initialization is done to prevent it from
# happening again in the future.
ops.append(tf.assign(data_dependent_init_done, 1))
return tf.group(*ops)
# Possibly perform data-dependent init (if it hasn't been done already).
init_op = tf.cond(
tf.equal(data_dependent_init_done, 0), _do_data_dependent_init,
tf.no_op)
with tf.control_dependencies([init_op]):
# Apply weight normalization.
w_fc *= g / tf.sqrt(tf.reduce_sum(tf.square(w_fc), [0]))
# Forward pass through the layer defined by w_fc and b_fc.
logits = linear_classifier_forward_pass(embeddings, w_fc, b_fc,
cosine_classifier,
cosine_logits_multiplier, True)
else:
# No weight norm.
w_fc = functional_backbones.weight_variable([embedding_dims, num_classes],
weight_decay=weight_decay)
b_fc = None
if not cosine_classifier:
# Also initialize a bias.
b_fc = functional_backbones.bias_variable([num_classes])
# Forward pass through the layer defined by w_fc and b_fc.
logits = linear_classifier_forward_pass(embeddings, w_fc, b_fc,
cosine_classifier,
cosine_logits_multiplier, False)
return logits
@gin.configurable
def separate_head_linear_classifier(embeddings, num_classes, dataset_idx,
start_idx, cosine_classifier,
cosine_logits_multiplier, learnable_scale,
weight_decay):
"""A linear classifier with num_sets heads, for different datasets.
Args:
embeddings: A Tensor of size [batch size, embedding dim].
num_classes: A list of integers; the dimension of the classifier layers of
the different heads.
dataset_idx: An int Tensor. The index of the dataset head to use.
start_idx: An int Tensor. The index of the first class of the given dataset.
cosine_classifier: A bool. If true, a cosine classifier is used, which does
not require a bias.
cosine_logits_multiplier: A float. Only used if cosine_classifier is True,
and multiplies the resulting logits.
learnable_scale: A bool. Whether to make the cosine_logits_multiplier a
learnable parameter. Only applies if cosine_classifier is True.
weight_decay: A float; the scalar multiple on the L2 regularization of the
weight matrix.
Returns:
logits: A Tensor of size [batch size, num outputs].
"""
if not cosine_classifier:
raise NotImplementedError('`separate_head_linear_classifier` currently '
'only supports `cosine_classifier` True.')
if learnable_scale:
cosine_logits_multiplier = tf.get_variable(
'cosine_scale',
initializer=cosine_logits_multiplier,
dtype=tf.float32,
trainable=True)
embedding_dims = embeddings.get_shape().as_list()[-1]
w_fc = functional_backbones.weight_variable(
[embedding_dims, sum(num_classes)], weight_decay=weight_decay)
# Select the output "head" to use in the forward pass.
dataset_num_classes = tf.gather(num_classes, dataset_idx)
w_fc = w_fc[:, start_idx:start_idx + dataset_num_classes]
logits = linear_classifier_forward_pass(embeddings, w_fc, None,
cosine_classifier,
cosine_logits_multiplier, False)
return logits
| [((201, 24, 201, 59), 'tensorflow.compat.v1.gather', 'tf.gather', ({(201, 34, 201, 45): 'num_classes', (201, 47, 201, 58): 'dataset_idx'}, {}), '(num_classes, dataset_idx)', True, 'import tensorflow.compat.v1 as tf\n'), ((52, 17, 52, 69), 'tensorflow.compat.v1.nn.l2_normalize', 'tf.nn.l2_normalize', (), '', True, 'import tensorflow.compat.v1 as tf\n'), ((56, 13, 56, 40), 'tensorflow.compat.v1.matmul', 'tf.matmul', ({(56, 23, 56, 33): 'embeddings', (56, 35, 56, 39): 'w_fc'}, {}), '(embeddings, w_fc)', True, 'import tensorflow.compat.v1 as tf\n'), ((92, 31, 96, 24), 'tensorflow.compat.v1.get_variable', 'tf.get_variable', (), '', True, 'import tensorflow.compat.v1 as tf\n'), ((147, 11, 148, 74), 'meta_dataset.models.functional_backbones.weight_variable', 'functional_backbones.weight_variable', (), '', False, 'from meta_dataset.models import functional_backbones\n'), ((190, 31, 194, 23), 'tensorflow.compat.v1.get_variable', 'tf.get_variable', (), '', True, 'import tensorflow.compat.v1 as tf\n'), ((55, 13, 55, 59), 'tensorflow.compat.v1.nn.l2_normalize', 'tf.nn.l2_normalize', (), '', True, 'import tensorflow.compat.v1 as tf\n'), ((62, 13, 62, 40), 'tensorflow.compat.v1.matmul', 'tf.matmul', ({(62, 23, 62, 33): 'embeddings', (62, 35, 62, 39): 'w_fc'}, {}), '(embeddings, w_fc)', True, 'import tensorflow.compat.v1 as tf\n'), ((118, 20, 118, 58), 'tensorflow.compat.v1.matmul', 'tf.matmul', ({(118, 30, 118, 40): 'embeddings', (118, 42, 118, 57): 'w_fc_normalized'}, {}), '(embeddings, w_fc_normalized)', True, 'import tensorflow.compat.v1 as tf\n'), ((119, 28, 119, 59), 'tensorflow.compat.v1.nn.moments', 'tf.nn.moments', ({(119, 42, 119, 53): 'output_init', (119, 55, 119, 58): '[0]'}, {}), '(output_init, [0])', True, 'import tensorflow.compat.v1 as tf\n'), ((130, 13, 130, 27), 'tensorflow.compat.v1.group', 'tf.group', ({(130, 22, 130, 26): '*ops'}, {}), '(*ops)', True, 'import tensorflow.compat.v1 as tf\n'), ((134, 8, 134, 45), 'tensorflow.compat.v1.equal', 'tf.equal', ({(134, 17, 134, 41): 'data_dependent_init_done', (134, 43, 134, 44): '0'}, {}), '(data_dependent_init_done, 0)', True, 'import tensorflow.compat.v1 as tf\n'), ((137, 9, 137, 43), 'tensorflow.compat.v1.control_dependencies', 'tf.control_dependencies', ({(137, 33, 137, 42): '[init_op]'}, {}), '([init_op])', True, 'import tensorflow.compat.v1 as tf\n'), ((152, 13, 152, 62), 'meta_dataset.models.functional_backbones.bias_variable', 'functional_backbones.bias_variable', ({(152, 48, 152, 61): '[num_classes]'}, {}), '([num_classes])', False, 'from meta_dataset.models import functional_backbones\n'), ((100, 20, 100, 57), 'tensorflow.compat.v1.random_normal_initializer', 'tf.random_normal_initializer', ({(100, 49, 100, 50): '0', (100, 52, 100, 56): '0.05'}, {}), '(0, 0.05)', True, 'import tensorflow.compat.v1 as tf\n'), ((107, 20, 107, 42), 'tensorflow.compat.v1.ones', 'tf.ones', ({(107, 28, 107, 41): '[num_classes]'}, {}), '([num_classes])', True, 'import tensorflow.compat.v1 as tf\n'), ((121, 26, 121, 51), 'tensorflow.compat.v1.sqrt', 'tf.sqrt', ({(121, 34, 121, 50): '(var_init + 1e-10)'}, {}), '(var_init + 1e-10)', True, 'import tensorflow.compat.v1 as tf\n'), ((122, 13, 122, 39), 'tensorflow.compat.v1.assign', 'tf.assign', ({(122, 23, 122, 24): 'g', (122, 26, 122, 38): 'g_init_value'}, {}), '(g, g_init_value)', True, 'import tensorflow.compat.v1 as tf\n'), ((129, 17, 129, 55), 'tensorflow.compat.v1.assign', 'tf.assign', ({(129, 27, 129, 51): 'data_dependent_init_done', (129, 53, 129, 54): '(1)'}, {}), '(data_dependent_init_done, 1)', 
True, 'import tensorflow.compat.v1 as tf\n'), ((113, 30, 113, 53), 'tensorflow.compat.v1.zeros', 'tf.zeros', ({(113, 39, 113, 52): '[num_classes]'}, {}), '([num_classes])', True, 'import tensorflow.compat.v1 as tf\n'), ((126, 19, 126, 51), 'tensorflow.compat.v1.assign', 'tf.assign', ({(126, 29, 126, 33): 'b_fc', (126, 35, 126, 50): 'b_fc_init_value'}, {}), '(b_fc, b_fc_init_value)', True, 'import tensorflow.compat.v1 as tf\n'), ((139, 40, 139, 55), 'tensorflow.compat.v1.square', 'tf.square', ({(139, 50, 139, 54): 'w_fc'}, {}), '(w_fc)', True, 'import tensorflow.compat.v1 as tf\n')] |
Shrinidhi-C/Context-Based-Question-Answering | app.py | f2e0bbc03003aae65f4cabddecd5cd9fcdbfb333 | import os
import logging
import threading
import shutil
from datetime import timedelta, datetime
from flask import Flask, render_template, request, session, jsonify, url_for, redirect
from haystack.document_store.elasticsearch import *
from haystack.preprocessor.utils import convert_files_to_dicts
from haystack.preprocessor.cleaning import clean_wiki_text
from haystack import Finder
from haystack.retriever.sparse import ElasticsearchRetriever
from haystack.reader.transformers import TransformersReader
from elasticsearch import Elasticsearch
es = (
Elasticsearch()
) # Replace with Elasticsearch(["http://elasticsearch:9200/"], verify_certs=True) to build docker image
session_time = 60 # Session Timeout in Minutes
app = Flask(__name__)
app.secret_key = "cbqa_123"
app.permanent_session_lifetime = timedelta(minutes=session_time)
user_id = 0 # User ID to keep track w.r.t sessions and context data
current_users = dict() # Used to store user id with time of login
user_doc_store = dict() # Document store object of the user id
user_settings = dict() # User settings for GPU and Pre-trained models choice
# Handles pre-processing the context and uploads the pre-processed context to Elasticsearch
# Each user is assigned with a separate Elasticsearch index starting with "user_{user_id}"
# Documents & textual context are deleted from the temp folder named with user_id under users dir after uploading to Es
def pre_process(user_id_key):
uploads_dir = "users/" + str(user_id_key) + "/uploads/"
try:
es_result = es.search(
index="user_" + str(user_id_key), body={"query": {"match_all": {}}}
)
no_docs = len(es_result["hits"]["hits"])
except Exception as e:
print(e)
print("\n no documents in es")
processed = convert_files_to_dicts(
dir_path=uploads_dir, clean_func=clean_wiki_text, split_paragraphs=True
)
for doc in range(len(processed)):
try:
# print("\n Checking for duplicate docs ..")
add_doc = True
for each_doc in range(no_docs):
doc_text = es_result["hits"]["hits"][each_doc]["_source"]["text"]
doc_name = es_result["hits"]["hits"][each_doc]["_source"]["name"]
doc_id = es_result["hits"]["hits"][each_doc]["_id"]
if (
processed[doc]["meta"]["name"] == "context_file.txt"
and doc_name == "context_file.txt"
):
# print("Deleting context file to update with new changes ..")
es.delete(
index="user_" + str(user_id_key), doc_type="_doc", id=doc_id
)
if processed[doc]["text"] == doc_text:
# print("\n There is a duplicate, So this document is not added ..")
add_doc = False
os.remove(uploads_dir + str(processed[doc]["meta"]["name"]))
break
if add_doc:
# print("\n No duplicates found, so adding this to es..")
processed_lst = [processed[doc]]
user_doc_store[user_id_key].write_documents(processed_lst)
os.remove(uploads_dir + str(processed[doc]["meta"]["name"]))
except Exception as e:
print(e)
# print("\n no documents in es")
processed_lst = [processed[doc]]
user_doc_store[user_id_key].write_documents(processed_lst)
os.remove(uploads_dir + str(processed[doc]["meta"]["name"]))
# Handles setting up reader and retriever
def set_finder(user_id_key):
if user_settings[user_id_key]["model"] == "roberta":
model_path = (
"deepset/roberta-base-squad2" # Path of the models hosted in Hugging Face
)
elif user_settings[user_id_key]["model"] == "bert":
model_path = "deepset/bert-large-uncased-whole-word-masking-squad2"
elif user_settings[user_id_key]["model"] == "distilbert":
model_path = "distilbert-base-uncased-distilled-squad"
else:
model_path = "illuin/camembert-base-fquad"
retriever = ElasticsearchRetriever(document_store=user_doc_store[user_id_key])
if user_settings[user_id_key]["gpu"] == "on":
try:
reader = TransformersReader(
model_name_or_path=model_path, tokenizer=model_path, use_gpu=0
)
except Exception as e:
print(e)
print("GPU not available. Inferencing on CPU")
reader = TransformersReader(
model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1
)
else:
reader = TransformersReader(
model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1
)
finder = Finder(reader, retriever)
return finder
# Handles deletion of context data completely from the server after the session time ends and deletes user id from dict
def user_session_timer():
global current_users, session_time
seconds_in_day = 24 * 60 * 60
print("\n User tracker thread started @ ", datetime.now())
while True:
for user_id_key in current_users.copy():
current_time = datetime.now()
user_time = current_users[user_id_key]
difference = current_time - user_time
time_diff = divmod(
difference.days * seconds_in_day + difference.seconds, 60
)
if time_diff[0] >= session_time:
try:
del current_users[user_id_key]
del user_doc_store[user_id_key]
del user_settings[user_id_key]
shutil.rmtree("users/" + str(user_id_key))
es.indices.delete(
index="user_" + str(user_id_key), ignore=[400, 404]
)
except OSError as e:
print("Error: %s - %s." % (e.filename, e.strerror))
# print("\n Deleted user:", user_id_key, " @", datetime.now())
session_timer = threading.Thread(target=user_session_timer)
session_timer.start()
# Handles users w.r.t new session or already in session
@app.route("/")
def home():
global user_id, current_users, session_time
logging.info(
"User connected at "
+ str(datetime.now())
+ " with IP: "
+ str(request.environ["REMOTE_ADDR"])
)
if "user" in session and session["user"] in current_users:
user_id = session["user"]
logged_on = current_users[user_id]
current_time = datetime.now()
diff_min_sec = (
int(datetime.strftime(current_time, "%M"))
- int(datetime.strftime(logged_on, "%M"))
) * 60
diff_sec = int(datetime.strftime(current_time, "%S")) - int(
datetime.strftime(logged_on, "%S")
)
diff_time = diff_min_sec + diff_sec
time_left = (
session_time * 60
) - diff_time # For session timeout on client side
return render_template("index.html", time_left=time_left)
else:
session.permanent = True
current_time = datetime.now()
user_id += 1
current_users[user_id] = current_time
session["user"] = user_id
# print(current_users)
if not os.path.exists("users/"): # Creating user temp dir for uploading context
os.makedirs("users/" + str(user_id))
os.makedirs("users/" + str(user_id) + "/uploads")
else:
os.makedirs("users/" + str(user_id))
os.makedirs("users/" + str(user_id) + "/uploads")
user_doc_store[user_id] = ElasticsearchDocumentStore(
host="localhost", index="user_" + str(user_id)
) # Change host = "elasticsearch" to build docker image
user_settings[user_id] = {
"gpu": "off",
"model": "roberta",
} # Initial user settings
logged_on = current_users[user_id]
current_time = datetime.now()
diff_min_sec = (
int(datetime.strftime(current_time, "%M"))
- int(datetime.strftime(logged_on, "%M"))
) * 60
diff_sec = int(datetime.strftime(current_time, "%S")) - int(
datetime.strftime(logged_on, "%S")
)
diff_time = diff_min_sec + diff_sec
time_left = (
session_time * 60
) - diff_time # For session timeout on client side
return render_template("index.html", time_left=time_left)
# Handles context documents uploads
@app.route("/upload_file", methods=["GET", "POST"])
def upload_file():
global current_users
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
for f in request.files.getlist("file"):
f.save(
os.path.join("users/" + str(user_id_key) + "/uploads", f.filename)
)
pre_process(user_id_key)
return render_template("index.html")
else:
return redirect(url_for("session_timeout"))
else:
return redirect(url_for("session_timeout"))
# Handles context added through the textbox
@app.route("/context", methods=["POST"])
def context():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
text_context = request.form["context"]
context_file = open(
"users/" + str(user_id_key) + "/uploads/context_file.txt", "w"
)
context_file.write(text_context)
context_file.close()
pre_process(user_id_key)
return jsonify({"output": "" + text_context})
else:
return render_template("session_out.html")
else:
return redirect(url_for("session_timeout"))
# Provides extracted answers for the posted question
@app.route("/question", methods=["POST"])
def question():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
query_question = request.form["question"]
es_stats = es.indices.stats(index="user_" + str(user_id_key))
user_index_size = es_stats["_all"]["primaries"]["store"]["size_in_bytes"]
if (
user_index_size == 208
): # To check if index in Es is empty. 208 bytes is default index size without docs
return jsonify({"error": "add context"})
finder = set_finder(user_id_key)
answers_dict = finder.get_answers(
question=query_question, top_k_retriever=5, top_k_reader=5
)
unique_answers = list()
output = list()
if len(answers_dict["answers"]) > 0:
for i in range(len(answers_dict["answers"])):
if (
answers_dict["answers"][i]["answer"] is not None
and answers_dict["answers"][i]["answer"] not in unique_answers
):
temp_dict = answers_dict["answers"][i]
remove = (
"score",
"probability",
"offset_start",
"offset_end",
"document_id",
)
unique_answers.append(temp_dict["answer"])
if temp_dict["meta"]["name"] == "context_file.txt":
temp_dict["meta"]["name"] = "Textual Context"
temp_dict["meta"] = temp_dict["meta"]["name"]
output.append(temp_dict)
for key in remove:
if key in temp_dict:
del temp_dict[key]
else:
output = [
{"answer": "No Answers found ..", "context": " ", "meta": " "},
]
return jsonify({"output": output})
else:
return render_template("session_out.html")
# Handles GPU setting changes.
@app.route("/gpu", methods=["POST"])
def gpu():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
if user_settings[user_id_key]["gpu"] == "on":
user_settings[user_id_key]["gpu"] = "off"
else:
user_settings[user_id_key]["gpu"] = "on"
return jsonify({"output": "gpu status changed"})
# Handles pre-trained model choice setting changes.
@app.route("/models", methods=["POST"])
def models():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
user_settings[user_id_key]["model"] = request.form["model"]
return jsonify({"output": "model changed"})
# Handles session timeout redirection
@app.route("/session_timeout")
def session_timeout():
return render_template("session_out.html")
# Handles removing of session identifier from session dict, This works only when app tab is open until session completes
@app.route("/session_out", methods=["POST"])
def session_out():
session.pop("user", None)
return redirect(url_for("session_timeout"))
# Comment the below block in case of building a docker image or running on WSGI server like gunicorn
if __name__ == "__main__":
app.run(host="0.0.0.0")
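# Example client usage (illustrative sketch: endpoint paths follow the routes defined
# above, host/port assume the Flask defaults, and the context/question strings are
# made-up values):
#   import requests
#   client = requests.Session()
#   client.get("http://localhost:5000/")  # obtains the session cookie
#   client.post("http://localhost:5000/context",
#               data={"context": "Paris is the capital of France."})
#   reply = client.post("http://localhost:5000/question",
#                       data={"question": "What is the capital of France?"})
#   print(reply.json()["output"])  # list of answer dicts with answer/context/meta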
| [((15, 4, 15, 19), 'elasticsearch.Elasticsearch', 'Elasticsearch', ({}, {}), '()', False, 'from elasticsearch import Elasticsearch\n'), ((18, 6, 18, 21), 'flask.Flask', 'Flask', ({(18, 12, 18, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((20, 33, 20, 64), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import timedelta, datetime\n'), ((145, 16, 145, 59), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((41, 16, 43, 5), 'haystack.preprocessor.utils.convert_files_to_dicts', 'convert_files_to_dicts', (), '', False, 'from haystack.preprocessor.utils import convert_files_to_dicts\n'), ((94, 16, 94, 82), 'haystack.retriever.sparse.ElasticsearchRetriever', 'ElasticsearchRetriever', (), '', False, 'from haystack.retriever.sparse import ElasticsearchRetriever\n'), ((113, 13, 113, 38), 'haystack.Finder', 'Finder', ({(113, 20, 113, 26): 'reader', (113, 28, 113, 37): 'retriever'}, {}), '(reader, retriever)', False, 'from haystack import Finder\n'), ((325, 11, 325, 52), 'flask.jsonify', 'jsonify', ({(325, 19, 325, 51): "{'output': 'gpu status changed'}"}, {}), "({'output': 'gpu status changed'})", False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((335, 11, 335, 47), 'flask.jsonify', 'jsonify', ({(335, 19, 335, 46): "{'output': 'model changed'}"}, {}), "({'output': 'model changed'})", False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((341, 11, 341, 46), 'flask.render_template', 'render_template', ({(341, 27, 341, 45): '"""session_out.html"""'}, {}), "('session_out.html')", False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((347, 4, 347, 29), 'flask.session.pop', 'session.pop', ({(347, 16, 347, 22): '"""user"""', (347, 24, 347, 28): 'None'}, {}), "('user', None)", False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((109, 17, 111, 9), 'haystack.reader.transformers.TransformersReader', 'TransformersReader', (), '', False, 'from haystack.reader.transformers import TransformersReader\n'), ((122, 47, 122, 61), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import timedelta, datetime\n'), ((163, 23, 163, 37), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import timedelta, datetime\n'), ((175, 15, 175, 65), 'flask.render_template', 'render_template', (), '', False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((179, 23, 179, 37), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import timedelta, datetime\n'), ((199, 23, 199, 37), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import timedelta, datetime\n'), ((212, 15, 212, 65), 'flask.render_template', 'render_template', (), '', False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((348, 20, 348, 46), 'flask.url_for', 'url_for', ({(348, 28, 348, 45): '"""session_timeout"""'}, {}), "('session_timeout')", False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((98, 21, 100, 13), 'haystack.reader.transformers.TransformersReader', 'TransformersReader', (), '', False, 'from haystack.reader.transformers import TransformersReader\n'), ((125, 27, 125, 41), 
'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import timedelta, datetime\n'), ((184, 15, 184, 39), 'os.path.exists', 'os.path.exists', ({(184, 30, 184, 38): '"""users/"""'}, {}), "('users/')", False, 'import os\n'), ((223, 21, 223, 50), 'flask.request.files.getlist', 'request.files.getlist', ({(223, 43, 223, 49): '"""file"""'}, {}), "('file')", False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((230, 19, 230, 48), 'flask.render_template', 'render_template', ({(230, 35, 230, 47): '"""index.html"""'}, {}), "('index.html')", False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((236, 24, 236, 50), 'flask.url_for', 'url_for', ({(236, 32, 236, 49): '"""session_timeout"""'}, {}), "('session_timeout')", False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((252, 19, 252, 57), 'flask.jsonify', 'jsonify', ({(252, 27, 252, 56): "{'output': '' + text_context}"}, {}), "({'output': '' + text_context})", False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((255, 19, 255, 54), 'flask.render_template', 'render_template', ({(255, 35, 255, 53): '"""session_out.html"""'}, {}), "('session_out.html')", False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((258, 24, 258, 50), 'flask.url_for', 'url_for', ({(258, 32, 258, 49): '"""session_timeout"""'}, {}), "('session_timeout')", False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((309, 19, 309, 46), 'flask.jsonify', 'jsonify', ({(309, 27, 309, 45): "{'output': output}"}, {}), "({'output': output})", False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((312, 19, 312, 54), 'flask.render_template', 'render_template', ({(312, 35, 312, 53): '"""session_out.html"""'}, {}), "('session_out.html')", False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((104, 21, 106, 13), 'haystack.reader.transformers.TransformersReader', 'TransformersReader', (), '', False, 'from haystack.reader.transformers import TransformersReader\n'), ((168, 23, 168, 60), 'datetime.datetime.strftime', 'datetime.strftime', ({(168, 41, 168, 53): 'current_time', (168, 55, 168, 59): '"""%S"""'}, {}), "(current_time, '%S')", False, 'from datetime import timedelta, datetime\n'), ((169, 12, 169, 46), 'datetime.datetime.strftime', 'datetime.strftime', ({(169, 30, 169, 39): 'logged_on', (169, 41, 169, 45): '"""%S"""'}, {}), "(logged_on, '%S')", False, 'from datetime import timedelta, datetime\n'), ((204, 23, 204, 60), 'datetime.datetime.strftime', 'datetime.strftime', ({(204, 41, 204, 53): 'current_time', (204, 55, 204, 59): '"""%S"""'}, {}), "(current_time, '%S')", False, 'from datetime import timedelta, datetime\n'), ((205, 12, 205, 46), 'datetime.datetime.strftime', 'datetime.strftime', ({(205, 30, 205, 39): 'logged_on', (205, 41, 205, 45): '"""%S"""'}, {}), "(logged_on, '%S')", False, 'from datetime import timedelta, datetime\n'), ((233, 28, 233, 54), 'flask.url_for', 'url_for', ({(233, 36, 233, 53): '"""session_timeout"""'}, {}), "('session_timeout')", False, 'from flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((273, 23, 273, 56), 'flask.jsonify', 'jsonify', ({(273, 31, 273, 55): "{'error': 'add context'}"}, {}), "({'error': 'add context'})", False, 'from 
flask import Flask, render_template, request, session, jsonify, url_for, redirect\n'), ((165, 16, 165, 53), 'datetime.datetime.strftime', 'datetime.strftime', ({(165, 34, 165, 46): 'current_time', (165, 48, 165, 52): '"""%M"""'}, {}), "(current_time, '%M')", False, 'from datetime import timedelta, datetime\n'), ((166, 18, 166, 52), 'datetime.datetime.strftime', 'datetime.strftime', ({(166, 36, 166, 45): 'logged_on', (166, 47, 166, 51): '"""%M"""'}, {}), "(logged_on, '%M')", False, 'from datetime import timedelta, datetime\n'), ((201, 16, 201, 53), 'datetime.datetime.strftime', 'datetime.strftime', ({(201, 34, 201, 46): 'current_time', (201, 48, 201, 52): '"""%M"""'}, {}), "(current_time, '%M')", False, 'from datetime import timedelta, datetime\n'), ((202, 18, 202, 52), 'datetime.datetime.strftime', 'datetime.strftime', ({(202, 36, 202, 45): 'logged_on', (202, 47, 202, 51): '"""%M"""'}, {}), "(logged_on, '%M')", False, 'from datetime import timedelta, datetime\n'), ((155, 14, 155, 28), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import timedelta, datetime\n')] |
timevortexproject/timevortex | timevortex/utils/filestorage.py | 2bc1a50b255524af8582e6624dee280d64d3c9f3 | #!/usr/bin/python3
# -*- coding: utf8 -*-
# -*- Mode: Python; py-indent-offset: 4 -*-
"""File storage adapter for timevortex project"""
import os
from os import listdir, makedirs
from os.path import isfile, join, exists
from time import tzname
from datetime import datetime
import pytz
import dateutil.parser
from django.conf import settings
from django.utils import timezone
from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE
from timevortex.utils.globals import KEY_DST_TIMEZONE, KEY_NON_DST_TIMEZONE, SYSTEM_SITE_ID
SETTINGS_FILE_STORAGE_FOLDER = "SETTINGS_FILE_STORAGE_FOLDER"
SETTINGS_DEFAULT_FILE_STORAGE_FOLDER = "/tmp/data/"
def get_lines_number(file_path):
"""Get lines number
"""
return sum(1 for line in open(file_path))
def get_series_per_file(site_folder, file_prefix):
"""Get series per file
"""
series = {}
for filename in listdir(site_folder):
is_file = isfile(join(site_folder, filename))
if is_file and file_prefix in filename:
complete_filename = "%s/%s" % (site_folder, filename)
with open(complete_filename, "r") as filed:
temp_series = filed.readlines()
for line in temp_series:
array_line = line.split("\t")
if len(array_line) >= 2:
series[array_line[1]] = array_line[0]
return series
def get_last_file_name(site_folder, file_prefix):
"""Get last filename
"""
old_date = None
last_filename = ""
for new_filename in listdir(site_folder):
is_file = isfile(join(site_folder, new_filename))
if is_file and file_prefix in new_filename:
old_date, last_filename = update_last_file_name(file_prefix, old_date, last_filename, new_filename)
return last_filename
def update_last_file_name(file_prefix, old_date, last_filename, new_filename):
"""Update last file name
"""
try:
new_date = new_filename.replace(file_prefix, "")
new_date = datetime.strptime(new_date, "%Y-%m-%d")
if old_date is None or new_date > old_date:
return new_date, new_filename
except ValueError:
LOGGER.error("Not right file")
return old_date, last_filename
class FileStorage(object):
"""Class that help us to store and load data over several file"""
def __init__(self, folder_path):
"""Constructor"""
self.folder_path = folder_path
if not exists(self.folder_path):
makedirs(self.folder_path)
def insert_series(self, series):
"""Insert series in DB
:param series: Representation of a series
:type series: dict.
"""
self.insert(series)
def insert(self, message):
"""Insert data in file"""
file_folder = "%s/%s" % (self.folder_path, message[KEY_SITE_ID])
file_date = timezone.localtime(
dateutil.parser.parse(message[KEY_DATE]).replace(tzinfo=pytz.UTC)).strftime("%Y-%m-%d")
if not exists(file_folder):
makedirs(file_folder)
raw_file = "%s/%s.tsv.%s" % (
file_folder, message[KEY_VARIABLE_ID], file_date)
extracted = open(raw_file, "a+")
extracted.write("%s\t%s\t%s\t%s\n" % (
message[KEY_VALUE],
message[KEY_DATE],
message[KEY_DST_TIMEZONE],
message[KEY_NON_DST_TIMEZONE]))
extracted.close()
def insert_error(self, message):
"""Function that store error in errors collection and in log
:param message: Error to insert in DB
:type message: str.
"""
LOGGER.error(message)
message[KEY_VARIABLE_ID] = KEY_ERROR
self.insert(message)
def store_error(self, error):
"""Function that create valid error message
            :param error: Malformed message
:type error: str.
"""
message = {
KEY_VALUE: error,
KEY_VARIABLE_ID: KEY_ERROR,
KEY_SITE_ID: SYSTEM_SITE_ID,
KEY_DATE: datetime.utcnow().isoformat('T'),
KEY_DST_TIMEZONE: tzname[1],
KEY_NON_DST_TIMEZONE: tzname[0]
}
LOGGER.error(error)
self.insert(message)
def get_series(self, site_id, variable_id):
"""Retrieve all series for a variable_id in site_id
"""
element = variable_id
file_prefix = "%s.tsv." % element
site_folder = "%s/%s" % (self.folder_path, site_id)
if exists(site_folder):
series = get_series_per_file(site_folder, file_prefix)
else:
series = {}
return series
def get_last_series(self, site_id, variable_id):
"""Retrieve last value of variable_id in site_id
"""
element = variable_id
file_prefix = "%s.tsv." % element
site_folder = "%s/%s" % (self.folder_path, site_id)
if exists(site_folder):
last_filename = get_last_file_name(site_folder, file_prefix)
last_filename = "%s/%s" % (site_folder, last_filename)
try:
with open(last_filename, "rb") as filed2:
for last in filed2:
pass
except IsADirectoryError:
return None
LOGGER.debug(last) # pylint: disable=I0011,W0631
last = last.decode("utf-8").replace("\n", "") # pylint: disable=I0011,W0631
return {
KEY_VARIABLE_ID: element,
KEY_SITE_ID: site_id,
KEY_VALUE: last.split("\t")[0],
KEY_DATE: last.split("\t")[1],
KEY_DST_TIMEZONE: last.split("\t")[2],
KEY_NON_DST_TIMEZONE: last.split("\t")[3]
}
return None
def get_last_error(self, site_id):
"""Retrieve last error of a site_id file storage
"""
return self.get_last_series(site_id, KEY_ERROR)
def get_number_of_error(self, site_id, day_date):
"""This method retrieve number of error published for a day_date
"""
element = KEY_ERROR
site_folder = "%s/%s" % (self.folder_path, site_id)
filename = "%s.tsv.%s" % (element, day_date)
file_path = "%s/%s" % (site_folder, filename)
if exists(site_folder) and exists(file_path):
return get_lines_number(file_path)
return 0
def get_number_of_series(self, site_id, day_date):
"""This method retrieve number of series published for a day_date
"""
site_folder = "%s/%s" % (self.folder_path, site_id)
series = []
if exists(site_folder):
for filename in listdir(site_folder):
if "%s.tsv" % KEY_ERROR not in filename and day_date in filename:
file_path = "%s/%s" % (site_folder, filename)
var_id = filename.replace(".tsv.%s" % day_date, "")
series_numbers = get_lines_number(file_path)
series.append([var_id, series_numbers])
return series
def set_data_location(self, folder_path):
"""Set data folder space"""
self.folder_path = folder_path
def get_sites_list(self):
"""Get sites list"""
return os.listdir(self.folder_path)
FILE_STORAGE_SPACE = FileStorage(getattr(settings, SETTINGS_FILE_STORAGE_FOLDER, SETTINGS_DEFAULT_FILE_STORAGE_FOLDER))
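# Example usage (illustrative sketch: the folder, site and variable names are arbitrary,
# and the message layout mirrors what insert() expects with the imported KEY_* constants):
#   storage = FileStorage("/tmp/data/")
#   storage.insert({
#       KEY_SITE_ID: "site_1",
#       KEY_VARIABLE_ID: "temperature",
#       KEY_VALUE: "21.5",
#       KEY_DATE: datetime.utcnow().isoformat('T'),
#       KEY_DST_TIMEZONE: tzname[1],
#       KEY_NON_DST_TIMEZONE: tzname[0],
#   })
#   last = storage.get_last_series("site_1", "temperature")  # dict with value, date, timezones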
| [((33, 20, 33, 40), 'os.listdir', 'listdir', ({(33, 28, 33, 39): 'site_folder'}, {}), '(site_folder)', False, 'from os import listdir, makedirs\n'), ((51, 24, 51, 44), 'os.listdir', 'listdir', ({(51, 32, 51, 43): 'site_folder'}, {}), '(site_folder)', False, 'from os import listdir, makedirs\n'), ((63, 19, 63, 58), 'datetime.datetime.strptime', 'datetime.strptime', ({(63, 37, 63, 45): 'new_date', (63, 47, 63, 57): '"""%Y-%m-%d"""'}, {}), "(new_date, '%Y-%m-%d')", False, 'from datetime import datetime\n'), ((113, 8, 113, 29), 'timevortex.utils.globals.LOGGER.error', 'LOGGER.error', ({(113, 21, 113, 28): 'message'}, {}), '(message)', False, 'from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE\n'), ((131, 8, 131, 27), 'timevortex.utils.globals.LOGGER.error', 'LOGGER.error', ({(131, 21, 131, 26): 'error'}, {}), '(error)', False, 'from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE\n'), ((140, 11, 140, 30), 'os.path.exists', 'exists', ({(140, 18, 140, 29): 'site_folder'}, {}), '(site_folder)', False, 'from os.path import isfile, join, exists\n'), ((152, 11, 152, 30), 'os.path.exists', 'exists', ({(152, 18, 152, 29): 'site_folder'}, {}), '(site_folder)', False, 'from os.path import isfile, join, exists\n'), ((198, 11, 198, 30), 'os.path.exists', 'exists', ({(198, 18, 198, 29): 'site_folder'}, {}), '(site_folder)', False, 'from os.path import isfile, join, exists\n'), ((214, 15, 214, 43), 'os.listdir', 'os.listdir', ({(214, 26, 214, 42): 'self.folder_path'}, {}), '(self.folder_path)', False, 'import os\n'), ((34, 25, 34, 52), 'os.path.join', 'join', ({(34, 30, 34, 41): 'site_folder', (34, 43, 34, 51): 'filename'}, {}), '(site_folder, filename)', False, 'from os.path import isfile, join, exists\n'), ((52, 25, 52, 56), 'os.path.join', 'join', ({(52, 30, 52, 41): 'site_folder', (52, 43, 52, 55): 'new_filename'}, {}), '(site_folder, new_filename)', False, 'from os.path import isfile, join, exists\n'), ((67, 8, 67, 38), 'timevortex.utils.globals.LOGGER.error', 'LOGGER.error', ({(67, 21, 67, 37): '"""Not right file"""'}, {}), "('Not right file')", False, 'from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE\n'), ((77, 15, 77, 39), 'os.path.exists', 'exists', ({(77, 22, 77, 38): 'self.folder_path'}, {}), '(self.folder_path)', False, 'from os.path import isfile, join, exists\n'), ((78, 12, 78, 38), 'os.makedirs', 'makedirs', ({(78, 21, 78, 37): 'self.folder_path'}, {}), '(self.folder_path)', False, 'from os import listdir, makedirs\n'), ((94, 15, 94, 34), 'os.path.exists', 'exists', ({(94, 22, 94, 33): 'file_folder'}, {}), '(file_folder)', False, 'from os.path import isfile, join, exists\n'), ((95, 12, 95, 33), 'os.makedirs', 'makedirs', ({(95, 21, 95, 32): 'file_folder'}, {}), '(file_folder)', False, 'from os import listdir, makedirs\n'), ((162, 12, 162, 30), 'timevortex.utils.globals.LOGGER.debug', 'LOGGER.debug', ({(162, 25, 162, 29): 'last'}, {}), '(last)', False, 'from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE\n'), ((188, 11, 188, 30), 'os.path.exists', 'exists', ({(188, 18, 188, 29): 'site_folder'}, {}), '(site_folder)', False, 'from os.path import isfile, join, exists\n'), ((188, 35, 188, 52), 'os.path.exists', 'exists', ({(188, 42, 188, 51): 'file_path'}, {}), '(file_path)', False, 'from os.path import isfile, join, exists\n'), ((199, 28, 199, 48), 'os.listdir', 'listdir', 
({(199, 36, 199, 47): 'site_folder'}, {}), '(site_folder)', False, 'from os import listdir, makedirs\n'), ((127, 22, 127, 39), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime\n')] |
olegush/quiz-bot | main_tg.py | ae370d42f32c42b290a507924a801c63901d5148 | import os
import logging
import logging.config
from functools import partial
from dotenv import load_dotenv
from telegram import Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove
from telegram.ext import (Updater, CommandHandler, MessageHandler,
RegexHandler, ConversationHandler, Filters)
from redis import Redis
from tg_logging import create_logger
from quiz_tools import get_question_and_answer, format_answer, format_question
QUESTION, ATTEMPT = range(2)
def main():
class LoggerTelegramBot(logging.Handler):
def emit(self, record):
log_entry = self.format(record)
bot.send_message(chat_id=chat_id_tg_admin, text=log_entry)
dictLogConfig = {
'version': 1,
'handlers': {
'handler': {
'()': LoggerTelegramBot,
'formatter': 'formatter'
}
},
'loggers': {
'tg_logger': {
'handlers': ['handler'],
'level': 'INFO'
}
},
'formatters': {
'formatter': {
'format': '%(asctime)s - %(levelname)s - %(message)s'
}
}
}
load_dotenv()
chat_id_tg_admin = os.getenv('CHAT_ID_TG_ADMIN')
    token_tg = os.getenv('TOKEN_TG')
    bot = Bot(token=token_tg)
logging.config.dictConfig(dictLogConfig)
logger = logging.getLogger('tg_logger')
handler = LoggerTelegramBot()
logger.addHandler(handler)
rediser = Redis(
host=os.getenv('REDIS_HOST'),
port=os.getenv('REDIS_PORT'),
db=0,
password=os.getenv('REDIS_PWD'))
updater = Updater(token_tg)
dp = updater.dispatcher
logger.info(dp)
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
QUESTION: [
RegexHandler('^Выход$', do_exit),
MessageHandler(Filters.text, partial(handle_new_question, rediser))],
ATTEMPT: [
RegexHandler('^Выход$', do_exit),
RegexHandler('^(Новый вопрос|Другой вопрос)$', partial(handle_new_question, rediser)),
RegexHandler('^Показать ответ$', partial(display_answer, rediser)),
MessageHandler(Filters.text, partial(handle_attempt, rediser))],
},
fallbacks=[CommandHandler('cancel', do_exit)]
)
dp.add_handler(conv_handler)
updater.start_polling()
updater.idle()
def do_reply(update, text, keyboard=None):
if keyboard is None:
markup = ReplyKeyboardRemove()
return update.message.reply_text(text, reply_markup=markup)
markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
return update.message.reply_text(text, reply_markup=markup)
def start(bot, update):
do_reply(update, 'Привет знатоку в чате-викторине! Начинаем?', [['Да!']])
return QUESTION
def handle_new_question(rediser, bot, update):
new_question, new_answer = get_question_and_answer()
chat_id = update.message.chat_id
rediser.set(chat_id, new_answer)
do_reply(update, format_question(new_question))
return ATTEMPT
def display_answer(rediser, bot, update):
chat_id = update.message.chat_id
answer = rediser.get(chat_id).decode()
do_reply(update, answer, [['Новый вопрос', 'Выход']])
return QUESTION
def handle_attempt(rediser, bot, update):
chat_id = update.message.chat_id
attempt = update.message.text.strip().lower()
answer = rediser.get(chat_id).decode()
if attempt == format_answer(answer):
text = 'Правильно! \n\n {}'.format(answer)
reply_keyboard = [['Новый вопрос', 'Выход']]
else:
text = 'Неверно! Попробуйте еще раз.'
reply_keyboard = [['Показать ответ', 'Другой вопрос', 'Выход']]
do_reply(update, text, reply_keyboard)
return ATTEMPT
def do_exit(bot, update):
text = 'До скорой встречи! Желаете начать заново? Жмите /start'
do_reply(update, text)
return ConversationHandler.END
if __name__ == '__main__':
main()
| [((45, 4, 45, 17), 'dotenv.load_dotenv', 'load_dotenv', ({}, {}), '()', False, 'from dotenv import load_dotenv\n'), ((46, 23, 46, 52), 'os.getenv', 'os.getenv', ({(46, 33, 46, 51): '"""CHAT_ID_TG_ADMIN"""'}, {}), "('CHAT_ID_TG_ADMIN')", False, 'import os\n'), ((48, 4, 48, 44), 'logging.config.dictConfig', 'logging.config.dictConfig', ({(48, 30, 48, 43): 'dictLogConfig'}, {}), '(dictLogConfig)', False, 'import logging\n'), ((49, 13, 49, 43), 'logging.getLogger', 'logging.getLogger', ({(49, 31, 49, 42): '"""tg_logger"""'}, {}), "('tg_logger')", False, 'import logging\n'), ((57, 14, 57, 31), 'telegram.ext.Updater', 'Updater', ({(57, 22, 57, 30): 'token_tg'}, {}), '(token_tg)', False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, RegexHandler, ConversationHandler, Filters\n'), ((83, 13, 83, 64), 'telegram.ReplyKeyboardMarkup', 'ReplyKeyboardMarkup', (), '', False, 'from telegram import Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove\n'), ((93, 31, 93, 56), 'quiz_tools.get_question_and_answer', 'get_question_and_answer', ({}, {}), '()', False, 'from quiz_tools import get_question_and_answer, format_answer, format_question\n'), ((81, 17, 81, 38), 'telegram.ReplyKeyboardRemove', 'ReplyKeyboardRemove', ({}, {}), '()', False, 'from telegram import Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove\n'), ((96, 21, 96, 50), 'quiz_tools.format_question', 'format_question', ({(96, 37, 96, 49): 'new_question'}, {}), '(new_question)', False, 'from quiz_tools import get_question_and_answer, format_answer, format_question\n'), ((111, 18, 111, 39), 'quiz_tools.format_answer', 'format_answer', ({(111, 32, 111, 38): 'answer'}, {}), '(answer)', False, 'from quiz_tools import get_question_and_answer, format_answer, format_question\n'), ((47, 20, 47, 41), 'os.getenv', 'os.getenv', ({(47, 30, 47, 40): '"""TOKEN_TG"""'}, {}), "('TOKEN_TG')", False, 'import os\n'), ((53, 21, 53, 44), 'os.getenv', 'os.getenv', ({(53, 31, 53, 43): '"""REDIS_HOST"""'}, {}), "('REDIS_HOST')", False, 'import os\n'), ((54, 21, 54, 44), 'os.getenv', 'os.getenv', ({(54, 31, 54, 43): '"""REDIS_PORT"""'}, {}), "('REDIS_PORT')", False, 'import os\n'), ((56, 25, 56, 47), 'os.getenv', 'os.getenv', ({(56, 35, 56, 46): '"""REDIS_PWD"""'}, {}), "('REDIS_PWD')", False, 'import os\n'), ((61, 22, 61, 52), 'telegram.ext.CommandHandler', 'CommandHandler', ({(61, 37, 61, 44): '"""start"""', (61, 46, 61, 51): 'start'}, {}), "('start', start)", False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, RegexHandler, ConversationHandler, Filters\n'), ((72, 19, 72, 52), 'telegram.ext.CommandHandler', 'CommandHandler', ({(72, 34, 72, 42): '"""cancel"""', (72, 44, 72, 51): 'do_exit'}, {}), "('cancel', do_exit)", False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, RegexHandler, ConversationHandler, Filters\n'), ((64, 16, 64, 53), 'telegram.ext.RegexHandler', 'RegexHandler', ({(64, 29, 64, 43): '"""^Выход$"""', (64, 45, 64, 52): 'do_exit'}, {}), "('^Выход$', do_exit)", False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, RegexHandler, ConversationHandler, Filters\n'), ((67, 16, 67, 53), 'telegram.ext.RegexHandler', 'RegexHandler', ({(67, 29, 67, 43): '"""^Выход$"""', (67, 45, 67, 52): 'do_exit'}, {}), "('^Выход$', do_exit)", False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, RegexHandler, ConversationHandler, Filters\n'), ((65, 45, 65, 82), 'functools.partial', 'partial', ({(65, 53, 65, 72): 'handle_new_question', (65, 74, 65, 81): 'rediser'}, {}), 
'(handle_new_question, rediser)', False, 'from functools import partial\n'), ((68, 86, 68, 123), 'functools.partial', 'partial', ({(68, 94, 68, 113): 'handle_new_question', (68, 115, 68, 122): 'rediser'}, {}), '(handle_new_question, rediser)', False, 'from functools import partial\n'), ((69, 62, 69, 94), 'functools.partial', 'partial', ({(69, 70, 69, 84): 'display_answer', (69, 86, 69, 93): 'rediser'}, {}), '(display_answer, rediser)', False, 'from functools import partial\n'), ((70, 45, 70, 77), 'functools.partial', 'partial', ({(70, 53, 70, 67): 'handle_attempt', (70, 69, 70, 76): 'rediser'}, {}), '(handle_attempt, rediser)', False, 'from functools import partial\n')] |
titu1994/tf_fourier_features | tf_fourier_features/fourier_features_mlp.py | 3aead078ae79a278b9975e21f44560a7f51e3f31 | import tensorflow as tf
from typing import Optional
from tf_fourier_features import fourier_features
class FourierFeatureMLP(tf.keras.Model):
def __init__(self, units: int, final_units: int, gaussian_projection: Optional[int],
activation: str = 'relu',
final_activation: str = "linear",
num_layers: int = 1,
gaussian_scale: float = 1.0,
use_bias: bool = True, **kwargs):
"""
Fourier Feature Projection model from the paper
[Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/).
Used to create a multi-layer MLP with optional FourierFeatureProjection layer.
Args:
units: Number of hidden units in the intermediate layers.
final_units: Number of hidden units in the final layer.
activation: Activation in the hidden layers.
final_activation: Activation function of the final layer.
num_layers: Number of layers in the network.
gaussian_projection: Projection dimension for the gaussian kernel in fourier feature
projection layer. Can be None, negative or positive integer.
If None, then fourier feature map layer is not used.
If <=0, uses identity matrix (basic projection) without gaussian kernel.
If >=1, uses gaussian projection matrix of specified dim.
gaussian_scale: Scale of the gaussian kernel in fourier feature projection layer.
Note: If the scale is too small, convergence will slow down and obtain poor results.
If the scale is too large (>50), convergence will be fast but results will be grainy.
Try grid search for scales in the range [10 - 50].
use_bias: Boolean whether to use bias or not.
# References:
- [Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/)
"""
super().__init__(**kwargs)
layers = []
if gaussian_projection is not None:
layers.append(fourier_features.FourierFeatureProjection(
gaussian_projection=gaussian_projection,
gaussian_scale=gaussian_scale,
**kwargs
))
for _ in range(num_layers - 1):
layers.append(tf.keras.layers.Dense(units, activation=activation, use_bias=use_bias,
bias_initializer='he_uniform', **kwargs))
self.network = tf.keras.Sequential(layers)
self.final_dense = tf.keras.layers.Dense(final_units, activation=final_activation,
use_bias=use_bias, bias_initializer='he_uniform', **kwargs)
def call(self, inputs, training=None, mask=None):
features = self.network(inputs)
output = self.final_dense(features)
return output
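# Example usage (illustrative sketch: the layer sizes, scale and input shape are arbitrary
# demonstration values):
if __name__ == '__main__':
    model = FourierFeatureMLP(units=256, final_units=3, gaussian_projection=256,
                              gaussian_scale=10.0, num_layers=4)
    coords = tf.random.uniform([8, 2])  # batch of 8 normalized (x, y) coordinates
    preds = model(coords)
    print(preds.shape)  # (8, 3)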
| [((55, 23, 55, 50), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ({(55, 43, 55, 49): 'layers'}, {}), '(layers)', True, 'import tensorflow as tf\n'), ((56, 27, 57, 108), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (), '', True, 'import tensorflow as tf\n'), ((45, 26, 49, 13), 'tf_fourier_features.fourier_features.FourierFeatureProjection', 'fourier_features.FourierFeatureProjection', (), '', False, 'from tf_fourier_features import fourier_features\n'), ((52, 26, 53, 88), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (), '', True, 'import tensorflow as tf\n')] |
fusionbox/eek | eek/spider.py | 8e962b7ad80c594a3498190fead016db826771e0 | import urlparse
import csv
import sys
import re
import collections
import time
import requests
from eek import robotparser # this project's version
from bs4 import BeautifulSoup
try:
import lxml
except ImportError:
HTML_PARSER = None
else:
HTML_PARSER = 'lxml'
encoding_re = re.compile("charset\s*=\s*(\S+?)(;|$)")
html_re = re.compile("text/html")
headers = ['url', 'title', 'description', 'keywords', 'allow', 'disallow',
'noindex', 'meta robots', 'canonical', 'referer', 'status']
def encoding_from_content_type(content_type):
"""
Extracts the charset from a Content-Type header.
>>> encoding_from_content_type('text/html; charset=utf-8')
'utf-8'
>>> encoding_from_content_type('text/html')
>>>
"""
if not content_type:
return None
match = encoding_re.search(content_type)
return match and match.group(1) or None
class NotHtmlException(Exception):
pass
class UrlTask(tuple):
"""
We need to keep track of referers, but we don't want to add a url multiple
times just because it was referenced on multiple pages
"""
def __hash__(self):
return hash(self[0])
def __eq__(self, other):
return self[0] == other[0]
class VisitOnlyOnceClerk(object):
def __init__(self):
self.visited = set()
self.to_visit = set()
def enqueue(self, url, referer):
if not url in self.visited:
self.to_visit.add(UrlTask((url, referer)))
def __bool__(self):
return bool(self.to_visit)
def __iter__(self):
while self.to_visit:
(url, referer) = self.to_visit.pop()
self.visited.add(url)
yield (url, referer)
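# Example behaviour (illustrative sketch with made-up URLs): a URL queued twice, even with
# different referers, is yielded at most once, and re-enqueueing a visited URL is a no-op.
#   clerk = VisitOnlyOnceClerk()
#   clerk.enqueue('http://example.com/a', 'http://example.com/')
#   clerk.enqueue('http://example.com/a', 'http://example.com/other')  # ignored duplicate
#   for url, referer in clerk:
#       pass  # 'http://example.com/a' is visited exactly once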
def lremove(string, prefix):
"""
Remove a prefix from a string, if it exists.
>>> lremove('www.foo.com', 'www.')
'foo.com'
>>> lremove('foo.com', 'www.')
'foo.com'
"""
if string.startswith(prefix):
return string[len(prefix):]
else:
return string
def beautify(response):
content_type = response.headers.get('content-type')
if content_type:
if not html_re.search(content_type):
raise NotHtmlException
encoding = encoding_from_content_type(content_type)
else:
encoding = None
try:
return BeautifulSoup(
response.content,
features=HTML_PARSER,
from_encoding=encoding,
)
except UnicodeEncodeError:
raise NotHtmlException
def get_links(response):
if 300 <= response.status_code < 400 and response.headers['location']:
# redirect
yield urlparse.urldefrag(
urlparse.urljoin(response.url, response.headers['location'], False)
)[0]
try:
html = beautify(response)
for i in html.find_all('a', href=True):
yield urlparse.urldefrag(urlparse.urljoin(response.url, i['href'], False))[0]
except NotHtmlException:
pass
def force_unicode(s):
if isinstance(s, str):
return unicode(s, encoding='utf-8')
else:
return s
def force_bytes(str_or_unicode):
if isinstance(str_or_unicode, unicode):
return str_or_unicode.encode('utf-8')
else:
return str_or_unicode
def get_pages(base, clerk, session=requests.session()):
clerk.enqueue(base, base)
base_domain = lremove(urlparse.urlparse(base).netloc, 'www.')
for (url, referer) in clerk:
url = force_bytes(url)
referer = force_bytes(referer)
response = session.get(
url,
headers={'Referer': referer, 'User-Agent': 'Fusionbox spider'},
allow_redirects=False,
)
for link in get_links(response):
parsed = urlparse.urlparse(link)
if lremove(parsed.netloc, 'www.') == base_domain:
clerk.enqueue(link, url)
yield referer, response
def metadata_spider(base, output=sys.stdout, delay=0, insecure=False):
writer = csv.writer(output)
robots = robotparser.RobotFileParser(base + '/robots.txt')
robots.read()
writer.writerow(headers)
session = requests.session()
session.verify = not insecure
for referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session):
rules = applicable_robot_rules(robots, response.url)
robots_meta = canonical = title = description = keywords = ''
try:
html = beautify(response)
robots_meta = ','.join(i['content'] for i in html.find_all('meta', {"name": "robots"}))
try:
canonical = html.find_all('link', {"rel": "canonical"})[0]['href']
except IndexError:
pass
try:
title = html.head.title.contents[0]
except (AttributeError, IndexError):
pass
try:
description = html.head.find_all('meta', {"name": "description"})[0]['content']
except (AttributeError, IndexError, KeyError):
pass
try:
keywords = html.head.find_all('meta', {"name": "keywords"})[0]['content']
except (AttributeError, IndexError, KeyError):
pass
except NotHtmlException:
pass
writer.writerow(map(force_bytes, [
response.url,
title,
description,
keywords,
','.join(rules['allow']),
','.join(rules['disallow']),
','.join(rules['noindex']),
robots_meta,
canonical,
referer,
response.status_code,
]))
if delay:
time.sleep(delay)
def grep_spider(base, pattern, delay=0, insensitive=False, insecure=False):
flags = 0
if insensitive:
flags |= re.IGNORECASE
pattern = re.compile(pattern, flags)
session = requests.session()
session.verify = not insecure
for referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session):
for line in response.content.split('\n'):
if pattern.search(line):
print u'%s:%s' % (force_unicode(response.url), force_unicode(line))
if delay:
time.sleep(delay)
def graphviz_spider(base, delay=0, insecure=False):
print "digraph links {"
session = requests.session()
session.verify = not insecure
for referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session):
for link in get_links(response):
print ' "%s" -> "%s";' % (force_bytes(response.url), force_bytes(link))
if delay:
time.sleep(delay)
print "}"
def applicable_robot_rules(robots, url):
rules = collections.defaultdict(list)
if robots.default_entry:
rules[robots.default_entry.allowance(url)].append('*')
for entry in robots.entries:
rules[entry.allowance(url)].extend(entry.useragents)
return rules
| [] |
hajime9652/observations | observations/r/zea_mays.py | 2c8b1ac31025938cb17762e540f2f592e302d5de | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def zea_mays(path):
"""Darwin's Heights of Cross- and Self-fertilized Zea May Pairs
Darwin (1876) studied the growth of pairs of zea may (aka corn)
seedlings, one produced by cross-fertilization and the other produced by
self-fertilization, but otherwise grown under identical conditions. His
goal was to demonstrate the greater vigour of the cross-fertilized
plants. The data recorded are the final height (inches, to the nearest
1/8th) of the plants in each pair.
In the *Design of Experiments*, Fisher (1935) used these data to
illustrate a paired t-test (well, a one-sample test on the mean
difference, `cross - self`). Later in the book (section 21), he used
this data to illustrate an early example of a non-parametric permutation
test, treating each paired difference as having (randomly) either a
positive or negative sign.
A data frame with 15 observations on the following 4 variables.
`pair`
pair number, a numeric vector
`pot`
pot, a factor with levels `1` `2` `3` `4`
`cross`
height of cross fertilized plant, a numeric vector
`self`
height of self fertilized plant, a numeric vector
`diff`
`cross - self` for each pair
Darwin, C. (1876). *The Effect of Cross- and Self-fertilization in the
Vegetable Kingdom*, 2nd Ed. London: John Murray.
Andrews, D. and Herzberg, A. (1985) *Data: a collection of problems from
many fields for the student and research worker*. New York: Springer.
Data retrieved from: `https://www.stat.cmu.edu/StatDat/`
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `zea_mays.csv`.
Returns:
Tuple of np.ndarray `x_train` with 15 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'zea_mays.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/HistData/ZeaMays.csv'
maybe_download_and_extract(path, url,
save_file_name='zea_mays.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
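# Example usage (illustrative sketch: the path is arbitrary and the CSV is downloaded on
# the first call):
#   x_train, metadata = zea_mays('~/data')
#   print(x_train.shape)        # (15, 5), as described in the docstring above
#   print(metadata['columns'])  # pair, pot, cross, self, diff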
| [((68, 9, 68, 33), 'os.path.expanduser', 'os.path.expanduser', ({(68, 28, 68, 32): 'path'}, {}), '(path)', False, 'import os\n'), ((72, 4, 74, 44), 'observations.util.maybe_download_and_extract', 'maybe_download_and_extract', (), '', False, 'from observations.util import maybe_download_and_extract\n'), ((76, 21, 76, 49), 'os.path.join', 'os.path.join', ({(76, 34, 76, 38): 'path', (76, 40, 76, 48): 'filename'}, {}), '(path, filename)', False, 'import os\n'), ((70, 24, 70, 52), 'os.path.join', 'os.path.join', ({(70, 37, 70, 41): 'path', (70, 43, 70, 51): 'filename'}, {}), '(path, filename)', False, 'import os\n')] |
peterkulik/ois_api_client | ois_api_client/v3_0/dto/Lines.py | 51dabcc9f920f89982c4419bb058f5a88193cee0 | from typing import List
from dataclasses import dataclass
from .Line import Line
@dataclass
class Lines:
"""Product / service items
:param merged_item_indicator: Indicates whether the data exchange contains merged line data due to size reduction
:param line: Product / service item
"""
merged_item_indicator: bool
line: List[Line]
| [] |
leylafenix/belief-network-irs | parsing_documents.py | 9094e4cde738bd93ed1747dc958b5acb0e0fa684 | __author__ = 'Jose Gabriel'
import os
import pprint
def read_block(f):
s = ""
line = f.readline()
while line and not line.startswith("."):
s += line
line = f.readline()
return s, line
def read_doc(f):
doc = {"title": "", "authors": "", "content": ""}
line = f.readline()
while line and not line.startswith(".I"):
if line.startswith(".T"):
doc["title"], line = read_block(f)
elif line.startswith(".A"):
doc["authors"], line = read_block(f)
elif line.startswith(".W"):
doc["content"], line = read_block(f)
else:
_, line = read_block(f)
return doc, line
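# Expected input layout (illustrative sketch inferred from the markers handled above,
# not copied from the real ADI.ALL file):
#   .I 1
#   .T
#   a document title
#   .A
#   an author name
#   .W
#   the abstract text, possibly spanning several lines
#   .I 2
#   ...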
def create_doc(data, out_folder, name):
with open(out_folder + os.sep + name, 'w') as f:
f.write(data["title"] + "\n")
f.write(data["content"] + "\n")
f.write(data["authors"])
def parse_all(s, out_folder):
with open(s) as f:
line = f.readline() # .I
while line:
doc_name = "d%03d.txt" % (int(line.strip().split()[-1]))
doc, line = read_doc(f)
create_doc(doc, out_folder, doc_name)
# print("**********************************")
if __name__ == '__main__':
s = "adi" + os.sep + "ADI.ALL"
out_folder = "test_index"
    try:  # find out how to check whether a folder or file exists in python
os.mkdir(out_folder)
except FileExistsError:
pass
parse_all(s, out_folder)
| [((54, 8, 54, 28), 'os.mkdir', 'os.mkdir', ({(54, 17, 54, 27): 'out_folder'}, {}), '(out_folder)', False, 'import os\n')] |
caktus/rapidsms-groups | groups/admin.py | eda6f30cdc60cf57833f1d37ba08e59454da8987 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from django.contrib import admin
from groups.models import Group
admin.site.register(Group)
| [((8, 0, 8, 26), 'django.contrib.admin.site.register', 'admin.site.register', ({(8, 20, 8, 25): 'Group'}, {}), '(Group)', False, 'from django.contrib import admin\n')] |
Ascend-Huawei/AVOD | avod/datasets/kitti/kitti_aug_test.py | ea62372517bbfa9d4020bc5ab2739ee182c63c56 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from npu_bridge.npu_init import *
import unittest
import numpy as np
from avod.datasets.kitti import kitti_aug
class KittiAugTest(unittest.TestCase):
def test_flip_boxes_3d(self):
boxes_3d = np.array([
[1, 2, 3, 4, 5, 6, np.pi / 4],
[1, 2, 3, 4, 5, 6, -np.pi / 4]
])
exp_flipped_boxes_3d = np.array([
[-1, 2, 3, 4, 5, 6, 3 * np.pi / 4],
[-1, 2, 3, 4, 5, 6, -3 * np.pi / 4]
])
flipped_boxes_3d = kitti_aug.flip_boxes_3d(boxes_3d)
np.testing.assert_almost_equal(flipped_boxes_3d, exp_flipped_boxes_3d)
| [((42, 19, 45, 10), 'numpy.array', 'np.array', ({(42, 28, 45, 9): '[[1, 2, 3, 4, 5, 6, np.pi / 4], [1, 2, 3, 4, 5, 6, -np.pi / 4]]'}, {}), '([[1, 2, 3, 4, 5, 6, np.pi / 4], [1, 2, 3, 4, 5, 6, -np.pi / 4]])', True, 'import numpy as np\n'), ((47, 31, 50, 10), 'numpy.array', 'np.array', ({(47, 40, 50, 9): '[[-1, 2, 3, 4, 5, 6, 3 * np.pi / 4], [-1, 2, 3, 4, 5, 6, -3 * np.pi / 4]]'}, {}), '([[-1, 2, 3, 4, 5, 6, 3 * np.pi / 4], [-1, 2, 3, 4, 5, 6, -3 * np.\n pi / 4]])', True, 'import numpy as np\n'), ((52, 27, 52, 60), 'avod.datasets.kitti.kitti_aug.flip_boxes_3d', 'kitti_aug.flip_boxes_3d', ({(52, 51, 52, 59): 'boxes_3d'}, {}), '(boxes_3d)', False, 'from avod.datasets.kitti import kitti_aug\n'), ((54, 8, 54, 78), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(54, 39, 54, 55): 'flipped_boxes_3d', (54, 57, 54, 77): 'exp_flipped_boxes_3d'}, {}), '(flipped_boxes_3d, exp_flipped_boxes_3d)', True, 'import numpy as np\n')] |
keysona/blog | application/model/_base.py | 783e0bdbed1e4d8ec9857ee609b39c9dfb958670 | from flask_sqlalchemy import SQLAlchemy, Model
# class BaseModel(Model):
# def save(self):
# db.session.add(self)
# db.session.commit(self)
# def delete(self):
# db.session.
db = SQLAlchemy()
| [((12, 5, 12, 17), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ({}, {}), '()', False, 'from flask_sqlalchemy import SQLAlchemy, Model\n')] |
qianfei11/zstack-utility | kvmagent/kvmagent/plugins/prometheus.py | e791bc6b6ae3a74e202f6fce84bde498c715aee8 | import os.path
import threading
import typing
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY
from kvmagent import kvmagent
from zstacklib.utils import http
from zstacklib.utils import jsonobject
from zstacklib.utils import lock
from zstacklib.utils import lvm
from zstacklib.utils import misc
from zstacklib.utils import thread
from zstacklib.utils.bash import *
from zstacklib.utils.ip import get_nic_supported_max_speed
logger = log.get_logger(__name__)
collector_dict = {} # type: Dict[str, threading.Thread]
latest_collect_result = {}
collectResultLock = threading.RLock()
QEMU_CMD = kvmagent.get_qemu_path().split("/")[-1]
def read_number(fname):
res = linux.read_file(fname)
return 0 if not res else int(res)
def collect_host_network_statistics():
all_eths = os.listdir("/sys/class/net/")
virtual_eths = os.listdir("/sys/devices/virtual/net/")
interfaces = []
for eth in all_eths:
eth = eth.strip(' \t\n\r')
if eth in virtual_eths: continue
if eth == 'bonding_masters':
continue
elif not eth:
continue
else:
interfaces.append(eth)
all_in_bytes = 0
all_in_packets = 0
all_in_errors = 0
all_out_bytes = 0
all_out_packets = 0
all_out_errors = 0
for intf in interfaces:
all_in_bytes += read_number("/sys/class/net/{}/statistics/rx_bytes".format(intf))
all_in_packets += read_number("/sys/class/net/{}/statistics/rx_packets".format(intf))
all_in_errors += read_number("/sys/class/net/{}/statistics/rx_errors".format(intf))
all_out_bytes += read_number("/sys/class/net/{}/statistics/tx_bytes".format(intf))
all_out_packets += read_number("/sys/class/net/{}/statistics/tx_packets".format(intf))
all_out_errors += read_number("/sys/class/net/{}/statistics/tx_errors".format(intf))
metrics = {
'host_network_all_in_bytes': GaugeMetricFamily('host_network_all_in_bytes',
'Host all inbound traffic in bytes'),
'host_network_all_in_packages': GaugeMetricFamily('host_network_all_in_packages',
'Host all inbound traffic in packages'),
'host_network_all_in_errors': GaugeMetricFamily('host_network_all_in_errors',
'Host all inbound traffic errors'),
'host_network_all_out_bytes': GaugeMetricFamily('host_network_all_out_bytes',
'Host all outbound traffic in bytes'),
'host_network_all_out_packages': GaugeMetricFamily('host_network_all_out_packages',
'Host all outbound traffic in packages'),
'host_network_all_out_errors': GaugeMetricFamily('host_network_all_out_errors',
'Host all outbound traffic errors'),
}
metrics['host_network_all_in_bytes'].add_metric([], float(all_in_bytes))
metrics['host_network_all_in_packages'].add_metric([], float(all_in_packets))
metrics['host_network_all_in_errors'].add_metric([], float(all_in_errors))
metrics['host_network_all_out_bytes'].add_metric([], float(all_out_bytes))
metrics['host_network_all_out_packages'].add_metric([], float(all_out_packets))
metrics['host_network_all_out_errors'].add_metric([], float(all_out_errors))
return metrics.values()
def collect_host_capacity_statistics():
default_zstack_path = '/usr/local/zstack/apache-tomcat/webapps/zstack'
zstack_env_path = os.environ.get('ZSTACK_HOME', None)
if zstack_env_path and zstack_env_path != default_zstack_path:
default_zstack_path = zstack_env_path
zstack_dir = ['/var/lib/zstack', '%s/../../../' % default_zstack_path, '/opt/zstack-dvd/',
'/var/log/zstack', '/var/lib/mysql', '/var/lib/libvirt', '/tmp/zstack']
metrics = {
'zstack_used_capacity_in_bytes': GaugeMetricFamily('zstack_used_capacity_in_bytes',
'ZStack used capacity in bytes')
}
zstack_used_capacity = 0
for dir in zstack_dir:
if not os.path.exists(dir):
continue
cmd = "du -bs %s | awk {\'print $1\'}" % dir
res = bash_o(cmd)
zstack_used_capacity += int(res)
metrics['zstack_used_capacity_in_bytes'].add_metric([], float(zstack_used_capacity))
return metrics.values()
def collect_lvm_capacity_statistics():
metrics = {
'vg_size': GaugeMetricFamily('vg_size',
'volume group size', None, ['vg_name']),
'vg_avail': GaugeMetricFamily('vg_avail',
'volume group and thin pool free size', None, ['vg_name']),
}
r = bash_r("grep -Ev '^[[:space:]]*#|^[[:space:]]*$' /etc/multipath/wwids")
if r == 0:
linux.set_fail_if_no_path()
r, o, e = bash_roe("vgs --nolocking --noheading -oname")
if r != 0 or len(o.splitlines()) == 0:
return metrics.values()
vg_names = o.splitlines()
for name in vg_names:
name = name.strip()
size, avail = lvm.get_vg_size(name, False)
metrics['vg_size'].add_metric([name], float(size))
metrics['vg_avail'].add_metric([name], float(avail))
return metrics.values()
def convert_raid_state_to_int(state):
"""
:type state: str
"""
state = state.lower()
if state == "optimal":
return 0
elif state == "degraded":
return 5
else:
return 100
def convert_disk_state_to_int(state):
"""
:type state: str
"""
state = state.lower()
if "online" in state or "jobd" in state:
return 0
elif "rebuild" in state:
return 5
elif "failed" in state:
return 10
elif "unconfigured" in state:
return 15
else:
return 100
def collect_raid_state():
metrics = {
'raid_state': GaugeMetricFamily('raid_state',
'raid state', None, ['target_id']),
'physical_disk_state': GaugeMetricFamily('physical_disk_state',
'physical disk state', None,
['slot_number', 'disk_group']),
'physical_disk_temperature': GaugeMetricFamily('physical_disk_temperature',
'physical disk temperature', None,
['slot_number', 'disk_group']),
}
if bash_r("/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll") != 0:
return metrics.values()
raid_info = bash_o("/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll | grep -E 'Target Id|State'").strip().splitlines()
target_id = state = "unknown"
for info in raid_info:
if "Target Id" in info:
target_id = info.strip().strip(")").split(" ")[-1]
else:
state = info.strip().split(" ")[-1]
metrics['raid_state'].add_metric([target_id], convert_raid_state_to_int(state))
disk_info = bash_o(
"/opt/MegaRAID/MegaCli/MegaCli64 -PDList -aAll | grep -E 'Slot Number|DiskGroup|Firmware state|Drive Temperature'").strip().splitlines()
slot_number = state = disk_group = "unknown"
for info in disk_info:
if "Slot Number" in info:
slot_number = info.strip().split(" ")[-1]
elif "DiskGroup" in info:
kvs = info.replace("Drive's position: ", "").split(",")
disk_group = filter(lambda x: "DiskGroup" in x, kvs)[0]
disk_group = disk_group.split(" ")[-1]
elif "Drive Temperature" in info:
temp = info.split(":")[1].split("C")[0]
metrics['physical_disk_temperature'].add_metric([slot_number, disk_group], int(temp))
else:
disk_group = "JBOD" if disk_group == "unknown" and info.count("JBOD") > 0 else disk_group
disk_group = "unknown" if disk_group is None else disk_group
state = info.strip().split(":")[-1]
metrics['physical_disk_state'].add_metric([slot_number, disk_group], convert_disk_state_to_int(state))
return metrics.values()
def collect_equipment_state():
metrics = {
'power_supply': GaugeMetricFamily('power_supply',
'power supply', None, ['ps_id']),
'ipmi_status': GaugeMetricFamily('ipmi_status', 'ipmi status', None, []),
'physical_network_interface': GaugeMetricFamily('physical_network_interface',
'physical network interface', None,
['interface_name', 'speed']),
}
r, ps_info = bash_ro("ipmitool sdr type 'power supply'") # type: (int, str)
if r == 0:
for info in ps_info.splitlines():
info = info.strip()
ps_id = info.split("|")[0].strip().split(" ")[0]
health = 10 if "fail" in info.lower() or "lost" in info.lower() else 0
metrics['power_supply'].add_metric([ps_id], health)
metrics['ipmi_status'].add_metric([], bash_r("ipmitool mc info"))
nics = bash_o("find /sys/class/net -type l -not -lname '*virtual*' -printf '%f\\n'").splitlines()
if len(nics) != 0:
for nic in nics:
nic = nic.strip()
try:
                # NOTE(weiw): sriov nic contains a carrier file but it can not be read
status = linux.read_file("/sys/class/net/%s/carrier" % nic) == 1
except Exception as e:
status = True
speed = str(get_nic_supported_max_speed(nic))
metrics['physical_network_interface'].add_metric([nic, speed], status)
return metrics.values()
def collect_vm_statistics():
metrics = {
'cpu_occupied_by_vm': GaugeMetricFamily('cpu_occupied_by_vm',
'Percentage of CPU used by vm', None, ['vmUuid'])
}
r, pid_vm_map_str = bash_ro("ps --no-headers u -C \"%s -name\" | awk '{print $2,$13}'" % QEMU_CMD)
if r != 0 or len(pid_vm_map_str.splitlines()) == 0:
return metrics.values()
pid_vm_map_str = pid_vm_map_str.replace(",debug-threads=on", "").replace("guest=", "")
'''pid_vm_map_str samples:
38149 e8e6f27bfb2d47e08c59cbea1d0488c3
38232 afa02edca7eb4afcb5d2904ac1216eb1
'''
pid_vm_map = {}
for pid_vm in pid_vm_map_str.splitlines():
arr = pid_vm.split()
if len(arr) == 2:
pid_vm_map[arr[0]] = arr[1]
def collect(vm_pid_arr):
vm_pid_arr_str = ','.join(vm_pid_arr)
r, pid_cpu_usages_str = bash_ro("top -b -n 1 -p %s | grep qemu | awk '{print $1,$9}'" % vm_pid_arr_str)
if r != 0 or len(pid_cpu_usages_str.splitlines()) == 0:
return
for pid_cpu_usage in pid_cpu_usages_str.splitlines():
arr = pid_cpu_usage.split()
pid = arr[0]
vm_uuid = pid_vm_map[pid]
cpu_usage = arr[1]
metrics['cpu_occupied_by_vm'].add_metric([vm_uuid], float(cpu_usage))
n = 10
for i in range(0, len(pid_vm_map.keys()), n):
collect(pid_vm_map.keys()[i:i + n])
return metrics.values()
collect_node_disk_wwid_last_time = None
collect_node_disk_wwid_last_result = None
def collect_node_disk_wwid():
global collect_node_disk_wwid_last_time
global collect_node_disk_wwid_last_result
# NOTE(weiw): some storage can not afford frequent TUR. ref: ZSTAC-23416
if collect_node_disk_wwid_last_time is None:
collect_node_disk_wwid_last_time = time.time()
elif time.time() - collect_node_disk_wwid_last_time < 60 and collect_node_disk_wwid_last_result is not None:
return collect_node_disk_wwid_last_result
metrics = {
'node_disk_wwid': GaugeMetricFamily('node_disk_wwid',
'node disk wwid', None, ["disk", "wwid"])
}
pvs = bash_o("pvs --nolocking --noheading -o pv_name").strip().splitlines()
for pv in pvs:
multipath_wwid = None
if bash_r("dmsetup table %s | grep multipath" % pv) == 0:
multipath_wwid = bash_o("udevadm info -n %s | grep -E '^S: disk/by-id/dm-uuid' | awk -F '-' '{print $NF}'" % pv).strip()
disks = linux.get_physical_disk(pv, False)
for disk in disks:
disk_name = disk.split("/")[-1].strip()
wwids = bash_o("udevadm info -n %s | grep -E '^S: disk/by-id' | awk -F '/' '{print $NF}' | grep -v '^lvm-pv' | sort" % disk).strip().splitlines()
if multipath_wwid is not None:
wwids.append(multipath_wwid)
if len(wwids) > 0:
metrics['node_disk_wwid'].add_metric([disk_name, ";".join([w.strip() for w in wwids])], 1)
collect_node_disk_wwid_last_result = metrics.values()
return metrics.values()
kvmagent.register_prometheus_collector(collect_host_network_statistics)
kvmagent.register_prometheus_collector(collect_host_capacity_statistics)
kvmagent.register_prometheus_collector(collect_vm_statistics)
kvmagent.register_prometheus_collector(collect_node_disk_wwid)
if misc.isMiniHost():
kvmagent.register_prometheus_collector(collect_lvm_capacity_statistics)
kvmagent.register_prometheus_collector(collect_raid_state)
kvmagent.register_prometheus_collector(collect_equipment_state)
class PrometheusPlugin(kvmagent.KvmAgent):
COLLECTD_PATH = "/prometheus/collectdexporter/start"
@kvmagent.replyerror
@in_bash
def start_prometheus_exporter(self, req):
@in_bash
def start_collectd(cmd):
conf_path = os.path.join(os.path.dirname(cmd.binaryPath), 'collectd.conf')
conf = '''Interval {{INTERVAL}}
# version {{VERSION}}
FQDNLookup false
LoadPlugin syslog
LoadPlugin aggregation
LoadPlugin cpu
LoadPlugin disk
LoadPlugin interface
LoadPlugin memory
LoadPlugin network
LoadPlugin virt
<Plugin aggregation>
<Aggregation>
#Host "unspecified"
Plugin "cpu"
#PluginInstance "unspecified"
Type "cpu"
#TypeInstance "unspecified"
GroupBy "Host"
GroupBy "TypeInstance"
CalculateNum false
CalculateSum false
CalculateAverage true
CalculateMinimum false
CalculateMaximum false
CalculateStddev false
</Aggregation>
</Plugin>
<Plugin cpu>
ReportByCpu true
ReportByState true
ValuesPercentage true
</Plugin>
<Plugin disk>
Disk "/^sd[a-z]$/"
Disk "/^hd[a-z]$/"
Disk "/^vd[a-z]$/"
IgnoreSelected false
</Plugin>
<Plugin "interface">
{% for i in INTERFACES -%}
Interface "{{i}}"
{% endfor -%}
IgnoreSelected false
</Plugin>
<Plugin memory>
ValuesAbsolute true
ValuesPercentage false
</Plugin>
<Plugin virt>
Connection "qemu:///system"
RefreshInterval {{INTERVAL}}
HostnameFormat name
PluginInstanceFormat name
BlockDevice "/:hd[a-z]/"
IgnoreSelected true
ExtraStats "vcpu memory"
</Plugin>
<Plugin network>
Server "localhost" "25826"
</Plugin>
'''
tmpt = Template(conf)
conf = tmpt.render({
'INTERVAL': cmd.interval,
'INTERFACES': interfaces,
'VERSION': cmd.version,
})
need_restart_collectd = False
if os.path.exists(conf_path):
with open(conf_path, 'r') as fd:
old_conf = fd.read()
if old_conf != conf:
with open(conf_path, 'w') as fd:
fd.write(conf)
need_restart_collectd = True
else:
with open(conf_path, 'w') as fd:
fd.write(conf)
need_restart_collectd = True
cpid = linux.find_process_by_command('collectd', [conf_path])
mpid = linux.find_process_by_command('collectdmon', [conf_path])
if not cpid:
bash_errorout('collectdmon -- -C %s' % conf_path)
else:
bash_errorout('kill -TERM %s' % cpid)
if need_restart_collectd:
if not mpid:
bash_errorout('collectdmon -- -C %s' % conf_path)
else:
bash_errorout('kill -HUP %s' % mpid)
else:
if not mpid:
bash_errorout('collectdmon -- -C %s' % conf_path)
def run_in_systemd(binPath, args, log):
def get_systemd_name(path):
if "collectd_exporter" in path:
return "collectd_exporter"
elif "node_exporter" in path:
return "node_exporter"
elif "pushgateway" in path:
return "pushgateway"
def reload_and_restart_service(service_name):
bash_errorout("systemctl daemon-reload && systemctl restart %s.service" % service_name)
service_name = get_systemd_name(binPath)
service_path = '/etc/systemd/system/%s.service' % service_name
service_conf = '''
[Unit]
Description=prometheus %s
After=network.target
[Service]
ExecStart=/bin/sh -c '%s %s > %s 2>&1'
ExecStop=/bin/sh -c 'pkill -TERM -f %s'
Restart=always
RestartSec=30s
[Install]
WantedBy=multi-user.target
''' % (service_name, binPath, args, '/dev/null' if log.endswith('/pushgateway.log') else log, binPath)
if not os.path.exists(service_path):
linux.write_file(service_path, service_conf, True)
os.chmod(service_path, 0644)
reload_and_restart_service(service_name)
return
if linux.read_file(service_path) != service_conf:
linux.write_file(service_path, service_conf, True)
logger.info("%s.service conf changed" % service_name)
os.chmod(service_path, 0644)
# restart service regardless of conf changes, for ZSTAC-23539
reload_and_restart_service(service_name)
@lock.file_lock("/run/collectd-conf.lock", locker=lock.Flock())
def start_collectd_exporter(cmd):
start_collectd(cmd)
start_exporter(cmd)
@in_bash
def start_exporter(cmd):
EXPORTER_PATH = cmd.binaryPath
LOG_FILE = os.path.join(os.path.dirname(EXPORTER_PATH), cmd.binaryPath + '.log')
ARGUMENTS = cmd.startupArguments
if not ARGUMENTS:
ARGUMENTS = ""
os.chmod(EXPORTER_PATH, 0o755)
run_in_systemd(EXPORTER_PATH, ARGUMENTS, LOG_FILE)
para = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
eths = bash_o("ls /sys/class/net").split()
interfaces = []
for eth in eths:
eth = eth.strip(' \t\n\r')
if eth == 'lo': continue
if eth == 'bonding_masters': continue
elif eth.startswith('vnic'): continue
elif eth.startswith('outer'): continue
elif eth.startswith('br_'): continue
elif not eth: continue
else:
interfaces.append(eth)
for cmd in para.cmds:
if "collectd_exporter" in cmd.binaryPath:
start_collectd_exporter(cmd)
else:
start_exporter(cmd)
return jsonobject.dumps(rsp)
def install_colletor(self):
class Collector(object):
__collector_cache = {}
@classmethod
def __get_cache__(cls):
# type: () -> list
keys = cls.__collector_cache.keys()
if keys is None or len(keys) == 0:
return None
if (time.time() - keys[0]) < 9:
return cls.__collector_cache.get(keys[0])
return None
@classmethod
def __store_cache__(cls, ret):
# type: (list) -> None
cls.__collector_cache.clear()
cls.__collector_cache.update({time.time(): ret})
@classmethod
def check(cls, v):
try:
if v is None:
return False
if isinstance(v, GaugeMetricFamily):
return Collector.check(v.samples)
if isinstance(v, list) or isinstance(v, tuple):
for vl in v:
if Collector.check(vl) is False:
return False
if isinstance(v, dict):
for vk in v.iterkeys():
if vk == "timestamp" or vk == "exemplar":
continue
if Collector.check(v[vk]) is False:
return False
except Exception as e:
logger.warn("got exception in check value %s: %s" % (v, e))
return True
return True
def collect(self):
global latest_collect_result
ret = []
def get_result_run(f, fname):
# type: (typing.Callable, str) -> None
global collectResultLock
global latest_collect_result
r = f()
if not Collector.check(r):
logger.warn("result from collector %s contains illegal character None, details: \n%s" % (fname, r))
return
with collectResultLock:
latest_collect_result[fname] = r
cache = Collector.__get_cache__()
if cache is not None:
return cache
for c in kvmagent.metric_collectors:
name = "%s.%s" % (c.__module__, c.__name__)
if collector_dict.get(name) is not None and collector_dict.get(name).is_alive():
continue
collector_dict[name] = thread.ThreadFacade.run_in_thread(get_result_run, (c, name,))
for i in range(7):
for t in collector_dict.values():
if t.is_alive():
time.sleep(0.5)
continue
for k in collector_dict.iterkeys():
if collector_dict[k].is_alive():
logger.warn("It seems that the collector [%s] has not been completed yet,"
" temporarily use the last calculation result." % k)
for v in latest_collect_result.itervalues():
ret.extend(v)
Collector.__store_cache__(ret)
return ret
REGISTRY.register(Collector())
def start(self):
http_server = kvmagent.get_http_server()
http_server.register_async_uri(self.COLLECTD_PATH, self.start_prometheus_exporter)
self.install_colletor()
start_http_server(7069)
def stop(self):
pass
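

# Usage sketch (illustrative only): the collector convention used above is a zero-argument
# function returning a list of GaugeMetricFamily objects, registered via
# kvmagent.register_prometheus_collector() so that Collector.collect() picks it up.
# The metric name below is a placeholder, not a real metric exported by this plugin.
#
# def collect_example_metric():
#     g = GaugeMetricFamily('host_example_metric', 'an illustrative gauge')
#     g.add_metric([], 1.0)
#     return [g]
#
# kvmagent.register_prometheus_collector(collect_example_metric)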
| [] |
rockandsalt/conan-center-index | recipes/libstudxml/all/conanfile.py | d739adcec3e4dd4c250eff559ceb738e420673dd | from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools
from conans.errors import ConanInvalidConfiguration
import os
import shutil
required_conan_version = ">=1.33.0"
class LibStudXmlConan(ConanFile):
name = "libstudxml"
description = "A streaming XML pull parser and streaming XML serializer implementation for modern, standard C++."
topics = ("xml", "xml-parser", "serialization")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://www.codesynthesis.com/projects/libstudxml/"
license = "MIT"
settings = "os", "compiler", "build_type", "arch"
exports_sources = "patches/*"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
_autotools = None
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
self.requires("expat/2.4.1")
def validate(self):
if self.settings.compiler == "Visual Studio":
if tools.Version(self.settings.compiler.version) < "9":
raise ConanInvalidConfiguration("Visual Studio {} is not supported.".format(self.settings.compiler.version))
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def build_requirements(self):
if self.settings.compiler != "Visual Studio":
self.build_requires("gnu-config/cci.20201022")
self.build_requires("libtool/2.4.6")
if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
self.build_requires("msys2/cci.latest")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_autotools(self):
if not self._autotools:
args = ["--with-external-expat"]
if self.options.shared:
args.extend(["--enable-shared", "--disable-static"])
else:
args.extend(["--disable-shared", "--enable-static"])
self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
self._autotools.configure(configure_dir=self._source_subfolder, args=args)
return self._autotools
def _build_vs(self):
vc_ver = int(tools.Version(self.settings.compiler.version).major)
sln_path = None
def get_sln_path():
return os.path.join(self._source_subfolder, "libstudxml-vc{}.sln".format(vc_ver))
sln_path = get_sln_path()
while not os.path.exists(sln_path):
vc_ver -= 1
sln_path = get_sln_path()
proj_path = os.path.join(self._source_subfolder, "xml", "libstudxml-vc{}.vcxproj".format(vc_ver))
if not self.options.shared:
tools.replace_in_file(proj_path, "DynamicLibrary", "StaticLibrary")
tools.replace_in_file(proj_path, "LIBSTUDXML_DYNAMIC_LIB", "LIBSTUDXML_STATIC_LIB")
msbuild = MSBuild(self)
msbuild.build(sln_path, platforms={"x86": "Win32"})
@property
def _user_info_build(self):
return getattr(self, "user_info_build", self.deps_user_info)
def _build_autotools(self):
shutil.copy(self._user_info_build["gnu-config"].CONFIG_SUB,
os.path.join(self._source_subfolder, "config", "config.sub"))
shutil.copy(self._user_info_build["gnu-config"].CONFIG_GUESS,
os.path.join(self._source_subfolder, "config", "config.guess"))
if self.settings.compiler.get_safe("libcxx") == "libc++":
# libc++ includes a file called 'version', and since libstudxml adds source_subfolder as an
# include dir, libc++ ends up including their 'version' file instead, causing a compile error
tools.remove_files_by_mask(self._source_subfolder, "version")
with tools.chdir(self._source_subfolder):
self.run("{} -fiv".format(tools.get_env("AUTORECONF")), win_bash=tools.os_info.is_windows)
autotools = self._configure_autotools()
autotools.make()
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
if self.settings.compiler == "Visual Studio":
self._build_vs()
else:
self._build_autotools()
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
if self.settings.compiler == "Visual Studio":
self.copy("xml/value-traits", dst="include", src=self._source_subfolder)
self.copy("xml/serializer", dst="include", src=self._source_subfolder)
self.copy("xml/qname", dst="include", src=self._source_subfolder)
self.copy("xml/parser", dst="include", src=self._source_subfolder)
self.copy("xml/forward", dst="include", src=self._source_subfolder)
self.copy("xml/exception", dst="include", src=self._source_subfolder)
self.copy("xml/content", dst="include", src=self._source_subfolder)
self.copy("xml/*.ixx", dst="include", src=self._source_subfolder)
self.copy("xml/*.txx", dst="include", src=self._source_subfolder)
self.copy("xml/*.hxx", dst="include", src=self._source_subfolder)
self.copy("xml/*.h", dst="include", src=self._source_subfolder)
suffix = ""
if self.settings.arch == "x86_64":
suffix = "64"
if self.options.shared:
self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "lib" + suffix))
self.copy("*.dll", dst="bin", src=os.path.join(self._source_subfolder, "bin" + suffix))
else:
self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "bin" + suffix))
else:
autotools = self._configure_autotools()
autotools.install()
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "libstudxml.la")
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "share"))
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.names["pkg_config"] = "libstudxml"
# If built with makefile, static library mechanism is provided by their buildsystem already
if self.settings.compiler == "Visual Studio" and not self.options.shared:
self.cpp_info.defines = ["LIBSTUDXML_STATIC_LIB=1"]
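

# Usage sketch (illustrative only): a typical local build with Conan 1.x; the version below
# is a placeholder, and the option name follows this recipe's `shared` option.
#   conan create . libstudxml/<version>@ -o libstudxml:shared=True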
| [((63, 8, 64, 70), 'conans.tools.get', 'tools.get', (), '', False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((95, 18, 95, 31), 'conans.MSBuild', 'MSBuild', ({(95, 26, 95, 30): 'self'}, {}), '(self)', False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((159, 29, 159, 53), 'conans.tools.collect_libs', 'tools.collect_libs', ({(159, 48, 159, 52): 'self'}, {}), '(self)', False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((74, 30, 74, 96), 'conans.AutoToolsBuildEnvironment', 'AutoToolsBuildEnvironment', (), '', False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((85, 18, 85, 42), 'os.path.exists', 'os.path.exists', ({(85, 33, 85, 41): 'sln_path'}, {}), '(sln_path)', False, 'import os\n'), ((92, 12, 92, 79), 'conans.tools.replace_in_file', 'tools.replace_in_file', ({(92, 34, 92, 43): 'proj_path', (92, 45, 92, 61): '"""DynamicLibrary"""', (92, 63, 92, 78): '"""StaticLibrary"""'}, {}), "(proj_path, 'DynamicLibrary', 'StaticLibrary')", False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((93, 12, 93, 95), 'conans.tools.replace_in_file', 'tools.replace_in_file', ({(93, 34, 93, 43): 'proj_path', (93, 45, 93, 69): '"""LIBSTUDXML_DYNAMIC_LIB"""', (93, 71, 93, 94): '"""LIBSTUDXML_STATIC_LIB"""'}, {}), "(proj_path, 'LIBSTUDXML_DYNAMIC_LIB',\n 'LIBSTUDXML_STATIC_LIB')", False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((104, 20, 104, 80), 'os.path.join', 'os.path.join', ({(104, 33, 104, 55): 'self._source_subfolder', (104, 57, 104, 65): '"""config"""', (104, 67, 104, 79): '"""config.sub"""'}, {}), "(self._source_subfolder, 'config', 'config.sub')", False, 'import os\n'), ((106, 20, 106, 82), 'os.path.join', 'os.path.join', ({(106, 33, 106, 55): 'self._source_subfolder', (106, 57, 106, 65): '"""config"""', (106, 67, 106, 81): '"""config.guess"""'}, {}), "(self._source_subfolder, 'config', 'config.guess')", False, 'import os\n'), ((111, 12, 111, 73), 'conans.tools.remove_files_by_mask', 'tools.remove_files_by_mask', ({(111, 39, 111, 61): 'self._source_subfolder', (111, 63, 111, 72): '"""version"""'}, {}), "(self._source_subfolder, 'version')", False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((113, 13, 113, 48), 'conans.tools.chdir', 'tools.chdir', ({(113, 25, 113, 47): 'self._source_subfolder'}, {}), '(self._source_subfolder)', False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((121, 12, 121, 32), 'conans.tools.patch', 'tools.patch', ({}, {}), '(**patch)', False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((48, 15, 48, 60), 'conans.tools.Version', 'tools.Version', ({(48, 29, 48, 59): 'self.settings.compiler.version'}, {}), '(self.settings.compiler.version)', False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((79, 21, 79, 66), 'conans.tools.Version', 'tools.Version', ({(79, 35, 79, 65): 'self.settings.compiler.version'}, {}), '(self.settings.compiler.version)', False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((154, 39, 154, 79), 'os.path.join', 'os.path.join', ({(154, 52, 154, 71): 'self.package_folder', (154, 73, 154, 78): '"""lib"""'}, {}), "(self.package_folder, 'lib')", False, 'import os\n'), ((155, 24, 155, 77), 'os.path.join', 'os.path.join', ({(155, 37, 155, 56): 'self.package_folder', (155, 58, 155, 63): '"""lib"""', 
(155, 65, 155, 76): '"""pkgconfig"""'}, {}), "(self.package_folder, 'lib', 'pkgconfig')", False, 'import os\n'), ((156, 24, 156, 66), 'os.path.join', 'os.path.join', ({(156, 37, 156, 56): 'self.package_folder', (156, 58, 156, 65): '"""share"""'}, {}), "(self.package_folder, 'share')", False, 'import os\n'), ((59, 60, 59, 92), 'conans.tools.get_env', 'tools.get_env', ({(59, 74, 59, 91): '"""CONAN_BASH_PATH"""'}, {}), "('CONAN_BASH_PATH')", False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((114, 38, 114, 65), 'conans.tools.get_env', 'tools.get_env', ({(114, 52, 114, 64): '"""AUTORECONF"""'}, {}), "('AUTORECONF')", False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((147, 50, 147, 102), 'os.path.join', 'os.path.join', ({(147, 63, 147, 85): 'self._source_subfolder', (147, 87, 147, 101): "('lib' + suffix)"}, {}), "(self._source_subfolder, 'lib' + suffix)", False, 'import os\n'), ((148, 50, 148, 102), 'os.path.join', 'os.path.join', ({(148, 63, 148, 85): 'self._source_subfolder', (148, 87, 148, 101): "('bin' + suffix)"}, {}), "(self._source_subfolder, 'bin' + suffix)", False, 'import os\n'), ((150, 50, 150, 102), 'os.path.join', 'os.path.join', ({(150, 63, 150, 85): 'self._source_subfolder', (150, 87, 150, 101): "('bin' + suffix)"}, {}), "(self._source_subfolder, 'bin' + suffix)", False, 'import os\n')] |
KeleiHe/DAAN | dataset/WebCariA.py | 04e153c55f8d63e824adbee828e524573afe6a1c |
# Copyright 2020 Wen Ji & Kelei He ([email protected])
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
class WebCariA:
def __init__(self, dataType, modelType, parse, des_attri=None):
self.dir_path = "/data/jw/dataset/" + str(parse)
self.dataType = dataType
self.parse = parse
self.des_attri = des_attri
if self.dataType == 'train':
if self.parse == 'Caricature':
self.subPath = 'CariTrain'
elif self.parse == 'Photo':
self.subPath = 'PhotoTrain'
else:
self.subPath = 'WebCariTrain'
elif self.dataType == 'val':
if self.parse == 'Caricature':
self.subPath = 'CariVal'
elif self.parse == 'Photo':
self.subPath = 'PhotoVal'
else:
self.subPath = 'WebCariVal'
elif self.dataType == 'test':
if self.parse == 'Caricature':
self.subPath = 'CariTest'
elif self.parse == 'Photo':
self.subPath = 'PhotoTest'
else:
self.subPath = 'WebCariTest'
elif self.dataType == 'all_data':
if self.parse == 'Caricature':
self.subPath = 'all_cari_data'
elif self.parse == 'Photo':
self.subPath = 'all_photo_data'
else:
self.subPath = 'all_WebCari_data'
else:
print("Caricature error, please select a dataType from: train, val, github")
exit(1)
self.modelType = modelType
self.dir_path = os.path.join(self.dir_path, self.subPath)
self.attributes = ['Women',
'Asian',
'White',
'Black',
'Youth',
'Middle',
'Old',
'Wrinkle',
'MakeUp',
'Bald',
'LargeForehead',
'RoundFace',
'DiamondFace',
'OvalFace',
'SquareShapeFace',
'NarrowEye',
'SleepyEye',
'SlantEye',
'SharpEye',
'FlabbyEye',
'BigEye',
'SmallEye',
'UnderEyePuffiness',
'BigNose',
'SmallNose',
'HighNose',
'FlatNose',
'HookNose',
'WideNose',
'NarrowNose',
'Toothy',
'Smile',
'BigMouth',
'SmallMouth',
'ThickLips',
'ThinLips',
'DoubleChin',
'ArchedEyebrows',
'FlatEyebrow',
'SlantedEyebrows',
'UpsideDownSlantedEyebrows',
'BushyEyebrows',
'ThickEyebrows',
'ThinEyebrows',
'Mustache',
'Goatee',
'Whiskers',
'OtherBeard&NoBeard',
'HighCheekbones',
'SquareJaw']
self.names, self.annas, self.visuals, self.num_attribute = self.getImgNameAndAnnas()
print(parse+"dataset, images: ", len(self.names), " type for: ", self.dataType, " num_attribute: ",
self.num_attribute)
def getImgNameAndAnnas(self):
names = []
annas = []
visuals = []
file = self.subPath+".txt"
file_v = self.subPath+"_V.txt"
fileList = open(os.path.join(self.dir_path, file)).readlines()
fileVList = open((os.path.join(self.dir_path, file_v))).readlines()
if self.modelType == 'seperate':
num_attribute = 1
attribute = self.des_attri
print("des_attribute", attribute)
if attribute not in self.attributes:
print("error: ", attribute, "is not in this dataset, please write a correct attribute in param")
exit(1)
for line in fileList:
names.append(line[0])
attributes = line[1::]
index = self.attributes.index(attribute)
annas.append([int(attributes[index])])
for line in fileVList:
attributes_v = line[1::]
index = self.attributes.index(attribute)
visuals.append([int(attributes_v[index])])
else:
for line in fileList:
names.append(line[0])
annas.append([int(x) for x in line[1::]])
for line in fileVList:
visuals.append([int(x) for x in line[1::]])
self.attributes = self.attributes
num_attribute = len(self.attributes)
return names, annas, visuals, num_attribute
def getPath(self, name):
name = name.replace(' ', '_')
name = name.replace('._', '_')
name = name.replace('-', '_')
name = name + ".jpg"
return name
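

# Usage sketch (illustrative only): assumes the hard-coded data root '/data/jw/dataset/<parse>'
# from __init__ exists on this machine; 'Smile' is one of the attribute names listed above and
# 'seperate' is the per-attribute model type checked in getImgNameAndAnnas().
#
# train_set = WebCariA('train', 'seperate', 'Photo', des_attri='Smile')
# first_image = train_set.getPath(train_set.names[0])
# first_label = train_set.annas[0]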
| [((60, 24, 60, 65), 'os.path.join', 'os.path.join', ({(60, 37, 60, 50): 'self.dir_path', (60, 52, 60, 64): 'self.subPath'}, {}), '(self.dir_path, self.subPath)', False, 'import os\n'), ((122, 24, 122, 57), 'os.path.join', 'os.path.join', ({(122, 37, 122, 50): 'self.dir_path', (122, 52, 122, 56): 'file'}, {}), '(self.dir_path, file)', False, 'import os\n'), ((123, 26, 123, 61), 'os.path.join', 'os.path.join', ({(123, 39, 123, 52): 'self.dir_path', (123, 54, 123, 60): 'file_v'}, {}), '(self.dir_path, file_v)', False, 'import os\n')] |
zomglings/moonworm | moonworm/crawler/state/json_state.py | 930e60199629b6a04adecc7f9ff9450e51bb4640 | import datetime
import json
import time
from typing import Optional
from web3.datastructures import AttributeDict
from .event_scanner_state import EventScannerState
class JSONifiedState(EventScannerState):
"""Store the state of scanned blocks and all events.
All state is an in-memory dict.
Simple load/store massive JSON on start up.
"""
def __init__(self):
self.state = None
self.fname = "test-state.json"
# How many second ago we saved the JSON file
self.last_save = 0
def reset(self):
"""Create initial state of nothing scanned."""
self.state = {
"last_scanned_block": 0,
"blocks": {},
}
def restore(self):
"""Restore the last scan state from a file."""
try:
self.state = json.load(open(self.fname, "rt"))
print(
f"Restored the state, previously {self.state['last_scanned_block']} blocks have been scanned"
)
except (IOError, json.decoder.JSONDecodeError):
print("State starting from scratch")
self.reset()
def save(self):
"""Save everything we have scanned so far in a file."""
with open(self.fname, "wt") as f:
json.dump(self.state, f)
self.last_save = time.time()
#
# EventScannerState methods implemented below
#
def get_last_scanned_block(self):
"""The number of the last block we have stored."""
return self.state["last_scanned_block"]
def delete_data(self, since_block):
"""Remove potentially reorganised blocks from the scan data."""
for block_num in range(since_block, self.get_last_scanned_block()):
if block_num in self.state["blocks"]:
del self.state["blocks"][block_num]
def start_chunk(self, block_number, chunk_size):
pass
def end_chunk(self, block_number):
"""Save at the end of each block, so we can resume in the case of a crash or CTRL+C"""
# Next time the scanner is started we will resume from this block
self.state["last_scanned_block"] = block_number
# Save the database file for every minute
if time.time() - self.last_save > 60:
self.save()
def process_event(
self, block_when: Optional[datetime.datetime], event: AttributeDict
) -> str:
"""Record a ERC-20 transfer in our database."""
# Events are keyed by their transaction hash and log index
# One transaction may contain multiple events
# and each one of those gets their own log index
# event_name = event.event # "Transfer"
log_index = event.logIndex # Log index within the block
# transaction_index = event.transactionIndex # Transaction index within the block
txhash = event.transactionHash.hex() # Transaction hash
block_number = event.blockNumber
# Convert ERC-20 Transfer event to our internal format
args = event["args"]
transfer = {
"from": args["from"],
"to": args.to,
"value": args.value,
}
if block_when is not None:
transfer["timestamp"] = block_when.isoformat()
# Create empty dict as the block that contains all transactions by txhash
if block_number not in self.state["blocks"]:
self.state["blocks"][block_number] = {}
block = self.state["blocks"][block_number]
if txhash not in block:
# We have not yet recorded any transfers in this transaction
# (One transaction may contain multiple events if executed by a smart contract).
# Create a tx entry that contains all events by a log index
self.state["blocks"][block_number][txhash] = {}
# Record ERC-20 transfer in our database
self.state["blocks"][block_number][txhash][log_index] = transfer
# Return a pointer that allows us to look up this event later if needed
return f"{block_number}-{txhash}-{log_index}"
| [((46, 25, 46, 36), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((45, 12, 45, 36), 'json.dump', 'json.dump', ({(45, 22, 45, 32): 'self.state', (45, 34, 45, 35): 'f'}, {}), '(self.state, f)', False, 'import json\n'), ((71, 11, 71, 22), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
frahlg/npbench | npbench/benchmarks/nbody/nbody_dace.py | 1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26 | # Adapted from https://github.com/pmocz/nbody-python/blob/master/nbody.py
# TODO: Add GPL-3.0 License
import numpy as np
import dace as dc
"""
Create Your Own N-body Simulation (With Python)
Philip Mocz (2020) Princeton University, @PMocz
Simulate orbits of stars interacting due to gravity
Code calculates pairwise forces according to Newton's Law of Gravity
"""
N, Nt = (dc.symbol(s, dtype=dc.int64) for s in ('N', 'Nt'))
# @dc.program
# def hstack(out: dc.float64[N, 3], a: dc.float64[N],
# b: dc.float64[N], c: dc.float64[N]):
# out[:, 0] = a
# out[:, 1] = b
# out[:, 2] = c
@dc.program
def getAcc(pos: dc.float64[N, 3], mass: dc.float64[N], G: dc.float64,
softening: dc.float64):
"""
Calculate the acceleration on each particle due to Newton's Law
pos is an N x 3 matrix of positions
mass is an N x 1 vector of masses
G is Newton's Gravitational constant
softening is the softening length
a is N x 3 matrix of accelerations
"""
# positions r = [x,y,z] for all particles
x = pos[:, 0:1]
y = pos[:, 1:2]
z = pos[:, 2:3]
# matrix that stores all pairwise particle separations: r_j - r_i
# dx = x.T - x
# dy = y.T - y
# dz = z.T - z
# dx = np.transpose(x) - x
# dy = np.transpose(y) - y
# dz = np.transpose(z) - z
dx = np.add.outer(-x, x)
dy = np.add.outer(-y, y)
dz = np.add.outer(-z, z)
# matrix that stores 1/r^3 for all particle pairwise particle separations
inv_r3 = (dx**2 + dy**2 + dz**2 + softening**2)
# inv_r3[inv_r3>0] = inv_r3[inv_r3>0]**(-1.5)
I = inv_r3 > 0
np.power(inv_r3, -1.5, out=inv_r3, where=I)
ax = G * (dx * inv_r3) @ mass
ay = G * (dy * inv_r3) @ mass
az = G * (dz * inv_r3) @ mass
# pack together the acceleration components
# a = np.hstack((ax,ay,az))
a = np.ndarray((N, 3), dtype=np.float64)
# hstack(a, ax, ay, az)
a[:, 0] = ax
a[:, 1] = ay
a[:, 2] = az
return a
@dc.program
def getEnergy(pos: dc.float64[N, 3], vel: dc.float64[N, 3],
mass: dc.float64[N], G: dc.float64):
"""
Get kinetic energy (KE) and potential energy (PE) of simulation
pos is N x 3 matrix of positions
vel is N x 3 matrix of velocities
mass is an N x 1 vector of masses
G is Newton's Gravitational constant
KE is the kinetic energy of the system
PE is the potential energy of the system
"""
# Kinetic Energy:
# KE = 0.5 * np.sum(np.sum( mass * vel**2 ))
# KE = 0.5 * np.sum( mass * vel**2 )
KE = 0.5 * np.sum(np.reshape(mass, (N, 1)) * vel**2)
# Potential Energy:
# positions r = [x,y,z] for all particles
x = pos[:, 0:1]
y = pos[:, 1:2]
z = pos[:, 2:3]
# matrix that stores all pairwise particle separations: r_j - r_i
# dx = x.T - x
# dy = y.T - y
# dz = z.T - z
# dx = np.transpose(x) - x
# dy = np.transpose(y) - y
# dz = np.transpose(z) - z
dx = np.add.outer(-x, x)
dy = np.add.outer(-y, y)
dz = np.add.outer(-z, z)
# matrix that stores 1/r for all particle pairwise particle separations
inv_r = np.sqrt(dx**2 + dy**2 + dz**2)
# inv_r[inv_r>0] = 1.0/inv_r[inv_r>0]
I = inv_r > 0
np.divide(1.0, inv_r, out=inv_r, where=I)
# sum over upper triangle, to count each interaction only once
# PE = G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1)))
# PE = G * np.sum(np.triu(-(mass*mass.T)*inv_r,1))
tmp = -np.multiply.outer(mass, mass) * inv_r
PE = 0.0
for j in range(N):
for k in range(j + 1, N):
PE += tmp[j, k]
PE *= G
return KE, PE
@dc.program
def nbody(mass: dc.float64[N], pos: dc.float64[N, 3], vel: dc.float64[N, 3],
dt: dc.float64, G: dc.float64, softening: dc.float64):
# Convert to Center-of-Mass frame
# vel -= np.mean(mass * vel, axis=0) / np.mean(mass)
# vel -= np.mean(np.reshape(mass, (N, 1)) * vel, axis=0) / np.mean(mass)
# tmp = np.divide(np.mean(np.reshape(mass, (N, 1)) * vel, axis=0), np.mean(mass))
np.subtract(vel,
np.mean(np.reshape(mass,
(N, 1)) * vel, axis=0) / np.mean(mass),
out=vel)
# calculate initial gravitational accelerations
acc = getAcc(pos, mass, G, softening)
# calculate initial energy of system
KE = np.ndarray(Nt + 1, dtype=np.float64)
PE = np.ndarray(Nt + 1, dtype=np.float64)
KE[0], PE[0] = getEnergy(pos, vel, mass, G)
t = 0.0
# Simulation Main Loop
for i in range(Nt):
# (1/2) kick
vel += acc * dt / 2.0
# drift
pos += vel * dt
# update accelerations
acc[:] = getAcc(pos, mass, G, softening)
# (1/2) kick
vel += acc * dt / 2.0
# update time
t += dt
# get energy of system
KE[i + 1], PE[i + 1] = getEnergy(pos, vel, mass, G)
return KE, PE
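

# Usage sketch (illustrative only): assumes the DaCe program above can be called like a
# function with concrete arrays plus values for the symbols N and Nt (as a benchmark
# harness would do); the initial conditions are arbitrary placeholders.
#
# import numpy as np
# n, nt = 100, 400
# rng = np.random.default_rng(17)
# mass = 20.0 * np.ones(n) / n
# pos = rng.standard_normal((n, 3))
# vel = rng.standard_normal((n, 3))
# KE, PE = nbody(mass, pos, vel, dt=0.01, G=1.0, softening=0.1, N=n, Nt=nt)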
| [((13, 9, 13, 37), 'dace.symbol', 'dc.symbol', (), '', True, 'import dace as dc\n'), ((46, 9, 46, 28), 'numpy.add.outer', 'np.add.outer', ({(46, 22, 46, 24): '-x', (46, 26, 46, 27): 'x'}, {}), '(-x, x)', True, 'import numpy as np\n'), ((47, 9, 47, 28), 'numpy.add.outer', 'np.add.outer', ({(47, 22, 47, 24): '-y', (47, 26, 47, 27): 'y'}, {}), '(-y, y)', True, 'import numpy as np\n'), ((48, 9, 48, 28), 'numpy.add.outer', 'np.add.outer', ({(48, 22, 48, 24): '-z', (48, 26, 48, 27): 'z'}, {}), '(-z, z)', True, 'import numpy as np\n'), ((54, 4, 54, 47), 'numpy.power', 'np.power', (), '', True, 'import numpy as np\n'), ((62, 8, 62, 44), 'numpy.ndarray', 'np.ndarray', (), '', True, 'import numpy as np\n'), ((102, 9, 102, 28), 'numpy.add.outer', 'np.add.outer', ({(102, 22, 102, 24): '-x', (102, 26, 102, 27): 'x'}, {}), '(-x, x)', True, 'import numpy as np\n'), ((103, 9, 103, 28), 'numpy.add.outer', 'np.add.outer', ({(103, 22, 103, 24): '-y', (103, 26, 103, 27): 'y'}, {}), '(-y, y)', True, 'import numpy as np\n'), ((104, 9, 104, 28), 'numpy.add.outer', 'np.add.outer', ({(104, 22, 104, 24): '-z', (104, 26, 104, 27): 'z'}, {}), '(-z, z)', True, 'import numpy as np\n'), ((107, 12, 107, 42), 'numpy.sqrt', 'np.sqrt', ({(107, 20, 107, 41): 'dx ** 2 + dy ** 2 + dz ** 2'}, {}), '(dx ** 2 + dy ** 2 + dz ** 2)', True, 'import numpy as np\n'), ((110, 4, 110, 45), 'numpy.divide', 'np.divide', (), '', True, 'import numpy as np\n'), ((142, 9, 142, 45), 'numpy.ndarray', 'np.ndarray', (), '', True, 'import numpy as np\n'), ((143, 9, 143, 45), 'numpy.ndarray', 'np.ndarray', (), '', True, 'import numpy as np\n'), ((115, 11, 115, 40), 'numpy.multiply.outer', 'np.multiply.outer', ({(115, 29, 115, 33): 'mass', (115, 35, 115, 39): 'mass'}, {}), '(mass, mass)', True, 'import numpy as np\n'), ((135, 60, 135, 73), 'numpy.mean', 'np.mean', ({(135, 68, 135, 72): 'mass'}, {}), '(mass)', True, 'import numpy as np\n'), ((86, 22, 86, 46), 'numpy.reshape', 'np.reshape', ({(86, 33, 86, 37): 'mass', (86, 39, 86, 45): '(N, 1)'}, {}), '(mass, (N, 1))', True, 'import numpy as np\n'), ((134, 24, 135, 42), 'numpy.reshape', 'np.reshape', ({(134, 35, 134, 39): 'mass', (135, 35, 135, 41): '(N, 1)'}, {}), '(mass, (N, 1))', True, 'import numpy as np\n')] |
Healthy-Kokoro/Hiroshima | application/__init__.py | 87c6c533f97f55ceb33553a2409076bcd21a36d2 | # Third-party imports
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
configurations = {
'development': 'configurations.DevelopmentConfiguration',
'testing': 'configurations.TestingConfiguration',
'staging': 'configurations.StagingConfiguration',
'production': 'configurations.ProductionConfiguration'
}
database = SQLAlchemy()
def create_application(configuration):
application = Flask(__name__, instance_relative_config=True)
application.config.from_object(configurations[configuration])
application.config.from_pyfile('configuration.py', silent=True)
database.init_app(application)
from application.init.views import blueprint
application.register_blueprint(blueprint)
from application.metadata.views import blueprint
application.register_blueprint(blueprint)
return application
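

# Usage sketch (illustrative only): how an entry point (e.g. a wsgi.py) would use the factory,
# assuming the configuration classes referenced above exist on the import path.
#
# application = create_application('development')
# application.run()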
| [((12, 11, 12, 23), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ({}, {}), '()', False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((15, 15, 15, 61), 'flask.Flask', 'Flask', (), '', False, 'from flask import Flask\n')] |
drednout/letspython | lesson5/exceptions_except.py | 9747442d63873b5f71e2c15ed5528bd98ad5ac31 | def take_beer(fridge, number=1):
if "beer" not in fridge:
raise Exception("No beer at all:(")
if number > fridge["beer"]:
raise Exception("Not enough beer:(")
fridge["beer"] -= number
if __name__ == "__main__":
fridge = {
"beer": 2,
"milk": 1,
"meat": 3,
}
print("I wanna drink 1 bottle of beer...")
take_beer(fridge)
print("Oooh, great!")
print("I wanna drink 2 bottle of beer...")
try:
take_beer(fridge, 2)
except Exception as e:
print("Error: {}. Let's continue".format(e))
print("Fallback. Try to take 1 bottle of beer...")
take_beer(fridge, 1)
print("Oooh, awesome!")
| [] |
bhanupratapjain/icfs | icfs/filesystem/exceptions.py | 44a2d5baadbc31bfebb931d713b426d22aabd969 | class ICFSError(IOError):
"""Error while making any filesystem API requests."""
| [] |
juhovan/synapse | synapse/storage/data_stores/state/store.py | 57feeab364325374b14ff67ac97c288983cc5cde | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import namedtuple
from typing import Dict, Iterable, List, Set, Tuple
from twisted.internet import defer
from synapse.api.constants import EventTypes
from synapse.storage._base import SQLBaseStore
from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore
from synapse.storage.database import Database
from synapse.storage.state import StateFilter
from synapse.types import StateMap
from synapse.util.caches.descriptors import cached
from synapse.util.caches.dictionary_cache import DictionaryCache
logger = logging.getLogger(__name__)
MAX_STATE_DELTA_HOPS = 100
class _GetStateGroupDelta(
namedtuple("_GetStateGroupDelta", ("prev_group", "delta_ids"))
):
"""Return type of get_state_group_delta that implements __len__, which lets
    us use the iterable flag when caching
"""
__slots__ = []
def __len__(self):
return len(self.delta_ids) if self.delta_ids else 0
class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
"""A data store for fetching/storing state groups.
"""
def __init__(self, database: Database, db_conn, hs):
super(StateGroupDataStore, self).__init__(database, db_conn, hs)
# Originally the state store used a single DictionaryCache to cache the
# event IDs for the state types in a given state group to avoid hammering
# on the state_group* tables.
#
# The point of using a DictionaryCache is that it can cache a subset
# of the state events for a given state group (i.e. a subset of the keys for a
# given dict which is an entry in the cache for a given state group ID).
#
# However, this poses problems when performing complicated queries
# on the store - for instance: "give me all the state for this group, but
# limit members to this subset of users", as DictionaryCache's API isn't
# rich enough to say "please cache any of these fields, apart from this subset".
# This is problematic when lazy loading members, which requires this behaviour,
# as without it the cache has no choice but to speculatively load all
# state events for the group, which negates the efficiency being sought.
#
# Rather than overcomplicating DictionaryCache's API, we instead split the
# state_group_cache into two halves - one for tracking non-member events,
# and the other for tracking member_events. This means that lazy loading
# queries can be made in a cache-friendly manner by querying both caches
# separately and then merging the result. So for the example above, you
# would query the members cache for a specific subset of state keys
# (which DictionaryCache will handle efficiently and fine) and the non-members
# cache for all state (which DictionaryCache will similarly handle fine)
# and then just merge the results together.
#
# We size the non-members cache to be smaller than the members cache as the
# vast majority of state in Matrix (today) is member events.
self._state_group_cache = DictionaryCache(
"*stateGroupCache*",
# TODO: this hasn't been tuned yet
50000,
)
self._state_group_members_cache = DictionaryCache(
"*stateGroupMembersCache*", 500000,
)
@cached(max_entries=10000, iterable=True)
def get_state_group_delta(self, state_group):
"""Given a state group try to return a previous group and a delta between
the old and the new.
Returns:
(prev_group, delta_ids), where both may be None.
"""
def _get_state_group_delta_txn(txn):
prev_group = self.db.simple_select_one_onecol_txn(
txn,
table="state_group_edges",
keyvalues={"state_group": state_group},
retcol="prev_state_group",
allow_none=True,
)
if not prev_group:
return _GetStateGroupDelta(None, None)
delta_ids = self.db.simple_select_list_txn(
txn,
table="state_groups_state",
keyvalues={"state_group": state_group},
retcols=("type", "state_key", "event_id"),
)
return _GetStateGroupDelta(
prev_group,
{(row["type"], row["state_key"]): row["event_id"] for row in delta_ids},
)
return self.db.runInteraction(
"get_state_group_delta", _get_state_group_delta_txn
)
@defer.inlineCallbacks
def _get_state_groups_from_groups(
self, groups: List[int], state_filter: StateFilter
):
"""Returns the state groups for a given set of groups from the
database, filtering on types of state events.
Args:
groups: list of state group IDs to query
state_filter: The state filter used to fetch state
from the database.
Returns:
Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
"""
results = {}
chunks = [groups[i : i + 100] for i in range(0, len(groups), 100)]
for chunk in chunks:
res = yield self.db.runInteraction(
"_get_state_groups_from_groups",
self._get_state_groups_from_groups_txn,
chunk,
state_filter,
)
results.update(res)
return results
def _get_state_for_group_using_cache(self, cache, group, state_filter):
"""Checks if group is in cache. See `_get_state_for_groups`
Args:
cache(DictionaryCache): the state group cache to use
group(int): The state group to lookup
state_filter (StateFilter): The state filter used to fetch state
from the database.
Returns 2-tuple (`state_dict`, `got_all`).
`got_all` is a bool indicating if we successfully retrieved all
        requested state from the cache; if False we need to query the DB for the
missing state.
"""
is_all, known_absent, state_dict_ids = cache.get(group)
if is_all or state_filter.is_full():
# Either we have everything or want everything, either way
# `is_all` tells us whether we've gotten everything.
return state_filter.filter_state(state_dict_ids), is_all
# tracks whether any of our requested types are missing from the cache
missing_types = False
if state_filter.has_wildcards():
# We don't know if we fetched all the state keys for the types in
# the filter that are wildcards, so we have to assume that we may
# have missed some.
missing_types = True
else:
# There aren't any wild cards, so `concrete_types()` returns the
# complete list of event types we're wanting.
for key in state_filter.concrete_types():
if key not in state_dict_ids and key not in known_absent:
missing_types = True
break
return state_filter.filter_state(state_dict_ids), not missing_types
@defer.inlineCallbacks
def _get_state_for_groups(
self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all()
):
"""Gets the state at each of a list of state groups, optionally
filtering by type/state_key
Args:
groups: list of state groups for which we want
to get the state.
state_filter: The state filter used to fetch state
from the database.
Returns:
Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
"""
member_filter, non_member_filter = state_filter.get_member_split()
# Now we look them up in the member and non-member caches
(
non_member_state,
incomplete_groups_nm,
) = yield self._get_state_for_groups_using_cache(
groups, self._state_group_cache, state_filter=non_member_filter
)
(
member_state,
incomplete_groups_m,
) = yield self._get_state_for_groups_using_cache(
groups, self._state_group_members_cache, state_filter=member_filter
)
state = dict(non_member_state)
for group in groups:
state[group].update(member_state[group])
# Now fetch any missing groups from the database
incomplete_groups = incomplete_groups_m | incomplete_groups_nm
if not incomplete_groups:
return state
cache_sequence_nm = self._state_group_cache.sequence
cache_sequence_m = self._state_group_members_cache.sequence
# Help the cache hit ratio by expanding the filter a bit
db_state_filter = state_filter.return_expanded()
group_to_state_dict = yield self._get_state_groups_from_groups(
list(incomplete_groups), state_filter=db_state_filter
)
# Now lets update the caches
self._insert_into_cache(
group_to_state_dict,
db_state_filter,
cache_seq_num_members=cache_sequence_m,
cache_seq_num_non_members=cache_sequence_nm,
)
# And finally update the result dict, by filtering out any extra
# stuff we pulled out of the database.
for group, group_state_dict in group_to_state_dict.items():
# We just replace any existing entries, as we will have loaded
# everything we need from the database anyway.
state[group] = state_filter.filter_state(group_state_dict)
return state
def _get_state_for_groups_using_cache(
self, groups: Iterable[int], cache: DictionaryCache, state_filter: StateFilter
) -> Tuple[Dict[int, StateMap[str]], Set[int]]:
"""Gets the state at each of a list of state groups, optionally
filtering by type/state_key, querying from a specific cache.
Args:
groups: list of state groups for which we want to get the state.
cache: the cache of group ids to state dicts which
we will pass through - either the normal state cache or the
specific members state cache.
state_filter: The state filter used to fetch state from the
database.
Returns:
Tuple of dict of state_group_id to state map of entries in the
cache, and the state group ids either missing from the cache or
incomplete.
"""
results = {}
incomplete_groups = set()
for group in set(groups):
state_dict_ids, got_all = self._get_state_for_group_using_cache(
cache, group, state_filter
)
results[group] = state_dict_ids
if not got_all:
incomplete_groups.add(group)
return results, incomplete_groups
def _insert_into_cache(
self,
group_to_state_dict,
state_filter,
cache_seq_num_members,
cache_seq_num_non_members,
):
"""Inserts results from querying the database into the relevant cache.
Args:
group_to_state_dict (dict): The new entries pulled from database.
Map from state group to state dict
state_filter (StateFilter): The state filter used to fetch state
from the database.
cache_seq_num_members (int): Sequence number of member cache since
last lookup in cache
cache_seq_num_non_members (int): Sequence number of member cache since
last lookup in cache
"""
# We need to work out which types we've fetched from the DB for the
# member vs non-member caches. This should be as accurate as possible,
# but can be an underestimate (e.g. when we have wild cards)
member_filter, non_member_filter = state_filter.get_member_split()
if member_filter.is_full():
# We fetched all member events
member_types = None
else:
# `concrete_types()` will only return a subset when there are wild
# cards in the filter, but that's fine.
member_types = member_filter.concrete_types()
if non_member_filter.is_full():
# We fetched all non member events
non_member_types = None
else:
non_member_types = non_member_filter.concrete_types()
for group, group_state_dict in group_to_state_dict.items():
state_dict_members = {}
state_dict_non_members = {}
for k, v in group_state_dict.items():
if k[0] == EventTypes.Member:
state_dict_members[k] = v
else:
state_dict_non_members[k] = v
self._state_group_members_cache.update(
cache_seq_num_members,
key=group,
value=state_dict_members,
fetched_keys=member_types,
)
self._state_group_cache.update(
cache_seq_num_non_members,
key=group,
value=state_dict_non_members,
fetched_keys=non_member_types,
)
def store_state_group(
self, event_id, room_id, prev_group, delta_ids, current_state_ids
):
"""Store a new set of state, returning a newly assigned state group.
Args:
event_id (str): The event ID for which the state was calculated
room_id (str)
prev_group (int|None): A previous state group for the room, optional.
delta_ids (dict|None): The delta between state at `prev_group` and
`current_state_ids`, if `prev_group` was given. Same format as
`current_state_ids`.
current_state_ids (dict): The state to store. Map of (type, state_key)
to event_id.
Returns:
Deferred[int]: The state group ID
"""
def _store_state_group_txn(txn):
if current_state_ids is None:
# AFAIK, this can never happen
raise Exception("current_state_ids cannot be None")
state_group = self.database_engine.get_next_state_group_id(txn)
self.db.simple_insert_txn(
txn,
table="state_groups",
values={"id": state_group, "room_id": room_id, "event_id": event_id},
)
# We persist as a delta if we can, while also ensuring the chain
            # of deltas isn't too long, as otherwise read performance degrades.
if prev_group:
is_in_db = self.db.simple_select_one_onecol_txn(
txn,
table="state_groups",
keyvalues={"id": prev_group},
retcol="id",
allow_none=True,
)
if not is_in_db:
raise Exception(
"Trying to persist state with unpersisted prev_group: %r"
% (prev_group,)
)
potential_hops = self._count_state_group_hops_txn(txn, prev_group)
if prev_group and potential_hops < MAX_STATE_DELTA_HOPS:
self.db.simple_insert_txn(
txn,
table="state_group_edges",
values={"state_group": state_group, "prev_state_group": prev_group},
)
self.db.simple_insert_many_txn(
txn,
table="state_groups_state",
values=[
{
"state_group": state_group,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
"event_id": state_id,
}
for key, state_id in delta_ids.items()
],
)
else:
self.db.simple_insert_many_txn(
txn,
table="state_groups_state",
values=[
{
"state_group": state_group,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
"event_id": state_id,
}
for key, state_id in current_state_ids.items()
],
)
# Prefill the state group caches with this group.
# It's fine to use the sequence like this as the state group map
# is immutable. (If the map wasn't immutable then this prefill could
# race with another update)
current_member_state_ids = {
s: ev
for (s, ev) in current_state_ids.items()
if s[0] == EventTypes.Member
}
txn.call_after(
self._state_group_members_cache.update,
self._state_group_members_cache.sequence,
key=state_group,
value=dict(current_member_state_ids),
)
current_non_member_state_ids = {
s: ev
for (s, ev) in current_state_ids.items()
if s[0] != EventTypes.Member
}
txn.call_after(
self._state_group_cache.update,
self._state_group_cache.sequence,
key=state_group,
value=dict(current_non_member_state_ids),
)
return state_group
return self.db.runInteraction("store_state_group", _store_state_group_txn)
def purge_unreferenced_state_groups(
self, room_id: str, state_groups_to_delete
) -> defer.Deferred:
"""Deletes no longer referenced state groups and de-deltas any state
groups that reference them.
Args:
room_id: The room the state groups belong to (must all be in the
same room).
state_groups_to_delete (Collection[int]): Set of all state groups
to delete.
"""
return self.db.runInteraction(
"purge_unreferenced_state_groups",
self._purge_unreferenced_state_groups,
room_id,
state_groups_to_delete,
)
def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete):
logger.info(
"[purge] found %i state groups to delete", len(state_groups_to_delete)
)
rows = self.db.simple_select_many_txn(
txn,
table="state_group_edges",
column="prev_state_group",
iterable=state_groups_to_delete,
keyvalues={},
retcols=("state_group",),
)
remaining_state_groups = {
row["state_group"]
for row in rows
if row["state_group"] not in state_groups_to_delete
}
logger.info(
"[purge] de-delta-ing %i remaining state groups",
len(remaining_state_groups),
)
        # Now we turn the state groups that reference to-be-deleted state
        # groups into non-delta versions.
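        # ("De-delta-ing" here means replacing a group's delta rows with its
        # fully resolved state, so it no longer depends on a soon-to-be-deleted
        # prev_group.)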
for sg in remaining_state_groups:
logger.info("[purge] de-delta-ing remaining state group %s", sg)
curr_state = self._get_state_groups_from_groups_txn(txn, [sg])
curr_state = curr_state[sg]
self.db.simple_delete_txn(
txn, table="state_groups_state", keyvalues={"state_group": sg}
)
self.db.simple_delete_txn(
txn, table="state_group_edges", keyvalues={"state_group": sg}
)
self.db.simple_insert_many_txn(
txn,
table="state_groups_state",
values=[
{
"state_group": sg,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
"event_id": state_id,
}
for key, state_id in curr_state.items()
],
)
logger.info("[purge] removing redundant state groups")
txn.executemany(
"DELETE FROM state_groups_state WHERE state_group = ?",
((sg,) for sg in state_groups_to_delete),
)
txn.executemany(
"DELETE FROM state_groups WHERE id = ?",
((sg,) for sg in state_groups_to_delete),
)
@defer.inlineCallbacks
def get_previous_state_groups(self, state_groups):
"""Fetch the previous groups of the given state groups.
Args:
state_groups (Iterable[int])
Returns:
Deferred[dict[int, int]]: mapping from state group to previous
state group.
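                For example (values illustrative): ``{103: 102, 102: 101}``
                when groups 102 and 103 are stored as deltas on top of 101
                and 102 respectively.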
"""
rows = yield self.db.simple_select_many_batch(
table="state_group_edges",
column="prev_state_group",
iterable=state_groups,
keyvalues={},
retcols=("prev_state_group", "state_group"),
desc="get_previous_state_groups",
)
return {row["state_group"]: row["prev_state_group"] for row in rows}
def purge_room_state(self, room_id, state_groups_to_delete):
"""Deletes all record of a room from state tables
Args:
room_id (str):
state_groups_to_delete (list[int]): State groups to delete
"""
return self.db.runInteraction(
"purge_room_state",
self._purge_room_state_txn,
room_id,
state_groups_to_delete,
)
def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete):
# first we have to delete the state groups states
logger.info("[purge] removing %s from state_groups_state", room_id)
self.db.simple_delete_many_txn(
txn,
table="state_groups_state",
column="state_group",
iterable=state_groups_to_delete,
keyvalues={},
)
# ... and the state group edges
logger.info("[purge] removing %s from state_group_edges", room_id)
self.db.simple_delete_many_txn(
txn,
table="state_group_edges",
column="state_group",
iterable=state_groups_to_delete,
keyvalues={},
)
# ... and the state groups
logger.info("[purge] removing %s from state_groups", room_id)
self.db.simple_delete_many_txn(
txn,
table="state_groups",
column="id",
iterable=state_groups_to_delete,
keyvalues={},
)
| [((31, 9, 31, 36), 'logging.getLogger', 'logging.getLogger', ({(31, 27, 31, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((38, 4, 38, 66), 'collections.namedtuple', 'namedtuple', ({(38, 15, 38, 36): '"""_GetStateGroupDelta"""', (38, 38, 38, 65): "('prev_group', 'delta_ids')"}, {}), "('_GetStateGroupDelta', ('prev_group', 'delta_ids'))", False, 'from collections import namedtuple\n'), ((95, 5, 95, 45), 'synapse.util.caches.descriptors.cached', 'cached', (), '', False, 'from synapse.util.caches.descriptors import cached\n'), ((86, 34, 90, 9), 'synapse.util.caches.dictionary_cache.DictionaryCache', 'DictionaryCache', ({(87, 12, 87, 31): '"""*stateGroupCache*"""', (89, 12, 89, 17): '50000'}, {}), "('*stateGroupCache*', 50000)", False, 'from synapse.util.caches.dictionary_cache import DictionaryCache\n'), ((91, 42, 93, 9), 'synapse.util.caches.dictionary_cache.DictionaryCache', 'DictionaryCache', ({(92, 12, 92, 38): '"""*stateGroupMembersCache*"""', (92, 40, 92, 46): '500000'}, {}), "('*stateGroupMembersCache*', 500000)", False, 'from synapse.util.caches.dictionary_cache import DictionaryCache\n'), ((201, 65, 201, 82), 'synapse.storage.state.StateFilter.all', 'StateFilter.all', ({}, {}), '()', False, 'from synapse.storage.state import StateFilter\n')] |
manulangat1/djcommerce | core/migrations/0011_itemvariation_variation.py | 2cd92631479ef949e0f05a255f2f50feca728802 | # Generated by Django 2.2.6 on 2020-02-09 12:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0010_auto_20200130_1135'),
]
operations = [
migrations.CreateModel(
name='Variation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Item')),
],
options={
'unique_together': {('item', 'name')},
},
),
migrations.CreateModel(
name='ItemVariation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(max_length=50)),
('attachment', models.ImageField(upload_to='variations/')),
('variation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Variation')),
],
options={
'unique_together': {('variation', 'value')},
},
),
]
| [((17, 23, 17, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((18, 25, 18, 56), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((19, 25, 19, 103), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((28, 23, 28, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((29, 26, 29, 57), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((30, 31, 30, 73), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n'), ((31, 30, 31, 113), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')] |
Colins-Ford/mnist-webapp | mnist/convolutional.py | 20e9b6f5520d5bda957d9501347f787450555db8 | import os
from mnist import model
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets("data/dataset/", one_hot=True)
# model
with tf.variable_scope("convolutional"):
x = tf.placeholder(tf.float32, [None, 784])
keep_prob = tf.placeholder(tf.float32)
y, variables = model.convolutional(x, keep_prob)
# train
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
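# NOTE: the hand-rolled -sum(y_ * log(y)) cross-entropy is numerically unstable
# when y contains exact zeros; tf.nn.softmax_cross_entropy_with_logits (applied
# to pre-softmax logits) is the usual stable alternative.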
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver(variables)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(20000):
batch = data.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
print("step %d, training accuracy %g" % (i, train_accuracy))
sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print(sess.run(accuracy, feed_dict={x: data.test.images, y_: data.test.labels, keep_prob: 1.0}))
path = saver.save(
sess, os.path.join(os.path.dirname(__file__), 'data', 'convolutional.ckpt'),
write_meta_graph=False, write_state=False)
print("Saved:", path)
| [((7, 7, 7, 63), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (), '', False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((16, 5, 16, 43), 'tensorflow.placeholder', 'tf.placeholder', ({(16, 20, 16, 30): 'tf.float32', (16, 32, 16, 42): '[None, 10]'}, {}), '(tf.float32, [None, 10])', True, 'import tensorflow as tf\n'), ((22, 8, 22, 33), 'tensorflow.train.Saver', 'tf.train.Saver', ({(22, 23, 22, 32): 'variables'}, {}), '(variables)', True, 'import tensorflow as tf\n'), ((10, 5, 10, 39), 'tensorflow.variable_scope', 'tf.variable_scope', ({(10, 23, 10, 38): '"""convolutional"""'}, {}), "('convolutional')", True, 'import tensorflow as tf\n'), ((11, 8, 11, 47), 'tensorflow.placeholder', 'tf.placeholder', ({(11, 23, 11, 33): 'tf.float32', (11, 35, 11, 46): '[None, 784]'}, {}), '(tf.float32, [None, 784])', True, 'import tensorflow as tf\n'), ((12, 16, 12, 42), 'tensorflow.placeholder', 'tf.placeholder', ({(12, 31, 12, 41): 'tf.float32'}, {}), '(tf.float32)', True, 'import tensorflow as tf\n'), ((13, 19, 13, 52), 'mnist.model.convolutional', 'model.convolutional', ({(13, 39, 13, 40): 'x', (13, 42, 13, 51): 'keep_prob'}, {}), '(x, keep_prob)', False, 'from mnist import model\n'), ((19, 30, 19, 45), 'tensorflow.argmax', 'tf.argmax', ({(19, 40, 19, 41): 'y', (19, 43, 19, 44): '1'}, {}), '(y, 1)', True, 'import tensorflow as tf\n'), ((19, 47, 19, 63), 'tensorflow.argmax', 'tf.argmax', ({(19, 57, 19, 59): 'y_', (19, 61, 19, 62): '1'}, {}), '(y_, 1)', True, 'import tensorflow as tf\n'), ((20, 26, 20, 65), 'tensorflow.cast', 'tf.cast', ({(20, 34, 20, 52): 'correct_prediction', (20, 54, 20, 64): 'tf.float32'}, {}), '(correct_prediction, tf.float32)', True, 'import tensorflow as tf\n'), ((23, 5, 23, 17), 'tensorflow.Session', 'tf.Session', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((18, 13, 18, 41), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ({(18, 36, 18, 40): '0.0001'}, {}), '(0.0001)', True, 'import tensorflow as tf\n'), ((24, 13, 24, 46), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((17, 36, 17, 45), 'tensorflow.log', 'tf.log', ({(17, 43, 17, 44): 'y'}, {}), '(y)', True, 'import tensorflow as tf\n'), ((35, 27, 35, 52), 'os.path.dirname', 'os.path.dirname', ({(35, 43, 35, 51): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
nokout/au_address | parse_scripts/import_osm.py | 07138ecd8fedab9566435b609cb8124b67ad42ff | import requests
import codecs
query1 = """<union>
<query type="way">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:state"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="way">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:city"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="way">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:postcode"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:state"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:city"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:postcode"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
</union>
<print/>""" % ((-70.000000, 50.000000, 25.000000, -125.000000) * 6)
r1 = requests.post('http://overpass-api.de/api/interpreter/', data=query1)
r1.encoding = 'utf-8'
f = codecs.open('data/osm_data.xml', encoding='utf-8', mode='w+')
f.write(r1.text)
query2 = """<union>
<query type="way">
<has-kv k="addr:street"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:prefix"/>
<has-kv k="addr:street:type"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:street"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:prefix"/>
<has-kv k="addr:street:type"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
</union>
<print/>""" % ((-87.61309146881104, 41.890042371392965, 41.87234107841773, -87.64235973358154) * 2)
#r2 = requests.post('http://overpass-api.de/api/interpreter/', data=query2)
#f = codecs.open("data/osm_data_street.xml", "wb", "utf-8")
#r2.encoding = 'utf-8'
#f.write(r2.text)
query3 = """<union>
<query type="way">
<has-kv k="addr:full" regv="^[0-9]+.*[a-z]+.*[0-9]{5}.*"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:full" regv="^[0-9]+.*[a-z]+.*[0-9]{5}.*"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
</union>
<print/>
""" % ((-70.000000, 50.000000, 25.000000, -125.000000) * 2)
if __name__ == '__main__' :
r3 = requests.post('http://overpass-api.de/api/interpreter/', data=query3)
f = codecs.open("data/osm_data_full_addr.xml", "wb", "utf-8")
r3.encoding = 'utf-8'
f.write(r3.text)
| [((50, 5, 50, 74), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((53, 4, 53, 66), 'codecs.open', 'codecs.open', (), '', False, 'import codecs\n'), ((97, 9, 97, 78), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((99, 8, 99, 65), 'codecs.open', 'codecs.open', ({(99, 20, 99, 49): '"""data/osm_data_full_addr.xml"""', (99, 51, 99, 55): '"""wb"""', (99, 57, 99, 64): '"""utf-8"""'}, {}), "('data/osm_data_full_addr.xml', 'wb', 'utf-8')", False, 'import codecs\n')] |
abhirevan/pedestrian-detector | tools/test_net_batch.py | f4fa4cd59315ea515ace3c529b716ff3173e2205 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image database."""
import _init_paths
from fast_rcnn.test import test_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
import pandas as pd
def splitall(path):
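    # Split a filesystem path into all of its components,
    # e.g. "models/vgg16/test.caffemodel" -> ["models", "vgg16", "test.caffemodel"].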
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network pipeline')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int, required=True)
parser.add_argument('--dir', dest='dir',
help='Directory of the model files',
default="", type=str, required=True)
parser.add_argument('--models', dest='model_files',
help='Text file with names of models',
default=None, type=str, required=True)
parser.add_argument('--prototxt', dest='prototxt',
help='prototxt', default=None, type=str, required=True)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='ped_test_small', type=str, required=True)
parser.add_argument('--cfg', dest='cfg_file',
help='cfg',
default='experiments/cfgs/faster_rcnn_end2end.yml', type=str)
parser.add_argument('--res', dest='res_file',
help='result file',
default='', type=str, required=True)
args = parser.parse_args()
return args
def run_test_net(gpu_id, caffemodel, prototxt, imdb_name, cfg_file):
if cfg_file is not None:
cfg_from_file(cfg_file)
cfg.GPU_ID = gpu_id
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(caffemodel):
print('Waiting for {} to exist...'.format(caffemodel))
time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(caffemodel))[0]
imdb = get_imdb(imdb_name)
if not cfg.TEST.HAS_RPN:
imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
n, _ = os.path.splitext(args.caffemodel)
paths = splitall(n)
proposal_prefix = paths[-1]
return test_net(net, imdb, max_per_image=100, vis=False, proposal_prefix=proposal_prefix)
def run_test_nets(gpu_id, dir, model_files, prototxt, imdb_name, cfg_file, res_file):
models = [line.rstrip('\n') for line in open(os.path.join(dir, model_files))]
df_results = pd.DataFrame()
for model in models:
results = run_test_net(gpu_id, os.path.join(dir, model), prototxt, imdb_name, cfg_file)
for result in results:
result['file'] = model
df_results = df_results.append(results, ignore_index=True)
df_results.to_csv(os.path.join(dir, res_file))
if __name__ == '__main__':
# args = parse_args()
gpu_id = 0
# dir = '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup'
# model_files = 'test.txt'
args = parse_args()
print('Called with args:')
print(args)
run_test_nets(args.gpu_id, args.dir, args.model_files, args.prototxt, args.imdb_name, args.cfg_file, args.res_file)
# run_test_net(gpu_id,caffemodel, prototxt, imdb_name, cfg_file)
| [((43, 13, 43, 86), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((74, 4, 74, 22), 'pprint.pprint', 'pprint.pprint', ({(74, 18, 74, 21): 'cfg'}, {}), '(cfg)', False, 'import pprint\n'), ((80, 4, 80, 24), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ({}, {}), '()', False, 'import caffe\n'), ((81, 4, 81, 28), 'caffe.set_device', 'caffe.set_device', ({(81, 21, 81, 27): 'gpu_id'}, {}), '(gpu_id)', False, 'import caffe\n'), ((82, 10, 82, 53), 'caffe.Net', 'caffe.Net', ({(82, 20, 82, 28): 'prototxt', (82, 30, 82, 40): 'caffemodel', (82, 42, 82, 52): 'caffe.TEST'}, {}), '(prototxt, caffemodel, caffe.TEST)', False, 'import caffe\n'), ((85, 11, 85, 30), 'datasets.factory.get_imdb', 'get_imdb', ({(85, 20, 85, 29): 'imdb_name'}, {}), '(imdb_name)', False, 'from datasets.factory import get_imdb\n'), ((89, 11, 89, 44), 'os.path.splitext', 'os.path.splitext', ({(89, 28, 89, 43): 'args.caffemodel'}, {}), '(args.caffemodel)', False, 'import time, os, sys\n'), ((94, 11, 94, 93), 'fast_rcnn.test.test_net', 'test_net', (), '', False, 'from fast_rcnn.test import test_net\n'), ((99, 17, 99, 31), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((26, 16, 26, 35), 'os.path.split', 'os.path.split', ({(26, 30, 26, 34): 'path'}, {}), '(path)', False, 'import time, os, sys\n'), ((69, 8, 69, 31), 'fast_rcnn.config.cfg_from_file', 'cfg_from_file', ({(69, 22, 69, 30): 'cfg_file'}, {}), '(cfg_file)', False, 'from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list\n'), ((76, 14, 76, 40), 'os.path.exists', 'os.path.exists', ({(76, 29, 76, 39): 'caffemodel'}, {}), '(caffemodel)', False, 'import time, os, sys\n'), ((78, 8, 78, 22), 'time.sleep', 'time.sleep', ({(78, 19, 78, 21): '(10)'}, {}), '(10)', False, 'import time, os, sys\n'), ((106, 22, 106, 49), 'os.path.join', 'os.path.join', ({(106, 35, 106, 38): 'dir', (106, 40, 106, 48): 'res_file'}, {}), '(dir, res_file)', False, 'import time, os, sys\n'), ((83, 32, 83, 60), 'os.path.basename', 'os.path.basename', ({(83, 49, 83, 59): 'caffemodel'}, {}), '(caffemodel)', False, 'import time, os, sys\n'), ((101, 39, 101, 63), 'os.path.join', 'os.path.join', ({(101, 52, 101, 55): 'dir', (101, 57, 101, 62): 'model'}, {}), '(dir, model)', False, 'import time, os, sys\n'), ((98, 49, 98, 79), 'os.path.join', 'os.path.join', ({(98, 62, 98, 65): 'dir', (98, 67, 98, 78): 'model_files'}, {}), '(dir, model_files)', False, 'import time, os, sys\n')] |
mangowilliam/my_gallary | mygallary/urls.py | 4c87fe055e5c28d6ca6a27ea5bde7df380750006 | from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^$', views.gallary, name='gallary'),
    url(r'^search/', views.search_image, name='search_image'),
    url(r'^details/(\d+)', views.search_location, name='images')
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| [((9, 4, 9, 45), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((10, 4, 10, 61), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((11, 4, 11, 63), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((14, 18, 14, 81), 'django.conf.urls.static.static', 'static', (), '', False, 'from django.conf.urls.static import static\n')] |
ldelzott/ByteTrack | yolox/data/datasets/mot.py | 5f8ab49a913a551d041918607a0bd2473602ad39 | import cv2
import numpy as np
from pycocotools.coco import COCO
import os
from ..dataloading import get_yolox_datadir
from .datasets_wrapper import Dataset
class MOTDataset(Dataset):
"""
COCO dataset class.
"""
def __init__( # This function is called in the exps yolox_x_mot17_half.py in this way: dataset = MOTDataset(
# data_dir=os.path.join(get_yolox_datadir(), "mot"),
# json_file=self.train_ann,
# name='train',
# img_size=self.input_size,
# preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406),
# std=(0.229, 0.224, 0.225),
# max_labels=500,),)
self,
data_dir=None,
json_file="train_half.json",
name="train",
img_size=(608, 1088),
preproc=None,
):
"""
COCO dataset initialization. Annotation data are read into memory by COCO API.
Args:
data_dir (str): dataset root directory
json_file (str): COCO json file name
name (str): COCO data name (e.g. 'train2017' or 'val2017')
img_size (int): target image size after pre-processing
preproc: data augmentation strategy
"""
super().__init__(img_size)
if data_dir is None:
data_dir = os.path.join(get_yolox_datadir(), "mot")
self.data_dir = data_dir
self.json_file = json_file
self.coco = COCO(os.path.join(self.data_dir, "annotations", self.json_file))
self.ids = self.coco.getImgIds()
self.class_ids = sorted(self.coco.getCatIds())
cats = self.coco.loadCats(self.coco.getCatIds())
self._classes = tuple([c["name"] for c in cats])
self.annotations = self._load_coco_annotations()
self.name = name
self.img_size = img_size
self.preproc = preproc
def __len__(self):
return len(self.ids)
def _load_coco_annotations(self):
return [self.load_anno_from_ids(_ids) for _ids in self.ids]
def load_anno_from_ids(self, id_):
im_ann = self.coco.loadImgs(id_)[0]
width = im_ann["width"]
height = im_ann["height"]
        # frame_id = im_ann["frame_id"] : the default value '1' avoids breaking the augmentation & evaluation processes
        frame_id = 1
        # video_id = im_ann["video_id"] : the default value '1' avoids breaking the augmentation & evaluation processes
        video_id = 1
anno_ids = self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False)
annotations = self.coco.loadAnns(anno_ids)
objs = []
for obj in annotations:
x1 = obj["bbox"][0]
y1 = obj["bbox"][1]
x2 = x1 + obj["bbox"][2]
y2 = y1 + obj["bbox"][3]
if obj["area"] > 0 and x2 >= x1 and y2 >= y1:
obj["clean_bbox"] = [x1, y1, x2, y2]
objs.append(obj)
num_objs = len(objs)
res = np.zeros((num_objs, 6))
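        # each row of res holds [x1, y1, x2, y2, class_index, track_id]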
for ix, obj in enumerate(objs):
cls = self.class_ids.index(obj["category_id"])
res[ix, 0:4] = obj["clean_bbox"]
res[ix, 4] = cls
#res[ix, 5] = obj["track_id"] # See comment line 66; same comment for the default value 1
res[ix, 5] = 1
file_name = im_ann["file_name"] if "file_name" in im_ann else "{:012}".format(id_) + ".jpg"
img_info = (height, width, frame_id, video_id, file_name)
del im_ann, annotations
return (res, img_info, file_name)
def load_anno(self, index):
return self.annotations[index][0]
def pull_item(self, index):
id_ = self.ids[index]
res, img_info, file_name = self.annotations[index]
# load image and preprocess
img_file = os.path.join(
self.data_dir, self.name, file_name
)
img = cv2.imread(img_file)
assert img is not None
return img, res.copy(), img_info, np.array([id_])
@Dataset.resize_getitem
def __getitem__(self, index):
"""
One image / label pair for the given index is picked up and pre-processed.
Args:
index (int): data index
Returns:
img (numpy.ndarray): pre-processed image
padded_labels (torch.Tensor): pre-processed label data.
The shape is :math:`[max_labels, 5]`.
each label consists of [class, xc, yc, w, h]:
class (float): class index.
xc, yc (float) : center of bbox whose values range from 0 to 1.
w, h (float) : size of bbox whose values range from 0 to 1.
info_img : tuple of h, w, nh, nw, dx, dy.
h, w (int): original shape of the image
nh, nw (int): shape of the resized image without padding
dx, dy (int): pad size
img_id (int): same as the input index. Used for evaluation.
"""
img, target, img_info, img_id = self.pull_item(index)
if self.preproc is not None:
img, target = self.preproc(img, target, self.input_dim)
return img, target, img_info, img_id
| [((84, 14, 84, 37), 'numpy.zeros', 'np.zeros', ({(84, 23, 84, 36): '(num_objs, 6)'}, {}), '((num_objs, 6))', True, 'import numpy as np\n'), ((108, 19, 110, 9), 'os.path.join', 'os.path.join', ({(109, 12, 109, 25): 'self.data_dir', (109, 27, 109, 36): 'self.name', (109, 38, 109, 47): 'file_name'}, {}), '(self.data_dir, self.name, file_name)', False, 'import os\n'), ((111, 14, 111, 34), 'cv2.imread', 'cv2.imread', ({(111, 25, 111, 33): 'img_file'}, {}), '(img_file)', False, 'import cv2\n'), ((46, 25, 46, 83), 'os.path.join', 'os.path.join', ({(46, 38, 46, 51): 'self.data_dir', (46, 53, 46, 66): '"""annotations"""', (46, 68, 46, 82): 'self.json_file'}, {}), "(self.data_dir, 'annotations', self.json_file)", False, 'import os\n'), ((114, 42, 114, 57), 'numpy.array', 'np.array', ({(114, 51, 114, 56): '[id_]'}, {}), '([id_])', True, 'import numpy as np\n')] |
pkoch/poetry | src/poetry/console/commands/remove.py | d22c5a7187d8b5a30196a7df58111b3c90be7d22 | from __future__ import annotations
from typing import Any
from cleo.helpers import argument
from cleo.helpers import option
from tomlkit.toml_document import TOMLDocument
try:
from poetry.core.packages.dependency_group import MAIN_GROUP
except ImportError:
MAIN_GROUP = "default"
from poetry.console.commands.installer_command import InstallerCommand
class RemoveCommand(InstallerCommand):
name = "remove"
description = "Removes a package from the project dependencies."
arguments = [argument("packages", "The packages to remove.", multiple=True)]
options = [
option("group", "G", "The group to remove the dependency from.", flag=False),
option("dev", "D", "Remove a package from the development dependencies."),
option(
"dry-run",
None,
"Output the operations but do not execute anything "
"(implicitly enables --verbose).",
),
]
help = """The <info>remove</info> command removes a package from the current
list of installed packages
<info>poetry remove</info>"""
loggers = ["poetry.repositories.pypi_repository", "poetry.inspection.info"]
def handle(self) -> int:
packages = self.argument("packages")
if self.option("dev"):
self.line_error(
"<warning>The --dev option is deprecated, "
"use the `--group dev` notation instead.</warning>"
)
group = "dev"
else:
group = self.option("group", self.default_group)
content: dict[str, Any] = self.poetry.file.read()
poetry_content = content["tool"]["poetry"]
if group is None:
removed = []
group_sections = [
(group_name, group_section.get("dependencies", {}))
for group_name, group_section in poetry_content.get("group", {}).items()
]
for group_name, section in [
(MAIN_GROUP, poetry_content["dependencies"])
] + group_sections:
removed += self._remove_packages(packages, section, group_name)
if group_name != MAIN_GROUP:
if not section:
del poetry_content["group"][group_name]
else:
poetry_content["group"][group_name]["dependencies"] = section
elif group == "dev" and "dev-dependencies" in poetry_content:
# We need to account for the old `dev-dependencies` section
removed = self._remove_packages(
packages, poetry_content["dev-dependencies"], "dev"
)
if not poetry_content["dev-dependencies"]:
del poetry_content["dev-dependencies"]
else:
removed = self._remove_packages(
packages, poetry_content["group"][group].get("dependencies", {}), group
)
if not poetry_content["group"][group]:
del poetry_content["group"][group]
if "group" in poetry_content and not poetry_content["group"]:
del poetry_content["group"]
removed_set = set(removed)
not_found = set(packages).difference(removed_set)
if not_found:
raise ValueError(
"The following packages were not found: " + ", ".join(sorted(not_found))
)
# Refresh the locker
self.poetry.set_locker(
self.poetry.locker.__class__(self.poetry.locker.lock.path, poetry_content)
)
self._installer.set_locker(self.poetry.locker)
# Update packages
self._installer.use_executor(
self.poetry.config.get("experimental.new-installer", False)
)
self._installer.dry_run(self.option("dry-run", False))
self._installer.verbose(self._io.is_verbose())
self._installer.update(True)
self._installer.whitelist(removed_set)
status = self._installer.run()
if not self.option("dry-run") and status == 0:
assert isinstance(content, TOMLDocument)
self.poetry.file.write(content)
return status
def _remove_packages(
self, packages: list[str], section: dict[str, Any], group_name: str
) -> list[str]:
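        # Remove every requested package (matched case-insensitively) from the
        # given TOML dependency section and from the in-memory dependency
        # group, returning the names that were actually found and removed.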
removed = []
group = self.poetry.package.dependency_group(group_name)
section_keys = list(section.keys())
for package in packages:
for existing_package in section_keys:
if existing_package.lower() == package.lower():
del section[existing_package]
removed.append(package)
group.remove_dependency(package)
return removed
| [((23, 17, 23, 79), 'cleo.helpers.argument', 'argument', (), '', False, 'from cleo.helpers import argument\n'), ((25, 8, 25, 84), 'cleo.helpers.option', 'option', (), '', False, 'from cleo.helpers import option\n'), ((26, 8, 26, 81), 'cleo.helpers.option', 'option', ({(26, 15, 26, 20): '"""dev"""', (26, 22, 26, 25): '"""D"""', (26, 27, 26, 80): '"""Remove a package from the development dependencies."""'}, {}), "('dev', 'D', 'Remove a package from the development dependencies.')", False, 'from cleo.helpers import option\n'), ((27, 8, 32, 9), 'cleo.helpers.option', 'option', ({(28, 12, 28, 21): '"""dry-run"""', (29, 12, 29, 16): 'None', (30, 12, 31, 45): '"""Output the operations but do not execute anything (implicitly enables --verbose)."""'}, {}), "('dry-run', None,\n 'Output the operations but do not execute anything (implicitly enables --verbose).'\n )", False, 'from cleo.helpers import option\n')] |
orrinjelo/AdventOfCode2021 | orrinjelo/aoc2021/day_11.py | 6fce5c48ec3dc602b393824f592a5c6db2a8b66f | from orrinjelo.utils.decorators import timeit
import numpy as np
def parse(lines):
return np.array([[int(c) for c in line.strip()] for line in lines])
visited = []
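# One puzzle step: every octopus gains 1 energy; any octopus whose energy
# exceeds 9 "flashes", adding 1 energy to each of its 8 neighbours (which can
# cascade recursively via flash()), and every octopus that flashed is reset to 0.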
def flash(a, x, y):
global visited
if (x,y) in visited:
return
for dx in range(-1,2):
for dy in range(-1,2):
if dx == 0 and dy == 0:
continue
if x+dx < 0 or x+dx >= a.shape[0]:
continue
if y+dy < 0 or y+dy >= a.shape[1]:
continue
a[x+dx, y+dy] += 1
visited.append((x,y))
if a[x+dx, y+dy] > 9:
flash(a, x+dx, y+dy)
def progress(a):
global visited
a += 1
x,y = np.where(a > 9)
visited = []
for i in range(len(x)):
flash(a,x[i],y[i])
count = np.sum(a > 9)
# print('a:\n', a)
a[a > 9] = 0
return a, count
@timeit("Day 11 Part 1")
def part1(input_str, use_rust=False):
octomap = parse(input_str)
total_count = 0
for i in range(100):
octomap, count = progress(octomap)
total_count += count
return total_count
@timeit("Day 11 Part 2")
def part2(input_str, use_rust=False):
octomap = parse(input_str)
step = 0
while True:
step += 1
octomap, count = progress(octomap)
if count == octomap.shape[0]*octomap.shape[1]:
break
return step
# = Test ================================================
inputlist = [
'5483143223',
'2745854711',
'5264556173',
'6141336146',
'6357385478',
'4167524645',
'2176841721',
'6882881134',
'4846848554',
'5283751526',
]
def test_part1():
# import matplotlib.pyplot as plt
# plt.imshow(parse(inputlist))
# plt.show()
assert part1(inputlist) == 1656
def test_part2():
assert part2(inputlist) == 195
import pygame
import sys
def plot(input_str):
# octomap = parse(input_str)
octomap = np.random.randint(0,9,(100,100))
pygame.init()
clock = pygame.time.Clock()
scale = 5
screen = pygame.display.set_mode((octomap.shape[0]*scale,octomap.shape[1]*scale))
surface = pygame.Surface((octomap.shape[0]*scale,octomap.shape[1]*scale))
frame = 0
history = []
for i in range(500):
print('Generating frame #', i)
octomap, _ = progress(octomap)
history.append(np.copy(octomap))
input()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit(); sys.exit();
# erase the screen
screen.fill((255,0,0))
try:
octomap = history[frame]
except:
frame = 0
for i in range(octomap.shape[0]):
for j in range(octomap.shape[1]):
if octomap[i,j] == 0:
brightness = 255
else:
brightness = int(255.0 * octomap[i,j]/10.0)
print(i*scale, j*scale, brightness)
pygame.draw.rect(
screen,
(brightness,brightness,brightness),
pygame.Rect(i*scale, j*scale, scale, scale)
)
pygame.display.update()
# surface.blit(screen, (0,0))
clock.tick(30)
frame += 1 | [((41, 1, 41, 24), 'orrinjelo.utils.decorators.timeit', 'timeit', ({(41, 8, 41, 23): '"""Day 11 Part 1"""'}, {}), "('Day 11 Part 1')", False, 'from orrinjelo.utils.decorators import timeit\n'), ((52, 1, 52, 24), 'orrinjelo.utils.decorators.timeit', 'timeit', ({(52, 8, 52, 23): '"""Day 11 Part 2"""'}, {}), "('Day 11 Part 2')", False, 'from orrinjelo.utils.decorators import timeit\n'), ((29, 10, 29, 25), 'numpy.where', 'np.where', ({(29, 19, 29, 24): 'a > 9'}, {}), '(a > 9)', True, 'import numpy as np\n'), ((34, 12, 34, 25), 'numpy.sum', 'np.sum', ({(34, 19, 34, 24): 'a > 9'}, {}), '(a > 9)', True, 'import numpy as np\n'), ((97, 14, 97, 46), 'numpy.random.randint', 'np.random.randint', ({(97, 32, 97, 33): '0', (97, 34, 97, 35): '9', (97, 36, 97, 45): '(100, 100)'}, {}), '(0, 9, (100, 100))', True, 'import numpy as np\n'), ((99, 4, 99, 17), 'pygame.init', 'pygame.init', ({}, {}), '()', False, 'import pygame\n'), ((100, 12, 100, 31), 'pygame.time.Clock', 'pygame.time.Clock', ({}, {}), '()', False, 'import pygame\n'), ((104, 13, 104, 85), 'pygame.display.set_mode', 'pygame.display.set_mode', ({(104, 37, 104, 84): '(octomap.shape[0] * scale, octomap.shape[1] * scale)'}, {}), '((octomap.shape[0] * scale, octomap.shape[1] * scale))', False, 'import pygame\n'), ((105, 14, 105, 77), 'pygame.Surface', 'pygame.Surface', ({(105, 29, 105, 76): '(octomap.shape[0] * scale, octomap.shape[1] * scale)'}, {}), '((octomap.shape[0] * scale, octomap.shape[1] * scale))', False, 'import pygame\n'), ((117, 21, 117, 39), 'pygame.event.get', 'pygame.event.get', ({}, {}), '()', False, 'import pygame\n'), ((143, 8, 143, 31), 'pygame.display.update', 'pygame.display.update', ({}, {}), '()', False, 'import pygame\n'), ((113, 23, 113, 39), 'numpy.copy', 'np.copy', ({(113, 31, 113, 38): 'octomap'}, {}), '(octomap)', True, 'import numpy as np\n'), ((119, 17, 119, 30), 'pygame.quit', 'pygame.quit', ({}, {}), '()', False, 'import pygame\n'), ((119, 32, 119, 42), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((140, 20, 140, 63), 'pygame.Rect', 'pygame.Rect', ({(140, 32, 140, 39): '(i * scale)', (140, 41, 140, 48): '(j * scale)', (140, 50, 140, 55): 'scale', (140, 57, 140, 62): 'scale'}, {}), '(i * scale, j * scale, scale, scale)', False, 'import pygame\n')] |
lukaszbinden/ethz-iacv-2020 | exercise_2/exercise_2.1.py | 271de804315de98b816cda3e2498958ffa87ad59 |
camera_width = 640
camera_height = 480
film_back_width = 1.417
film_back_height = 0.945
x_center = 320
y_center = 240
P_1 = (-0.023, -0.261, 2.376)
p_11 = P_1[0]
p_12 = P_1[1]
p_13 = P_1[2]
P_2 = (0.659, -0.071, 2.082)
p_21 = P_2[0]
p_22 = P_2[1]
p_23 = P_2[2]
p_1_prime = (52, 163)
x_1 = p_1_prime[0]
y_1 = p_1_prime[1]
p_2_prime = (218, 216)
x_2 = p_2_prime[0]
y_2 = p_2_prime[1]
f = 1.378
k_x = camera_width / film_back_width
k_y = camera_height / film_back_height
# f_k_x = f * k_x
f_k_x = f
# f_k_y = f * k_y
f_k_y = f
u_1_prime = (x_1 - x_center) / k_x
v_1_prime = (y_1 - y_center) / k_y
u_2_prime = (x_2 - x_center) / k_x
v_2_prime = (y_2 - y_center) / k_y
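# Pinhole projection per coordinate: u' = f_k_x * (X - C_x) / (Z - C_z) and
# v' = f_k_y * (Y - C_y) / (Z - C_z). The expressions below invert these
# relations for the two known point/projection pairs to recover the camera
# centre C' = (c_1', c_2', c_3').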
c_1_prime = (f_k_x * p_21 + (p_13 - p_23) * u_2_prime - u_2_prime/u_1_prime * f_k_x * p_11) / (f_k_x * (1 - u_2_prime/u_1_prime))
c_2_prime = (f_k_y * p_22 - (p_23 - (p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_2_prime) / f_k_y
c_2_prime_alt = (f_k_y * p_12 - (p_13 - (p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_1_prime) / f_k_y
c_3_prime = p_13 - (f_k_x / u_1_prime) * (p_11 - c_1_prime)
rho_1_prime = p_13 - c_3_prime
rho_2_prime = p_23 - c_3_prime
print(f"C' = ({c_1_prime}, {c_2_prime}, {c_3_prime})")
print(f"c_2_prime_alt = {c_2_prime_alt}")
print(f"rho_1_prime = {rho_1_prime}")
print(f"rho_2_prime = {rho_2_prime}")
print("------------------")
r_11 = f_k_x * (p_11 - c_1_prime)
r_12 = f_k_y * (p_12 - c_2_prime)
r_13 = 1 * (p_13 - c_3_prime)
l_11 = rho_1_prime * u_1_prime
l_12 = rho_1_prime * v_1_prime
l_13 = rho_1_prime * 1
print(f"L: ({l_11}, {l_12}, {l_13})")
print(f"R: ({r_11}, {r_12}, {r_13})")
print("------------------")
r_21 = f_k_x * (p_21 - c_1_prime)
r_22 = f_k_y * (p_22 - c_2_prime)
r_23 = 1 * (p_23 - c_3_prime)
l_21 = rho_2_prime * u_2_prime
l_22 = rho_2_prime * v_2_prime
l_23 = rho_2_prime * 1
print(f"L: ({l_11}, {l_12}, {l_13})")
print(f"R: ({r_11}, {r_12}, {r_13})") | [] |
paper2code/torch2vec-restful-service | services/train/single.py | 6c4412d84d067268bf988b1f31cef716a2ed23a5 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 19:15:34 2020
@author: deviantpadam
"""
import pandas as pd
import numpy as np
import concurrent.futures
import os
import tqdm
from collections import Counter
from torch2vec.data import DataPreparation
from torch2vec.torch2vec import DM
# train = pd.read_csv('/home/deviantpadam/Downloads/example.csv',delimiter='\t')
# train = pd.read_csv('/home/deviantpadam/Downloads/example (1).csv')
train = pd.read_csv('../data/suggest_dump.txt',delimiter='\t')
def cleaner(train):
sub=(train['subjects'].str.lower()).str.split(',',expand=True)
sub.drop([2,3],axis=1,inplace=True)
sub.columns = ['subject1','subject2']
sub.fillna('none',inplace=True)
tasks = (train['tasks'].str.lower()).str.split(',',expand=True)[0]
tasks.fillna('none',inplace=True)
tasks.name = 'task'
train = pd.concat([train,sub,tasks],axis=1).drop(['subjects','tasks'],axis=1)
train.fillna('none',inplace=True)
return train
train = cleaner(train)
corpus = train['authors']+' '+train['title']+' '+train['summary']+' '+train['subject1']+' '+train['subject2']+' '+train['task']
corpus.name = 'text'
corpus = pd.concat([train['subject1'],train['subject2'],train['task'],corpus],axis=1)
def phraser(corpus,workers=-1):
if workers==-1:
workers = os.cpu_count()
chunks = np.array_split(corpus,workers)
with concurrent.futures.ProcessPoolExecutor(workers) as executor:
result = np.concatenate(list(tqdm.tqdm(executor.map(_add_bigrams,chunks),total=workers,desc='Phrasing using {} cores'.format(workers))),axis=0)
executor.shutdown(wait=True)
# result = _add_bigrams(data)
global bigrams
del bigrams
return pd.DataFrame({'text':np.array(result)})['text']
def _add_bigrams(text):
for idx in range(len(text)):
length=len(text[idx])-1
word_count=0
while word_count<length:
if text[idx][word_count]+' '+text[idx][word_count+1] in bigrams:
text[idx][word_count] = text[idx][word_count]+' '+text[idx][word_count+1]
text[idx].remove(text[idx][word_count+1])
length = len(text[idx])-1
# print(cor[i][j]+' '+cor[i][j+1])
word_count+=1
return text
def _get_bigrams(corpus,min_count):
text = np.copy(corpus)
vocab = [word for sen in text for word in sen]
ngram = [(i,j) for i,j in zip(vocab[:-1],vocab[1:])]
freq = Counter(ngram)
filterbi = [bigram for bigram in freq.most_common() if bigram[1]>min_count]
bigrams = [" ".join(bigram[0]) for bigram in filterbi]
return bigrams
data = DataPreparation(corpus.reset_index(),f_size=3)
data.tokenize()
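# Two phrasing passes: frequent bigrams (min_count=700) are first merged into
# single space-joined tokens, then the pass is repeated with a lower threshold
# (min_count=500) so already-merged pairs can combine into longer phrases.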
bigrams = _get_bigrams(data.corpus.values,min_count=700)
data.corpus = phraser(data.corpus.values)
bigrams = _get_bigrams(data.corpus.values,min_count=500)
data.corpus = phraser(data.corpus.values)
data.vocab_builder()
doc, context, target_noise_ids = data.get_data(window_size=5,num_noise_words=10)
model = DM(vec_dim=100,num_docs=len(data),num_words=data.vocab_size).cuda()
num_workers = os.cpu_count()
model.fit(doc_ids=doc,context=context,target_noise_ids=target_noise_ids,epochs=20,batch_size=8000,num_workers=num_workers)
model.save_model(data.document_ids,data.args,file_name='weights')
| [((19, 8, 19, 62), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((34, 9, 34, 85), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((86, 14, 86, 28), 'os.cpu_count', 'os.cpu_count', ({}, {}), '()', False, 'import os\n'), ((39, 13, 39, 43), 'numpy.array_split', 'np.array_split', ({(39, 28, 39, 34): 'corpus', (39, 35, 39, 42): 'workers'}, {}), '(corpus, workers)', True, 'import numpy as np\n'), ((65, 11, 65, 26), 'numpy.copy', 'np.copy', ({(65, 19, 65, 25): 'corpus'}, {}), '(corpus)', True, 'import numpy as np\n'), ((68, 11, 68, 25), 'collections.Counter', 'Counter', ({(68, 19, 68, 24): 'ngram'}, {}), '(ngram)', False, 'from collections import Counter\n'), ((38, 18, 38, 32), 'os.cpu_count', 'os.cpu_count', ({}, {}), '()', False, 'import os\n'), ((28, 12, 28, 47), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((46, 32, 46, 48), 'numpy.array', 'np.array', ({(46, 41, 46, 47): 'result'}, {}), '(result)', True, 'import numpy as np\n')] |
geo-bl-ch/pyramid_oereb | tests/sources/test_document_oereblex.py | 767375a4adda4589e12c4257377fc30258cdfcb3 | # -*- coding: utf-8 -*-
import datetime
import pytest
import requests_mock
from geolink_formatter.entity import Document, File
from requests.auth import HTTPBasicAuth
from pyramid_oereb.contrib.sources.document import OEREBlexSource
from pyramid_oereb.lib.records.documents import DocumentRecord, LegalProvisionRecord
from pyramid_oereb.lib.records.office import OfficeRecord
from tests.mockrequest import MockParameter
@pytest.mark.parametrize('valid,cfg', [
(True, {
'host': 'http://oereblex.example.com',
'language': 'de',
'canton': 'BL'
}),
(False, {
'language': 'de',
'canton': 'BL'
}),
(False, {
'host': 'http://oereblex.example.com',
'language': 'german',
'canton': 'BL'
}),
(False, {
'host': 'http://oereblex.example.com',
'language': 'de'
})
])
def test_init(valid, cfg):
if valid:
assert isinstance(OEREBlexSource(**cfg), OEREBlexSource)
else:
with pytest.raises(AssertionError):
OEREBlexSource(**cfg)
@pytest.mark.parametrize('key,language,result', [
('official_title', None, None),
('municipality', None, 'Liestal'),
('municipality', 'de', {'de': 'Liestal'})
])
def test_get_mapped_value(key, language, result):
file_ = File('Test', '/api/attachments/1', 'main')
document = Document(id='test', title='Test', category='main', doctype='decree', files=[file_],
enactment_date=datetime.date.today(), subtype='Liestal', authority='Office')
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',
mapping={'municipality': 'subtype'})
assert source._get_mapped_value(document, key, language=language) == result
@pytest.mark.parametrize('i,document', [
(1, Document(
id='doc1',
title='Document 1',
category='main',
doctype='edict',
authority='Office',
files=[File('File 1', '/api/attachments/1', 'main')],
enactment_date=datetime.date.today()
)),
(2, Document(
id='doc2',
title='Document 2',
category='main',
doctype='decree',
authority='Office',
files=[
File('File 2', '/api/attachments/2', 'main'),
File('File 3', '/api/attachments/3', 'additional')
],
enactment_date=datetime.date.today()
)),
(3, Document(
id='doc1',
title='Document 1',
category='main',
doctype='invalid',
authority='Office',
files=[File('File 1', '/api/attachments/1', 'main')],
enactment_date=datetime.date.today()
)),
(4, Document(
id='doc1',
title='Document 1',
category='main',
doctype='decree',
authority='Office',
files=[],
enactment_date=datetime.date.today()
))
])
def test_get_document_records(i, document):
language = 'de'
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL')
references = [
Document(
id='ref',
title='Reference',
category='related',
doctype='edict',
authority='Office',
files=[File('Reference file', '/api/attachments/4', 'main')],
enactment_date=datetime.date.today()
)
]
if i == 3:
with pytest.raises(TypeError):
source._get_document_records(document, language, references)
elif i == 4:
assert source._get_document_records(document, language, references) == []
else:
records = source._get_document_records(document, language, references)
assert len(records) == i
for idx, record in enumerate(records):
if i == 1:
assert isinstance(record, DocumentRecord)
elif i == 2:
assert isinstance(record, LegalProvisionRecord)
assert record.title == {'de': 'Document {0}'.format(i)}
assert record.published_from == datetime.date.today()
assert record.canton == 'BL'
assert record.text_at_web == {'de': '/api/attachments/{fid}'.format(fid=i + idx)}
assert len(record.references) == 1
reference = record.references[0]
assert isinstance(reference, DocumentRecord)
assert reference.title == {'de': 'Reference'}
assert reference.canton == 'BL'
assert reference.text_at_web == {'de': '/api/attachments/4'}
def test_read():
with requests_mock.mock() as m:
with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:
m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read())
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL')
source.read(MockParameter(), 100)
assert len(source.records) == 2
document = source.records[0]
assert isinstance(document, DocumentRecord)
assert isinstance(document.responsible_office, OfficeRecord)
assert document.responsible_office.name == {'de': 'Landeskanzlei'}
assert document.canton == 'BL'
assert document.text_at_web == {
'de': 'http://oereblex.example.com/api/attachments/313'
}
assert len(document.references) == 5
def test_read_related_decree_as_main():
with requests_mock.mock() as m:
with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:
m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read())
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',
related_decree_as_main=True)
source.read(MockParameter(), 100)
assert len(source.records) == 3
document = source.records[0]
assert isinstance(document, DocumentRecord)
assert isinstance(document.responsible_office, OfficeRecord)
assert document.responsible_office.name == {'de': 'Landeskanzlei'}
assert document.canton == 'BL'
assert document.text_at_web == {
'de': 'http://oereblex.example.com/api/attachments/313'
}
assert len(document.references) == 4
def test_read_with_version_in_url():
with requests_mock.mock() as m:
with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:
m.get('http://oereblex.example.com/api/1.1.1/geolinks/100.xml', content=f.read())
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',
pass_version=True)
source.read(MockParameter(), 100)
assert len(source.records) == 2
def test_read_with_specified_version():
with requests_mock.mock() as m:
with open('./tests/resources/geolink_v1.0.0.xml', 'rb') as f:
m.get('http://oereblex.example.com/api/1.0.0/geolinks/100.xml', content=f.read())
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',
pass_version=True, version='1.0.0')
source.read(MockParameter(), 100)
assert len(source.records) == 2
def test_read_with_specified_language():
with requests_mock.mock() as m:
with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:
m.get('http://oereblex.example.com/api/geolinks/100.xml?locale=fr', content=f.read())
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL')
params = MockParameter()
params.set_language('fr')
source.read(params, 100)
assert len(source.records) == 2
document = source.records[0]
assert document.responsible_office.name == {'fr': 'Landeskanzlei'}
assert document.text_at_web == {
'fr': 'http://oereblex.example.com/api/attachments/313'
}
def test_authentication():
auth = {
'username': 'test',
'password': 'test'
}
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', auth=auth)
assert isinstance(source._auth, HTTPBasicAuth)
def test_get_document_title():
document = Document([], id='1', title='Test')
result = {'de': 'Test'}
assert OEREBlexSource._get_document_title(document, File(), 'de') == result
| [((16, 1, 35, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(16, 25, 16, 36): '"""valid,cfg"""', (16, 38, 35, 1): "[(True, {'host': 'http://oereblex.example.com', 'language': 'de', 'canton':\n 'BL'}), (False, {'language': 'de', 'canton': 'BL'}), (False, {'host':\n 'http://oereblex.example.com', 'language': 'german', 'canton': 'BL'}),\n (False, {'host': 'http://oereblex.example.com', 'language': 'de'})]"}, {}), "('valid,cfg', [(True, {'host':\n 'http://oereblex.example.com', 'language': 'de', 'canton': 'BL'}), (\n False, {'language': 'de', 'canton': 'BL'}), (False, {'host':\n 'http://oereblex.example.com', 'language': 'german', 'canton': 'BL'}),\n (False, {'host': 'http://oereblex.example.com', 'language': 'de'})])", False, 'import pytest\n'), ((44, 1, 48, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(44, 25, 44, 46): '"""key,language,result"""', (44, 48, 48, 1): "[('official_title', None, None), ('municipality', None, 'Liestal'), (\n 'municipality', 'de', {'de': 'Liestal'})]"}, {}), "('key,language,result', [('official_title', None,\n None), ('municipality', None, 'Liestal'), ('municipality', 'de', {'de':\n 'Liestal'})])", False, 'import pytest\n'), ((50, 12, 50, 54), 'geolink_formatter.entity.File', 'File', ({(50, 17, 50, 23): '"""Test"""', (50, 25, 50, 45): '"""/api/attachments/1"""', (50, 47, 50, 53): '"""main"""'}, {}), "('Test', '/api/attachments/1', 'main')", False, 'from geolink_formatter.entity import Document, File\n'), ((53, 13, 54, 64), 'pyramid_oereb.contrib.sources.document.OEREBlexSource', 'OEREBlexSource', (), '', False, 'from pyramid_oereb.contrib.sources.document import OEREBlexSource\n'), ((101, 13, 101, 91), 'pyramid_oereb.contrib.sources.document.OEREBlexSource', 'OEREBlexSource', (), '', False, 'from pyramid_oereb.contrib.sources.document import OEREBlexSource\n'), ((217, 13, 217, 102), 'pyramid_oereb.contrib.sources.document.OEREBlexSource', 'OEREBlexSource', (), '', False, 'from pyramid_oereb.contrib.sources.document import OEREBlexSource\n'), ((222, 15, 222, 49), 'geolink_formatter.entity.Document', 'Document', (), '', False, 'from geolink_formatter.entity import Document, File\n'), ((140, 9, 140, 29), 'requests_mock.mock', 'requests_mock.mock', ({}, {}), '()', False, 'import requests_mock\n'), ((143, 17, 143, 95), 'pyramid_oereb.contrib.sources.document.OEREBlexSource', 'OEREBlexSource', (), '', False, 'from pyramid_oereb.contrib.sources.document import OEREBlexSource\n'), ((158, 9, 158, 29), 'requests_mock.mock', 'requests_mock.mock', ({}, {}), '()', False, 'import requests_mock\n'), ((161, 17, 162, 60), 'pyramid_oereb.contrib.sources.document.OEREBlexSource', 'OEREBlexSource', (), '', False, 'from pyramid_oereb.contrib.sources.document import OEREBlexSource\n'), ((177, 9, 177, 29), 'requests_mock.mock', 'requests_mock.mock', ({}, {}), '()', False, 'import requests_mock\n'), ((180, 17, 181, 50), 'pyramid_oereb.contrib.sources.document.OEREBlexSource', 'OEREBlexSource', (), '', False, 'from pyramid_oereb.contrib.sources.document import OEREBlexSource\n'), ((187, 9, 187, 29), 'requests_mock.mock', 'requests_mock.mock', ({}, {}), '()', False, 'import requests_mock\n'), ((190, 17, 191, 67), 'pyramid_oereb.contrib.sources.document.OEREBlexSource', 'OEREBlexSource', (), '', False, 'from pyramid_oereb.contrib.sources.document import OEREBlexSource\n'), ((197, 9, 197, 29), 'requests_mock.mock', 'requests_mock.mock', ({}, {}), '()', False, 'import requests_mock\n'), ((200, 17, 200, 95), 'pyramid_oereb.contrib.sources.document.OEREBlexSource', 
'OEREBlexSource', (), '', False, 'from pyramid_oereb.contrib.sources.document import OEREBlexSource\n'), ((201, 17, 201, 32), 'tests.mockrequest.MockParameter', 'MockParameter', ({}, {}), '()', False, 'from tests.mockrequest import MockParameter\n'), ((38, 26, 38, 47), 'pyramid_oereb.contrib.sources.document.OEREBlexSource', 'OEREBlexSource', ({}, {}), '(**cfg)', False, 'from pyramid_oereb.contrib.sources.document import OEREBlexSource\n'), ((40, 13, 40, 42), 'pytest.raises', 'pytest.raises', ({(40, 27, 40, 41): 'AssertionError'}, {}), '(AssertionError)', False, 'import pytest\n'), ((41, 12, 41, 33), 'pyramid_oereb.contrib.sources.document.OEREBlexSource', 'OEREBlexSource', ({}, {}), '(**cfg)', False, 'from pyramid_oereb.contrib.sources.document import OEREBlexSource\n'), ((52, 39, 52, 60), 'datetime.date.today', 'datetime.date.today', ({}, {}), '()', False, 'import datetime\n'), ((115, 13, 115, 37), 'pytest.raises', 'pytest.raises', ({(115, 27, 115, 36): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((144, 20, 144, 35), 'tests.mockrequest.MockParameter', 'MockParameter', ({}, {}), '()', False, 'from tests.mockrequest import MockParameter\n'), ((163, 20, 163, 35), 'tests.mockrequest.MockParameter', 'MockParameter', ({}, {}), '()', False, 'from tests.mockrequest import MockParameter\n'), ((182, 20, 182, 35), 'tests.mockrequest.MockParameter', 'MockParameter', ({}, {}), '()', False, 'from tests.mockrequest import MockParameter\n'), ((192, 20, 192, 35), 'tests.mockrequest.MockParameter', 'MockParameter', ({}, {}), '()', False, 'from tests.mockrequest import MockParameter\n'), ((224, 56, 224, 62), 'geolink_formatter.entity.File', 'File', ({}, {}), '()', False, 'from geolink_formatter.entity import Document, File\n'), ((110, 27, 110, 48), 'datetime.date.today', 'datetime.date.today', ({}, {}), '()', False, 'import datetime\n'), ((109, 19, 109, 71), 'geolink_formatter.entity.File', 'File', ({(109, 24, 109, 40): '"""Reference file"""', (109, 42, 109, 62): '"""/api/attachments/4"""', (109, 64, 109, 70): '"""main"""'}, {}), "('Reference file', '/api/attachments/4', 'main')", False, 'from geolink_formatter.entity import Document, File\n'), ((128, 44, 128, 65), 'datetime.date.today', 'datetime.date.today', ({}, {}), '()', False, 'import datetime\n'), ((66, 23, 66, 44), 'datetime.date.today', 'datetime.date.today', ({}, {}), '()', False, 'import datetime\n'), ((78, 23, 78, 44), 'datetime.date.today', 'datetime.date.today', ({}, {}), '()', False, 'import datetime\n'), ((87, 23, 87, 44), 'datetime.date.today', 'datetime.date.today', ({}, {}), '()', False, 'import datetime\n'), ((96, 23, 96, 44), 'datetime.date.today', 'datetime.date.today', ({}, {}), '()', False, 'import datetime\n'), ((65, 15, 65, 59), 'geolink_formatter.entity.File', 'File', ({(65, 20, 65, 28): '"""File 1"""', (65, 30, 65, 50): '"""/api/attachments/1"""', (65, 52, 65, 58): '"""main"""'}, {}), "('File 1', '/api/attachments/1', 'main')", False, 'from geolink_formatter.entity import Document, File\n'), ((75, 12, 75, 56), 'geolink_formatter.entity.File', 'File', ({(75, 17, 75, 25): '"""File 2"""', (75, 27, 75, 47): '"""/api/attachments/2"""', (75, 49, 75, 55): '"""main"""'}, {}), "('File 2', '/api/attachments/2', 'main')", False, 'from geolink_formatter.entity import Document, File\n'), ((76, 12, 76, 62), 'geolink_formatter.entity.File', 'File', ({(76, 17, 76, 25): '"""File 3"""', (76, 27, 76, 47): '"""/api/attachments/3"""', (76, 49, 76, 61): '"""additional"""'}, {}), "('File 3', '/api/attachments/3', 'additional')", 
False, 'from geolink_formatter.entity import Document, File\n'), ((86, 15, 86, 59), 'geolink_formatter.entity.File', 'File', ({(86, 20, 86, 28): '"""File 1"""', (86, 30, 86, 50): '"""/api/attachments/1"""', (86, 52, 86, 58): '"""main"""'}, {}), "('File 1', '/api/attachments/1', 'main')", False, 'from geolink_formatter.entity import Document, File\n')] |
codecat555/codecat555-fidgetingbits_knausj_talon | apps/zsh/singletons.py | 62f9be0459e6631c99d58eee97054ddd970cc5f3 | # A rarely-updated module to assist in writing reload-safe talon modules using
# things like threads, which are not normally safe for reloading with talon.
# If this file is ever updated, you'll need to restart talon.
import logging

_singletons = {}


def singleton(fn):
    name = f"{fn.__module__}.{fn.__name__}"
    # Do any cleanup actions from before.
    if name in _singletons:
        old = _singletons.pop(name)
        try:
            next(old)
        except StopIteration:
            pass
        else:
            logging.error(
                f"the old @singleton function {name} had more than one yield!"
            )
    # Do the startup actions on the new object.
    it = iter(fn())
    obj = next(it)
    # Remember the iterator so we can call the cleanup actions later.
    _singletons[name] = it
    # We want the object yielded by the iterator to be available at the name
    # of the function, so instead of returning a function we return an object.
    return obj
| [((20, 12, 22, 13), 'logging.error', 'logging.error', ({(21, 16, 21, 78): 'f"""the old @singleton function {name} had more than one yield!"""'}, {}), "(f'the old @singleton function {name} had more than one yield!')", False, 'import logging\n')] |
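A minimal usage sketch for the @singleton decorator above (illustrative only, not part of the original file; the thread example and the import path are assumptions):

import threading

from singletons import singleton  # hypothetical import path

@singleton
def worker():
    # Startup actions run once per (re)load, before the first yield.
    stop = threading.Event()
    thread = threading.Thread(target=stop.wait, daemon=True)
    thread.start()
    # The yielded object is what gets bound to the name `worker`.
    yield thread
    # Everything after the yield runs as cleanup, via next() on the old iterator,
    # the next time the module is reloaded.
    stop.set()
    thread.join()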
yztxwd/Bichrom | trainNN/run_bichrom.py | 3939b8e52816a02b34122feef27c8e0a06e31d8e | import argparse
import yaml
from subprocess import call
from train import train_bichrom

if __name__ == '__main__':
    # parsing
    parser = argparse.ArgumentParser(description='Train and Evaluate Bichrom')
    parser.add_argument('-training_schema_yaml', required=True,
                        help='YAML file with paths to train, test and val data')
    parser.add_argument('-len', help='Size of genomic windows',
                        required=True, type=int)
    parser.add_argument('-outdir', required=True, help='Output directory')
    parser.add_argument('-nbins', type=int, required=True, help='Number of bins')
    args = parser.parse_args()

    # load the yaml file with input data paths:
    with open(args.training_schema_yaml, 'r') as f:
        try:
            data_paths = yaml.safe_load(f)
        except yaml.YAMLError as exc:
            print(exc)

    # create the output directory:
    outdir = args.outdir
    call(['mkdir', outdir])

    train_bichrom(data_paths=data_paths, outdir=outdir, seq_len=args.len,
bin_size=int(args.len/args.nbins)) | [((8, 13, 8, 78), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((25, 4, 25, 27), 'subprocess.call', 'call', ({(25, 9, 25, 26): "['mkdir', outdir]"}, {}), "(['mkdir', outdir])", False, 'from subprocess import call\n'), ((20, 25, 20, 42), 'yaml.safe_load', 'yaml.safe_load', ({(20, 40, 20, 41): 'f'}, {}), '(f)', False, 'import yaml\n')] |
Fronius-SED/rapidyaml | setup.py | 20d44ff0c43085d08cb17f37fd6b0b305938a3ea | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
import os
import shutil
import sys
from pathlib import Path
from distutils import log
from setuptools import setup
from setuptools.command.sdist import sdist as SdistCommand
from cmake_build_extension import BuildExtension, CMakeExtension
TOP_DIR = (Path(__file__).parent).resolve()
# Where the Python library is actually found.
PYTHON_DIR = "api/python"
setup_kw = {}
# Read in the package version when not in a git repository.
VERSION_FILE = os.path.join(PYTHON_DIR, 'ryml', 'version.py')
if not (TOP_DIR / '.git').exists() and os.path.exists(VERSION_FILE):
exec(open(VERSION_FILE).read())
setup_kw['version'] = version
else:
setup_kw['use_scm_version']= {
"version_scheme": "post-release",
"local_scheme": "no-local-version",
"write_to": VERSION_FILE,
}
# Read in the module description from the README.md file.
README_FILE = TOP_DIR / "README.md"
if README_FILE.exists():
with open(TOP_DIR / "README.md", "r") as fh:
setup_kw['long_description'] = fh.read()
setup_kw['long_description_content_type'] = "text/markdown"
# define a CMake package
cmake_args = dict(
name='ryml.ryml',
install_prefix='',
source_dir='',
cmake_component='python',
cmake_configure_options=[
"-DRYML_BUILD_API:BOOL=ON",
# Force cmake to use the Python interpreter we are currently using to
# run setup.py
"-DPython3_EXECUTABLE:FILEPATH="+sys.executable,
],
)
try:
ext = CMakeExtension(**cmake_args)
except TypeError:
del cmake_args['cmake_component']
ext = CMakeExtension(**cmake_args)
# If the CMakeExtension doesn't support `cmake_component` then we have to
# do some manual cleanup.
_BuildExtension=BuildExtension
class BuildExtension(_BuildExtension):
def build_extension(self, ext):
_BuildExtension.build_extension(self, ext)
ext_dir = Path(self.get_ext_fullpath(ext.name)).parent.absolute()
cmake_install_prefix = ext_dir / ext.install_prefix
assert cmake_install_prefix.exists(), cmake_install_prefix
try:
lib_path = cmake_install_prefix / "lib"
assert lib_path.exists(), lib_path
log.info("Removing everything under: %s", lib_path)
shutil.rmtree(lib_path)
inc_path = cmake_install_prefix / "include"
assert inc_path.exists(), inc_path
log.info("Removing everything under: %s", inc_path)
shutil.rmtree(inc_path)
# Windows only
cm_path = cmake_install_prefix / "cmake"
if cm_path.exists():
log.info("Removing everything under: %s", cm_path)
shutil.rmtree(cm_path)
except:
log.info('Found following installed files:')
for f in cmake_install_prefix.rglob("*"):
log.info(' - %s', f)
raise
setup(
# Package human readable information
name='rapidyaml',
#author='Joao Paulo Magalhaes',
description='Rapid YAML - a library to parse and emit YAML, and do it fast.',
url='https://github.com/biojppm/rapidyaml',
license='MIT',
license_files=['LICENSE.txt'],
# Package contents control
cmdclass={
"build_ext": BuildExtension,
},
package_dir={"": PYTHON_DIR},
packages=['ryml'],
ext_modules=[ext],
include_package_data=True,
# Requirements
python_requires=">=3.7",
setup_requires=['setuptools_scm'],
# Extra arguments
**setup_kw,
)
| [((23, 15, 23, 61), 'os.path.join', 'os.path.join', ({(23, 28, 23, 38): 'PYTHON_DIR', (23, 40, 23, 46): '"""ryml"""', (23, 48, 23, 60): '"""version.py"""'}, {}), "(PYTHON_DIR, 'ryml', 'version.py')", False, 'import os\n'), ((92, 0, 113, 1), 'setuptools.setup', 'setup', (), '', False, 'from setuptools import setup\n'), ((24, 39, 24, 67), 'os.path.exists', 'os.path.exists', ({(24, 54, 24, 66): 'VERSION_FILE'}, {}), '(VERSION_FILE)', False, 'import os\n'), ((56, 10, 56, 38), 'cmake_build_extension.CMakeExtension', 'CMakeExtension', ({}, {}), '(**cmake_args)', False, 'from cmake_build_extension import BuildExtension, CMakeExtension\n'), ((59, 10, 59, 38), 'cmake_build_extension.CMakeExtension', 'CMakeExtension', ({}, {}), '(**cmake_args)', False, 'from cmake_build_extension import BuildExtension, CMakeExtension\n'), ((15, 11, 15, 25), 'pathlib.Path', 'Path', ({(15, 16, 15, 24): '__file__'}, {}), '(__file__)', False, 'from pathlib import Path\n'), ((73, 16, 73, 67), 'distutils.log.info', 'log.info', ({(73, 25, 73, 56): '"""Removing everything under: %s"""', (73, 58, 73, 66): 'lib_path'}, {}), "('Removing everything under: %s', lib_path)", False, 'from distutils import log\n'), ((74, 16, 74, 39), 'shutil.rmtree', 'shutil.rmtree', ({(74, 30, 74, 38): 'lib_path'}, {}), '(lib_path)', False, 'import shutil\n'), ((78, 16, 78, 67), 'distutils.log.info', 'log.info', ({(78, 25, 78, 56): '"""Removing everything under: %s"""', (78, 58, 78, 66): 'inc_path'}, {}), "('Removing everything under: %s', inc_path)", False, 'from distutils import log\n'), ((79, 16, 79, 39), 'shutil.rmtree', 'shutil.rmtree', ({(79, 30, 79, 38): 'inc_path'}, {}), '(inc_path)', False, 'import shutil\n'), ((84, 20, 84, 70), 'distutils.log.info', 'log.info', ({(84, 29, 84, 60): '"""Removing everything under: %s"""', (84, 62, 84, 69): 'cm_path'}, {}), "('Removing everything under: %s', cm_path)", False, 'from distutils import log\n'), ((85, 20, 85, 42), 'shutil.rmtree', 'shutil.rmtree', ({(85, 34, 85, 41): 'cm_path'}, {}), '(cm_path)', False, 'import shutil\n'), ((87, 16, 87, 60), 'distutils.log.info', 'log.info', ({(87, 25, 87, 59): '"""Found following installed files:"""'}, {}), "('Found following installed files:')", False, 'from distutils import log\n'), ((89, 20, 89, 40), 'distutils.log.info', 'log.info', ({(89, 29, 89, 36): '""" - %s"""', (89, 38, 89, 39): 'f'}, {}), "(' - %s', f)", False, 'from distutils import log\n')] |
machdyne/litex-boards | litex_boards/targets/digilent_arty_z7.py | 2311db18f8c92f80f03226fa984e6110caf25b88 | #!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 Gwenhael Goavec-Merou <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause

import os  # used by os.system() and os.path.join() below
import argparse
import subprocess

from migen import *

from litex_boards.platforms import digilent_arty_z7

from litex.build import tools
from litex.build.xilinx import common as xil_common
from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict

from litex.soc.interconnect import axi
from litex.soc.interconnect import wishbone

from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.soc import SoCRegion
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser

# CRG ----------------------------------------------------------------------------------------------

class _CRG(Module):
    def __init__(self, platform, sys_clk_freq, use_ps7_clk=False):
        self.rst = Signal()
        self.clock_domains.cd_sys = ClockDomain()

        # # #

        if use_ps7_clk:
            self.comb += ClockSignal("sys").eq(ClockSignal("ps7"))
            self.comb += ResetSignal("sys").eq(ResetSignal("ps7") | self.rst)
        else:
            # Clk.
            clk125 = platform.request("clk125")

            # PLL.
            self.submodules.pll = pll = S7PLL(speedgrade=-1)
            self.comb += pll.reset.eq(self.rst)
            pll.register_clkin(clk125, 125e6)
            pll.create_clkout(self.cd_sys, sys_clk_freq)
            # Ignore sys_clk to pll.clkin path created by SoC's rst.
            platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin)

# BaseSoC ------------------------------------------------------------------------------------------

class BaseSoC(SoCCore):
    def __init__(self, variant="z7-20", toolchain="vivado", sys_clk_freq=int(125e6),
                 with_led_chaser=True, **kwargs):
        platform = digilent_arty_z7.Platform(variant=variant, toolchain=toolchain)

        if kwargs.get("cpu_type", None) == "zynq7000":
            kwargs['integrated_sram_size'] = 0
            kwargs['with_uart'] = False
            self.mem_map = {
                'csr': 0x4000_0000,  # Zynq GP0 default
            }

        # SoCCore ----------------------------------------------------------------------------------
        SoCCore.__init__(self, platform, sys_clk_freq,
            ident = "LiteX SoC on Arty Z7",
            **kwargs)

        # Zynq7000 Integration ---------------------------------------------------------------------
        if kwargs.get("cpu_type", None) == "zynq7000":
            assert toolchain == "vivado", ' not tested / specific vivado cmds'
            preset_name = "arty_z7_20.tcl" if variant == "z7-20" else "arty_z7_10.tcl"
            os.system("wget http://kmf2.trabucayre.com/" + preset_name)
            self.cpu.set_ps7(preset=preset_name)

            # Connect AXI GP0 to the SoC
            wb_gp0 = wishbone.Interface()
            self.submodules += axi.AXI2Wishbone(
                axi          = self.cpu.add_axi_gp_master(),
                wishbone     = wb_gp0,
                base_address = self.mem_map['csr'])
            self.add_wb_master(wb_gp0)
            use_ps7_clk = True
        else:
            use_ps7_clk = False

        # CRG --------------------------------------------------------------------------------------
        self.submodules.crg = _CRG(platform, sys_clk_freq, use_ps7_clk)

        # Leds -------------------------------------------------------------------------------------
        if with_led_chaser:
            self.submodules.leds = LedChaser(
                pads         = platform.request_all("user_led"),
                sys_clk_freq = sys_clk_freq)

# Build --------------------------------------------------------------------------------------------

def main():
    parser = argparse.ArgumentParser(description="LiteX SoC on Arty Z7")
    parser.add_argument("--toolchain",    default="vivado", help="FPGA toolchain (vivado, symbiflow or yosys+nextpnr).")
    parser.add_argument("--build",        action="store_true", help="Build bitstream.")
    parser.add_argument("--load",         action="store_true", help="Load bitstream.")
    parser.add_argument("--variant",      default="z7-20",  help="Board variant (z7-20 or z7-10).")
    parser.add_argument("--sys-clk-freq", default=125e6,    help="System clock frequency.")
    builder_args(parser)
    soc_core_args(parser)
    vivado_build_args(parser)
    parser.set_defaults(cpu_type="zynq7000")
    args = parser.parse_args()

    soc = BaseSoC(
        variant      = args.variant,
        toolchain    = args.toolchain,
        sys_clk_freq = int(float(args.sys_clk_freq)),
        **soc_core_argdict(args)
    )
    builder = Builder(soc, **builder_argdict(args))
    builder_kwargs = vivado_build_argdict(args) if args.toolchain == "vivado" else {}
    builder.build(**builder_kwargs, run=args.build)

    if args.load:
        prog = soc.platform.create_programmer()
        prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))

if __name__ == "__main__":
    main()
| [((106, 13, 106, 72), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((114, 4, 114, 29), 'litex.build.xilinx.vivado.vivado_build_args', 'vivado_build_args', ({(114, 22, 114, 28): 'parser'}, {}), '(parser)', False, 'from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict\n'), ((59, 19, 59, 82), 'litex_boards.platforms.digilent_arty_z7.Platform', 'digilent_arty_z7.Platform', (), '', False, 'from litex_boards.platforms import digilent_arty_z7\n'), ((125, 21, 125, 47), 'litex.build.xilinx.vivado.vivado_build_argdict', 'vivado_build_argdict', ({(125, 42, 125, 46): 'args'}, {}), '(args)', False, 'from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict\n'), ((83, 21, 83, 41), 'litex.soc.interconnect.wishbone.Interface', 'wishbone.Interface', ({}, {}), '()', False, 'from litex.soc.interconnect import wishbone\n')] |
allmalaysianews/article-extractor | goose/parsers.py | 8d0ff3ed01258d0fad56fc22d2c1852e603096b4 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" originally licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import lxml.html as lxmlhtml
from lxml.html import soupparser
from lxml import etree
from copy import deepcopy
from goose.text import innerTrim
from goose.text import encodeValue
class Parser(object):
@classmethod
def xpath_re(self, node, expression):
regexp_namespace = "http://exslt.org/regular-expressions"
items = node.xpath(expression, namespaces={'re': regexp_namespace})
return items
@classmethod
def drop_tag(self, nodes):
if isinstance(nodes, list):
for node in nodes:
node.drop_tag()
else:
nodes.drop_tag()
@classmethod
def css_select(self, node, selector):
return node.cssselect(selector)
@classmethod
def fromstring(self, html):
html = encodeValue(html)
self.doc = lxmlhtml.fromstring(html)
return self.doc
@classmethod
def nodeToString(self, node):
return etree.tostring(node)
@classmethod
def replaceTag(self, node, tag):
node.tag = tag
@classmethod
def stripTags(self, node, *tags):
etree.strip_tags(node, *tags)
@classmethod
def getElementById(self, node, idd):
selector = '//*[@id="%s"]' % idd
elems = node.xpath(selector)
if elems:
return elems[0]
return None
@classmethod
def getElementsByTag(self, node, tag=None, attr=None, value=None, childs=False):
NS = "http://exslt.org/regular-expressions"
# selector = tag or '*'
selector = 'descendant-or-self::%s' % (tag or '*')
if attr and value:
selector = '%s[re:test(@%s, "%s", "i")]' % (selector, attr, value)
elems = node.xpath(selector, namespaces={"re": NS})
# remove the root node
# if we have a selection tag
if node in elems and (tag or childs):
elems.remove(node)
return elems
@classmethod
def appendChild(self, node, child):
node.append(child)
@classmethod
def childNodes(self, node):
return list(node)
@classmethod
def childNodesWithText(self, node):
root = node
# create the first text node
# if we have some text in the node
if root.text:
t = lxmlhtml.HtmlElement()
t.text = root.text
t.tag = 'text'
root.text = None
root.insert(0, t)
# loop childs
for c, n in enumerate(list(root)):
idx = root.index(n)
# don't process texts nodes
if n.tag == 'text':
continue
# create a text node for tail
if n.tail:
t = self.createElement(tag='text', text=n.tail, tail=None)
root.insert(idx + 1, t)
return list(root)
@classmethod
def textToPara(self, text):
return self.fromstring(text)
@classmethod
def getChildren(self, node):
return node.getchildren()
@classmethod
def getElementsByTags(self, node, tags):
selector = ','.join(tags)
elems = self.css_select(node, selector)
# remove the root node
# if we have a selection tag
if node in elems:
elems.remove(node)
return elems
@classmethod
def createElement(self, tag='p', text=None, tail=None):
t = lxmlhtml.HtmlElement()
t.tag = tag
t.text = text
t.tail = tail
return t
@classmethod
def getComments(self, node):
return node.xpath('//comment()')
@classmethod
def getParent(self, node):
return node.getparent()
@classmethod
def remove(self, node):
parent = node.getparent()
if parent is not None:
if node.tail:
prev = node.getprevious()
if prev is None:
if not parent.text:
parent.text = ''
parent.text += u' ' + node.tail
else:
if not prev.tail:
prev.tail = ''
prev.tail += u' ' + node.tail
node.clear()
parent.remove(node)
@classmethod
def getTag(self, node):
return node.tag
@classmethod
def getText(self, node):
txts = [i for i in node.itertext()]
return innerTrim(u' '.join(txts).strip())
@classmethod
def previousSiblings(self, node):
nodes = []
for c, n in enumerate(node.itersiblings(preceding=True)):
nodes.append(n)
return nodes
@classmethod
def previousSibling(self, node):
nodes = []
for c, n in enumerate(node.itersiblings(preceding=True)):
nodes.append(n)
if c == 0:
break
return nodes[0] if nodes else None
@classmethod
def nextSibling(self, node):
nodes = []
for c, n in enumerate(node.itersiblings(preceding=False)):
nodes.append(n)
if c == 0:
break
return nodes[0] if nodes else None
@classmethod
def isTextNode(self, node):
return True if node.tag == 'text' else False
@classmethod
def getAttribute(self, node, attr=None):
if attr:
return node.attrib.get(attr, None)
return attr
@classmethod
def delAttribute(self, node, attr=None):
if attr:
_attr = node.attrib.get(attr, None)
if _attr:
del node.attrib[attr]
@classmethod
def setAttribute(self, node, attr=None, value=None):
if attr and value:
node.set(attr, value)
@classmethod
def outerHtml(self, node):
e0 = node
if e0.tail:
e0 = deepcopy(e0)
e0.tail = None
return self.nodeToString(e0)
class ParserSoup(Parser):
@classmethod
def fromstring(self, html):
html = encodeValue(html)
self.doc = soupparser.fromstring(html)
return self.doc
| [((53, 15, 53, 32), 'goose.text.encodeValue', 'encodeValue', ({(53, 27, 53, 31): 'html'}, {}), '(html)', False, 'from goose.text import encodeValue\n'), ((54, 19, 54, 44), 'lxml.html.fromstring', 'lxmlhtml.fromstring', ({(54, 39, 54, 43): 'html'}, {}), '(html)', True, 'import lxml.html as lxmlhtml\n'), ((59, 15, 59, 35), 'lxml.etree.tostring', 'etree.tostring', ({(59, 30, 59, 34): 'node'}, {}), '(node)', False, 'from lxml import etree\n'), ((67, 8, 67, 37), 'lxml.etree.strip_tags', 'etree.strip_tags', ({(67, 25, 67, 29): 'node', (67, 31, 67, 36): '*tags'}, {}), '(node, *tags)', False, 'from lxml import etree\n'), ((142, 12, 142, 34), 'lxml.html.HtmlElement', 'lxmlhtml.HtmlElement', ({}, {}), '()', True, 'import lxml.html as lxmlhtml\n'), ((242, 15, 242, 32), 'goose.text.encodeValue', 'encodeValue', ({(242, 27, 242, 31): 'html'}, {}), '(html)', False, 'from goose.text import encodeValue\n'), ((243, 19, 243, 46), 'lxml.html.soupparser.fromstring', 'soupparser.fromstring', ({(243, 41, 243, 45): 'html'}, {}), '(html)', False, 'from lxml.html import soupparser\n'), ((105, 16, 105, 38), 'lxml.html.HtmlElement', 'lxmlhtml.HtmlElement', ({}, {}), '()', True, 'import lxml.html as lxmlhtml\n'), ((233, 17, 233, 29), 'copy.deepcopy', 'deepcopy', ({(233, 26, 233, 28): 'e0'}, {}), '(e0)', False, 'from copy import deepcopy\n')] |
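A short usage sketch for the Parser helpers above (illustrative only, not part of the original repository; the HTML snippet is an assumed input):

from goose.parsers import Parser

html = "<html><body><div id='post'><p>First</p><p class='note'>Second</p></div></body></html>"

doc = Parser.fromstring(html)
post = Parser.getElementById(doc, "post")
paragraphs = Parser.getElementsByTag(post, tag="p")

print(len(paragraphs))        # 2
print(Parser.getText(post))   # First Second
print(Parser.outerHtml(paragraphs[0]))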
SoyBeansLab/daizu-online-judge-backend | src/infrastructure/database/postgres/sqlhandler.py | 873f81fdad2f216e28b83341a6d88b0e21078d6e | from logging import getLogger
import os
from typing import List, Union

import psycopg2

from interface.database.sqlhandler import Cursor as AbsCursor
from interface.database.sqlhandler import Result as AbsResult
from interface.database.sqlhandler import SqlHandler as AbsSqlHandler
from exceptions.waf import SqlTransactionException

logger = getLogger("daizu").getChild("infrastracture.SqlHandler")


class Result(AbsResult):
    def __init__(self, rowid: int):
        self.last_insertid = rowid

    def lastrowid(self) -> int:
        return self.last_insertid


class Cursor(AbsCursor):
    def __init__(self, cursor):
        self.cursor = cursor

    def fetch_all(self):
        return self.cursor

    def fetch_one(self):
        if len(self.cursor) == 0:
            return []
        return self.cursor[0]


class SqlHandler(AbsSqlHandler):
    def __init__(self):
        # Take the connection settings from the environment.
        self.host = os.getenv("DAIZU_DATABASE_HOST", "localhost")
        self.dbname = os.getenv("DAIZU_DATABASE_NAME", "doj")
        self.user = os.getenv("DAIZU_DATABASE_USERNAME", "daizu")
        self.password = os.getenv("DAIZU_DATABASE_PASSWORD", "soybeanslab")
        try:
            self.connection = psycopg2.connect(
                host=self.host,
                dbname=self.dbname,
                user=self.user,
                password=self.password,
            )
        except psycopg2.OperationalError as err:
            raise err
        # self.cursor = self.connection.cursor()

    def execute(self, query: str, *args) -> Result:
        try:
            with self.connection.cursor() as cursor:
                cursor.execute(query, args)
                lastrowid = cursor.lastrowid
                self.connection.commit()
        except psycopg2.errors.InFailedSqlTransaction as e:
            logger.error(e)
            self.connection.rollback()
            raise SqlTransactionException()
        return lastrowid

    def query(self, query: str, *args) -> Cursor:
        try:
            with self.connection.cursor() as cursor:
                cursor.execute(query, *args)
                data = cursor.fetchall()
        except psycopg2.errors.InFailedSqlTransaction as e:
            logger.error(e)
            self.connection.rollback()
            raise SqlTransactionException()
        return Cursor(data)
| [((14, 9, 14, 27), 'logging.getLogger', 'getLogger', ({(14, 19, 14, 26): '"""daizu"""'}, {}), "('daizu')", False, 'from logging import getLogger\n'), ((41, 20, 41, 65), 'os.getenv', 'os.getenv', ({(41, 30, 41, 51): '"""DAIZU_DATABASE_HOST"""', (41, 53, 41, 64): '"""localhost"""'}, {}), "('DAIZU_DATABASE_HOST', 'localhost')", False, 'import os\n'), ((42, 22, 42, 61), 'os.getenv', 'os.getenv', ({(42, 32, 42, 53): '"""DAIZU_DATABASE_NAME"""', (42, 55, 42, 60): '"""doj"""'}, {}), "('DAIZU_DATABASE_NAME', 'doj')", False, 'import os\n'), ((43, 20, 43, 65), 'os.getenv', 'os.getenv', ({(43, 30, 43, 55): '"""DAIZU_DATABASE_USERNAME"""', (43, 57, 43, 64): '"""daizu"""'}, {}), "('DAIZU_DATABASE_USERNAME', 'daizu')", False, 'import os\n'), ((44, 24, 44, 75), 'os.getenv', 'os.getenv', ({(44, 34, 44, 59): '"""DAIZU_DATABASE_PASSWORD"""', (44, 61, 44, 74): '"""soybeanslab"""'}, {}), "('DAIZU_DATABASE_PASSWORD', 'soybeanslab')", False, 'import os\n'), ((46, 30, 51, 13), 'psycopg2.connect', 'psycopg2.connect', (), '', False, 'import psycopg2\n'), ((65, 18, 65, 43), 'exceptions.waf.SqlTransactionException', 'SqlTransactionException', ({}, {}), '()', False, 'from exceptions.waf import SqlTransactionException\n'), ((77, 18, 77, 43), 'exceptions.waf.SqlTransactionException', 'SqlTransactionException', ({}, {}), '()', False, 'from exceptions.waf import SqlTransactionException\n')] |
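A brief usage sketch for SqlHandler above (illustrative only; the import path, table name, and SQL are assumptions):

from infrastructure.database.postgres.sqlhandler import SqlHandler

handler = SqlHandler()  # connection settings come from the DAIZU_DATABASE_* env vars

# execute() packs *args into a tuple itself, so parameters are passed positionally.
handler.execute("INSERT INTO submissions (user_name, score) VALUES (%s, %s)", "alice", 100)

# query() unpacks *args, so the parameter tuple is passed as a single argument.
rows = handler.query("SELECT user_name, score FROM submissions WHERE score >= %s", (60,))
for row in rows.fetch_all():
    print(row)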
CityPulse/CP_Resourcemanagement | virtualisation/wrapper/parser/xmlparser.py | aa670fa89d5e086a98ade3ccc152518be55abf2e | from virtualisation.clock.abstractclock import AbstractClock

__author__ = 'Marten Fischer ([email protected])'

from virtualisation.wrapper.parser.abstractparser import AbstractParser
from virtualisation.misc.jsonobject import JSONObject as JOb

import datetime as dt


class XMLParser(AbstractParser):
    """
    Maps a list of values read by a CSVReader with a given naming list
    """

    def __init__(self, wrapper):
        super(XMLParser, self).__init__(wrapper)
        self.timestampcell = -1
        if self.wrapper.getSensorDescription().isTimestampedStream():
            try:
                self.timestampcell = -1
                self.timestampformat = self.wrapper.getSensorDescription().timestamp.format
            except ValueError:
                self.timestampcell = -1

    def parse(self, data, clock):
        raise Exception("not implemented yet!")
        if not data:  # nothing received or nothing in the history -> nothing to parse
            return None
| [] |
jonathan-greig/plaso | plaso/formatters/interface.py | b88a6e54c06a162295d09b016bddbfbfe7ca9070 | # -*- coding: utf-8 -*-
"""This file contains the event formatters interface classes.
The l2t_csv and other formats are dependent on a message field,
referred to as description_long and description_short in l2t_csv.
Plaso no longer stores these field explicitly.
A formatter, with a format string definition, is used to convert
the event object values into a formatted string that is similar
to the description_long and description_short field.
"""
import abc
import re
from plaso.formatters import logger
class EventFormatterHelper(object):
"""Base class of helper for formatting event data."""
@abc.abstractmethod
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
class BooleanEventFormatterHelper(EventFormatterHelper):
"""Helper for formatting boolean event data.
Attributes:
input_attribute (str): name of the attribute that contains the boolean
input value.
output_attribute (str): name of the attribute where the boolean output
value should be stored.
value_if_false (str): output value if the boolean input value is False.
value_if_true (str): output value if the boolean input value is True.
"""
def __init__(
self, input_attribute=None, output_attribute=None, value_if_false=None,
value_if_true=None):
"""Initializes a helper for formatting boolean event data.
Args:
input_attribute (Optional[str]): name of the attribute that contains
the boolean input value.
output_attribute (Optional[str]): name of the attribute where the
boolean output value should be stored.
value_if_false (str): output value if the boolean input value is False.
value_if_true (str): output value if the boolean input value is True.
"""
super(BooleanEventFormatterHelper, self).__init__()
self.input_attribute = input_attribute
self.output_attribute = output_attribute
self.value_if_false = value_if_false
self.value_if_true = value_if_true
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
input_value = event_values.get(self.input_attribute, None)
if input_value:
output_value = self.value_if_true
else:
output_value = self.value_if_false
event_values[self.output_attribute] = output_value
class CustomEventFormatterHelper(EventFormatterHelper):
"""Base class for a helper for custom formatting of event data."""
DATA_TYPE = ''
IDENTIFIER = ''
@abc.abstractmethod
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
class EnumerationEventFormatterHelper(EventFormatterHelper):
"""Helper for formatting enumeration event data.
Attributes:
default (str): default value.
input_attribute (str): name of the attribute that contains the enumeration
input value.
output_attribute (str): name of the attribute where the enumeration output
value should be stored.
values (dict[str, str]): mapping of enumeration input and output values.
"""
def __init__(
self, default=None, input_attribute=None, output_attribute=None,
values=None):
"""Initializes a helper for formatting enumeration event data.
Args:
default (Optional[str]): default value.
input_attribute (Optional[str]): name of the attribute that contains
the enumeration input value.
output_attribute (Optional[str]): name of the attribute where the
enumeration output value should be stored.
values (Optional[dict[str, str]]): mapping of enumeration input and
output values.
"""
super(EnumerationEventFormatterHelper, self).__init__()
self.default = default
self.input_attribute = input_attribute
self.output_attribute = output_attribute
self.values = values or {}
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
If default value is None and there is no corresponding enumeration value
then the original value is used.
Args:
event_values (dict[str, object]): event values.
"""
input_value = event_values.get(self.input_attribute, None)
if input_value is not None:
default_value = self.default
if default_value is None:
default_value = input_value
event_values[self.output_attribute] = self.values.get(
input_value, default_value)
class FlagsEventFormatterHelper(EventFormatterHelper):
"""Helper for formatting flags event data.
Attributes:
input_attribute (str): name of the attribute that contains the flags
input value.
output_attribute (str): name of the attribute where the flags output
value should be stored.
values (dict[str, str]): mapping of flags input and output values.
"""
def __init__(
self, input_attribute=None, output_attribute=None, values=None):
"""Initializes a helper for formatting flags event data.
Args:
input_attribute (Optional[str]): name of the attribute that contains
the flags input value.
output_attribute (Optional[str]): name of the attribute where the
flags output value should be stored.
values (Optional[dict[str, str]]): mapping of flags input and output
values.
"""
super(FlagsEventFormatterHelper, self).__init__()
self.input_attribute = input_attribute
self.output_attribute = output_attribute
self.values = values or {}
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
input_value = event_values.get(self.input_attribute, None)
if input_value is None:
return
output_values = []
for flag, mapped_value in self.values.items():
if flag & input_value:
output_values.append(mapped_value)
event_values[self.output_attribute] = ', '.join(output_values)
class EventFormatter(object):
"""Base class to format event values.
Attributes:
custom_helpers (list[str]): identifiers of custom event formatter helpers.
helpers (list[EventFormatterHelper]): event formatter helpers.
"""
# The format string can be defined as:
# {name}, {name:format}, {name!conversion}, {name!conversion:format}
_FORMAT_STRING_ATTRIBUTE_NAME_RE = re.compile(
'{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}')
def __init__(self, data_type='internal'):
"""Initializes an event formatter.
Args:
data_type (Optional[str]): unique identifier for the event data supported
by the formatter.
"""
super(EventFormatter, self).__init__()
self._data_type = data_type
self._format_string_attribute_names = None
self.custom_helpers = []
self.helpers = []
@property
def data_type(self):
"""str: unique identifier for the event data supported by the formatter."""
return self._data_type.lower()
def _FormatMessage(self, format_string, event_values):
"""Determines the formatted message.
Args:
format_string (str): message format string.
event_values (dict[str, object]): event values.
Returns:
str: formatted message.
"""
try:
message_string = format_string.format(**event_values)
except KeyError as exception:
data_type = event_values.get('data_type', 'N/A')
display_name = event_values.get('display_name', 'N/A')
event_identifier = event_values.get('uuid', 'N/A')
parser_chain = event_values.get('parser', 'N/A')
error_message = (
'unable to format string: "{0:s}" missing required event '
'value: {1!s}').format(format_string, exception)
error_message = (
'Event: {0:s} data type: {1:s} display name: {2:s} '
'parser chain: {3:s} with error: {4:s}').format(
event_identifier, data_type, display_name, parser_chain,
error_message)
logger.error(error_message)
attribute_values = []
for attribute, value in event_values.items():
attribute_values.append('{0:s}: {1!s}'.format(attribute, value))
message_string = ' '.join(attribute_values)
except UnicodeDecodeError as exception:
data_type = event_values.get('data_type', 'N/A')
display_name = event_values.get('display_name', 'N/A')
event_identifier = event_values.get('uuid', 'N/A')
parser_chain = event_values.get('parser', 'N/A')
error_message = 'Unicode decode error: {0!s}'.format(exception)
error_message = (
'Event: {0:s} data type: {1:s} display name: {2:s} '
'parser chain: {3:s} with error: {4:s}').format(
event_identifier, data_type, display_name, parser_chain,
error_message)
logger.error(error_message)
message_string = ''
# Strip carriage return and linefeed form the message strings.
# Using replace function here because it is faster than re.sub() or
# string.strip().
return message_string.replace('\r', '').replace('\n', '')
def FormatEventValues(self, event_values):
"""Formats event values using the helpers.
Args:
event_values (dict[str, object]): event values.
"""
for helper in self.helpers:
helper.FormatEventValues(event_values)
@abc.abstractmethod
def GetFormatStringAttributeNames(self):
"""Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
"""
# pylint: disable=unused-argument
def AddCustomHelper(
self, identifier, input_attribute=None, output_attribute=None):
"""Adds a custom event formatter helper.
Args:
identifier (str): identifier.
input_attribute (Optional[str]): name of the attribute that contains
the input value.
output_attribute (Optional[str]): name of the attribute where the
output value should be stored.
"""
self.custom_helpers.append(identifier)
def AddHelper(self, helper):
"""Adds an event formatter helper.
Args:
helper (EventFormatterHelper): event formatter helper to add.
"""
self.helpers.append(helper)
@abc.abstractmethod
def GetMessage(self, event_values):
"""Determines the message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: message.
"""
@abc.abstractmethod
def GetMessageShort(self, event_values):
"""Determines the short message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: short message.
"""
class BasicEventFormatter(EventFormatter):
"""Format event values using a message format string.
Attributes:
custom_helpers (list[str]): identifiers of custom event formatter helpers.
helpers (list[EventFormatterHelper]): event formatter helpers.
"""
def __init__(
self, data_type='basic', format_string=None, format_string_short=None):
"""Initializes a basic event formatter.
The syntax of the format strings is similar to that of format() where
the place holder for a certain event object attribute is defined as
{attribute_name}.
Args:
data_type (Optional[str]): unique identifier for the event data supported
by the formatter.
format_string (Optional[str]): (long) message format string.
format_string_short (Optional[str]): short message format string.
"""
super(BasicEventFormatter, self).__init__(data_type=data_type)
self._format_string_attribute_names = None
self._format_string = format_string
self._format_string_short = format_string_short
def GetFormatStringAttributeNames(self):
"""Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
"""
if self._format_string_attribute_names is None:
self._format_string_attribute_names = (
self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
self._format_string))
return set(self._format_string_attribute_names)
def GetMessage(self, event_values):
"""Determines the message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: message.
"""
return self._FormatMessage(self._format_string, event_values)
def GetMessageShort(self, event_values):
"""Determines the short message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: short message.
"""
if self._format_string_short:
format_string = self._format_string_short
else:
format_string = self._format_string
short_message_string = self._FormatMessage(format_string, event_values)
# Truncate the short message string if necessary.
if len(short_message_string) > 80:
short_message_string = '{0:s}...'.format(short_message_string[:77])
return short_message_string
class ConditionalEventFormatter(EventFormatter):
"""Conditionally format event values using format string pieces."""
_DEFAULT_FORMAT_STRING_SEPARATOR = ' '
def __init__(
self, data_type='conditional', format_string_pieces=None,
format_string_separator=None, format_string_short_pieces=None):
"""Initializes a conditional event formatter.
The syntax of the format strings pieces is similar to of the basic event
formatter (BasicEventFormatter). Every format string piece should contain
at maximum one unique attribute name. Format string pieces without an
attribute name are supported.
Args:
data_type (Optional[str]): unique identifier for the event data supported
by the formatter.
format_string_pieces (Optional[list[str]]): (long) message format string
pieces.
format_string_separator (Optional[str]): string by which separate format
string pieces should be joined.
format_string_short_pieces (Optional[list[str]]): short message format
string pieces.
"""
if format_string_separator is None:
format_string_separator = self._DEFAULT_FORMAT_STRING_SEPARATOR
super(ConditionalEventFormatter, self).__init__(data_type=data_type)
self._format_string_pieces = format_string_pieces or []
self._format_string_pieces_map = []
self._format_string_separator = format_string_separator
self._format_string_short_pieces = format_string_short_pieces or []
self._format_string_short_pieces_map = []
def _CreateFormatStringMap(
self, format_string_pieces, format_string_pieces_map):
"""Creates a format string map.
The format string pieces map is a list containing the attribute name
per format string piece. E.g. ["Description: {description}"] would be
mapped to: [0] = "description". If the string piece does not contain
an attribute name it is treated as text that does not needs formatting.
Args:
format_string_pieces (list[str]): format string pieces.
format_string_pieces_map (list[str]): format string pieces map.
Raises:
RuntimeError: when an invalid format string piece is encountered.
"""
for format_string_piece in format_string_pieces:
attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
format_string_piece)
if len(set(attribute_names)) > 1:
raise RuntimeError((
'Invalid format string piece: [{0:s}] contains more than 1 '
'attribute name.').format(format_string_piece))
if not attribute_names:
# The text format string piece is stored as an empty map entry to keep
# the index in the map equal to the format string pieces.
attribute_name = ''
else:
attribute_name = attribute_names[0]
format_string_pieces_map.append(attribute_name)
def _CreateFormatStringMaps(self):
"""Creates the format string maps.
Maps are built of the string pieces and their corresponding attribute
name to optimize conditional string formatting.
Raises:
RuntimeError: when an invalid format string piece is encountered.
"""
self._format_string_pieces_map = []
self._CreateFormatStringMap(
self._format_string_pieces, self._format_string_pieces_map)
self._format_string_short_pieces_map = []
self._CreateFormatStringMap(
self._format_string_short_pieces, self._format_string_short_pieces_map)
def _ConditionalFormatMessage(
self, format_string_pieces, format_string_pieces_map, event_values):
"""Determines the conditional formatted message.
Args:
format_string_pieces (dict[str, str]): format string pieces.
format_string_pieces_map (list[int, str]): format string pieces map.
event_values (dict[str, object]): event values.
Returns:
str: conditional formatted message.
Raises:
RuntimeError: when an invalid format string piece is encountered.
"""
string_pieces = []
for map_index, attribute_name in enumerate(format_string_pieces_map):
if not attribute_name or event_values.get(
attribute_name, None) is not None:
string_pieces.append(format_string_pieces[map_index])
format_string = self._format_string_separator.join(string_pieces)
return self._FormatMessage(format_string, event_values)
def GetFormatStringAttributeNames(self):
"""Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
"""
if self._format_string_attribute_names is None:
self._format_string_attribute_names = []
for format_string_piece in self._format_string_pieces:
attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
format_string_piece)
if attribute_names:
self._format_string_attribute_names.extend(attribute_names)
return set(self._format_string_attribute_names)
def GetMessage(self, event_values):
"""Determines the message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: message.
"""
if not self._format_string_pieces_map:
self._CreateFormatStringMaps()
return self._ConditionalFormatMessage(
self._format_string_pieces, self._format_string_pieces_map,
event_values)
def GetMessageShort(self, event_values):
"""Determines the short message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: short message.
"""
if not self._format_string_pieces_map:
self._CreateFormatStringMaps()
if (self._format_string_short_pieces and
self._format_string_short_pieces != ['']):
format_string_pieces = self._format_string_short_pieces
format_string_pieces_map = self._format_string_short_pieces_map
else:
format_string_pieces = self._format_string_pieces
format_string_pieces_map = self._format_string_pieces_map
short_message_string = self._ConditionalFormatMessage(
format_string_pieces, format_string_pieces_map, event_values)
# Truncate the short message string if necessary.
if len(short_message_string) > 80:
short_message_string = '{0:s}...'.format(short_message_string[:77])
return short_message_string
| [((200, 37, 201, 50), 're.compile', 're.compile', ({(201, 6, 201, 49): '"""{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}"""'}, {}), "('{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}')", False, 'import re\n'), ((249, 6, 249, 33), 'plaso.formatters.logger.error', 'logger.error', ({(249, 19, 249, 32): 'error_message'}, {}), '(error_message)', False, 'from plaso.formatters import logger\n'), ((269, 6, 269, 33), 'plaso.formatters.logger.error', 'logger.error', ({(269, 19, 269, 32): 'error_message'}, {}), '(error_message)', False, 'from plaso.formatters import logger\n')] |
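A small sketch of how the formatter classes above are used (illustrative only; the data type, attribute names, and format strings are invented for the example):

from plaso.formatters import interface

formatter = interface.ConditionalEventFormatter(
    data_type='example:log:entry',
    format_string_pieces=[
        'User: {username}',
        'Action: {action}',
        'PID: {process_identifier}'],
    format_string_separator=' ')

event_values = {'username': 'root', 'action': 'login'}

# Pieces whose attribute is missing from event_values are silently skipped.
print(formatter.GetMessage(event_values))        # User: root Action: login
print(formatter.GetFormatStringAttributeNames())  # {'username', 'action', 'process_identifier'}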
LiuKaiqiang94/PyStudyExample | python_program/condition.py | b30212718b218c71e06b68677f55c33e3a1dbf46 |
def main():
    val = int(input("input a num"))
    if val < 10:
        print("A")
    elif val < 20:
        print("B")
    elif val < 30:
        print("C")
    else:
        print("D")


main()
| [] |
Rukaume/LRCN | Annotated_video/test/Annotatedvideo_worm.py | 0d1928cc72544f59a4335fea7febc561d3dfc118 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 4 22:27:11 2020

@author: Miyazaki
"""

imdir = "C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/chamber3"
resultdir = "C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/result0918.csv"

import os, cv2, shutil
from tqdm import tqdm
import pandas as pd

os.chdir(imdir)
os.makedirs("../annotatedimages", exist_ok=True)

imlist = os.listdir("./")
imlist = [i for i in imlist if os.path.splitext(i)[1] == '.jpg'
          or os.path.splitext(i)[1] == '.png']
imlist.sort()
result = pd.read_csv(resultdir)

font = cv2.FONT_HERSHEY_SIMPLEX

for i in tqdm(range(len(imlist))):
    if int(result.loc[i]) == 0:
        tempim = cv2.imread(imlist[i])
        tempim = cv2.putText(tempim, 'quiescent', (10, 500), font, 1, (255, 0, 0), 2, cv2.LINE_AA)
        cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)
    elif int(result.loc[i]) == 1:
        tempim = cv2.imread(imlist[i])
        tempim = cv2.putText(tempim, 'dwell', (10, 500), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)
    elif int(result.loc[i]) == 2:
        tempim = cv2.imread(imlist[i])
        tempim = cv2.putText(tempim, 'forward', (10, 500), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
        cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)
    elif int(result.loc[i]) == 3:
        tempim = cv2.imread(imlist[i])
        tempim = cv2.putText(tempim, 'backward', (10, 500), font, 1, (100, 100, 0), 2, cv2.LINE_AA)
        cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)
    else:
        pass
| [((15, 0, 15, 15), 'os.chdir', 'os.chdir', ({(15, 9, 15, 14): 'imdir'}, {}), '(imdir)', False, 'import os, cv2, shutil\n'), ((16, 0, 16, 50), 'os.makedirs', 'os.makedirs', (), '', False, 'import os, cv2, shutil\n'), ((18, 10, 18, 26), 'os.listdir', 'os.listdir', ({(18, 21, 18, 25): '"""./"""'}, {}), "('./')", False, 'import os, cv2, shutil\n'), ((23, 9, 23, 31), 'pandas.read_csv', 'pd.read_csv', ({(23, 21, 23, 30): 'resultdir'}, {}), '(resultdir)', True, 'import pandas as pd\n'), ((27, 13, 27, 34), 'cv2.imread', 'cv2.imread', ({(27, 24, 27, 33): 'imlist[i]'}, {}), '(imlist[i])', False, 'import os, cv2, shutil\n'), ((28, 13, 28, 86), 'cv2.putText', 'cv2.putText', ({(28, 25, 28, 31): 'tempim', (28, 32, 28, 43): '"""quiescent"""', (28, 44, 28, 52): '(10, 500)', (28, 54, 28, 58): 'font', (28, 60, 28, 61): '1', (28, 62, 28, 71): '(255, 0, 0)', (28, 72, 28, 73): '2', (28, 74, 28, 85): 'cv2.LINE_AA'}, {}), "(tempim, 'quiescent', (10, 500), font, 1, (255, 0, 0), 2, cv2.\n LINE_AA)", False, 'import os, cv2, shutil\n'), ((31, 13, 31, 34), 'cv2.imread', 'cv2.imread', ({(31, 24, 31, 33): 'imlist[i]'}, {}), '(imlist[i])', False, 'import os, cv2, shutil\n'), ((32, 13, 32, 82), 'cv2.putText', 'cv2.putText', ({(32, 25, 32, 31): 'tempim', (32, 32, 32, 39): '"""dwell"""', (32, 40, 32, 48): '(10, 500)', (32, 50, 32, 54): 'font', (32, 56, 32, 57): '1', (32, 58, 32, 67): '(0, 255, 0)', (32, 68, 32, 69): '2', (32, 70, 32, 81): 'cv2.LINE_AA'}, {}), "(tempim, 'dwell', (10, 500), font, 1, (0, 255, 0), 2, cv2.LINE_AA)", False, 'import os, cv2, shutil\n'), ((35, 13, 35, 34), 'cv2.imread', 'cv2.imread', ({(35, 24, 35, 33): 'imlist[i]'}, {}), '(imlist[i])', False, 'import os, cv2, shutil\n'), ((36, 13, 36, 84), 'cv2.putText', 'cv2.putText', ({(36, 25, 36, 31): 'tempim', (36, 32, 36, 41): '"""forward"""', (36, 42, 36, 50): '(10, 500)', (36, 52, 36, 56): 'font', (36, 58, 36, 59): '1', (36, 60, 36, 69): '(0, 0, 255)', (36, 70, 36, 71): '2', (36, 72, 36, 83): 'cv2.LINE_AA'}, {}), "(tempim, 'forward', (10, 500), font, 1, (0, 0, 255), 2, cv2.LINE_AA)", False, 'import os, cv2, shutil\n'), ((19, 31, 19, 50), 'os.path.splitext', 'os.path.splitext', ({(19, 48, 19, 49): 'i'}, {}), '(i)', False, 'import os, cv2, shutil\n'), ((20, 19, 20, 38), 'os.path.splitext', 'os.path.splitext', ({(20, 36, 20, 37): 'i'}, {}), '(i)', False, 'import os, cv2, shutil\n'), ((39, 13, 39, 34), 'cv2.imread', 'cv2.imread', ({(39, 24, 39, 33): 'imlist[i]'}, {}), '(imlist[i])', False, 'import os, cv2, shutil\n'), ((40, 13, 40, 87), 'cv2.putText', 'cv2.putText', ({(40, 25, 40, 31): 'tempim', (40, 32, 40, 42): '"""backward"""', (40, 43, 40, 51): '(10, 500)', (40, 53, 40, 57): 'font', (40, 59, 40, 60): '1', (40, 61, 40, 72): '(100, 100, 0)', (40, 73, 40, 74): '2', (40, 75, 40, 86): 'cv2.LINE_AA'}, {}), "(tempim, 'backward', (10, 500), font, 1, (100, 100, 0), 2, cv2.\n LINE_AA)", False, 'import os, cv2, shutil\n')] |
masterisira/ELIZA_OF-master | emilia/modules/math.py | 02a7dbf48e4a3d4ee0981e6a074529ab1497aafe | from typing import List
import requests
from telegram import Message, Update, Bot, MessageEntity
from telegram.ext import CommandHandler, run_async
from emilia import dispatcher
from emilia.modules.disable import DisableAbleCommandHandler
from emilia.modules.helper_funcs.alternate import send_message
import pynewtonmath as newton
import math
@run_async
def simplify(update, context):
args=context.args
args=str(args)
message = update.effective_message
message.reply_text(newton.simplify('{}'.format(args[0])))
@run_async
def factor(update, context):
args=context.args
args=str(args)
message = update.effective_message
message.reply_text(newton.factor('{}'.format(args[0])))
@run_async
def derive(update, context):
args=context.args
args=str(args)
message = update.effective_message
message.reply_text(newton.derive('{}'.format(args[0])))
@run_async
def integrate(update, context):
args=context.args
args=str(args)
message = update.effective_message
message.reply_text(newton.integrate('{}'.format(args[0])))
@run_async
def zeroes(update, context):
args=context.args
args=str(args)
message = update.effective_message
message.reply_text(newton.zeroes('{}'.format(args[0])))
@run_async
def tangent(update, context):
args=context.args
args=str(args)
message = update.effective_message
message.reply_text(newton.tangent('{}'.format(args[0])))
@run_async
def area(update, context):
args=context.args
args=str(args)
message = update.effective_message
message.reply_text(newton.area('{}'.format(args[0])))
@run_async
def cos(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.cos(int(args[0])))
@run_async
def sin(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.sin(int(args[0])))
@run_async
def tan(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.tan(int(args[0])))
@run_async
def arccos(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.acos(int(args[0])))
@run_async
def arcsin(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.asin(int(args[0])))
@run_async
def arctan(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.atan(int(args[0])))
@run_async
def abs(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.fabs(int(args[0])))
@run_async
def log(update, context):
args = context.args
message = update.effective_message
message.reply_text(math.log(int(args[0])))
__help__ = """
Under Development. More features soon.
- /cos: Cosine `/cos pi`
- /sin: Sine `/sin 0`
- /tan: Tangent `/tan 0`
- /arccos: Inverse Cosine `/arccos 1`
- /arcsin: Inverse Sine `/arcsin 0`
- /arctan: Inverse Tangent `/arctan 0`
- /abs: Absolute Value `/abs -1`
- /log: Logarithm `/log 2l8`
__Keep in mind__: To find the tangent line of a function at a certain x value, send the request as c|f(x) where c is the given x value and f(x) is the function expression, the separator is a vertical bar '|'. See the table above for an example request.
To find the area under a function, send the request as c:d|f(x) where c is the starting x value, d is the ending x value, and f(x) is the function under which you want the curve between the two x values.
To compute fractions, enter expressions as numerator(over)denominator. For example, to process 2/4 you must send in your expression as 2(over)4. The result expression will be in standard math notation (1/2, 3/4).
"""
SIMPLIFY_HANDLER = DisableAbleCommandHandler("math", simplify, pass_args=True)
FACTOR_HANDLER = DisableAbleCommandHandler("factor", factor, pass_args=True)
DERIVE_HANDLER = DisableAbleCommandHandler("derive", derive, pass_args=True)
INTEGRATE_HANDLER = DisableAbleCommandHandler("integrate", integrate, pass_args=True)
ZEROES_HANDLER = DisableAbleCommandHandler("zeroes", zeroes, pass_args=True)
TANGENT_HANDLER = DisableAbleCommandHandler("tangent", tangent, pass_args=True)
AREA_HANDLER = DisableAbleCommandHandler("area", area, pass_args=True)
COS_HANDLER = DisableAbleCommandHandler("cos", cos, pass_args=True)
SIN_HANDLER = DisableAbleCommandHandler("sin", sin, pass_args=True)
TAN_HANDLER = DisableAbleCommandHandler("tan", tan, pass_args=True)
ARCCOS_HANDLER = DisableAbleCommandHandler("arccos", arccos, pass_args=True)
ARCSIN_HANDLER = DisableAbleCommandHandler("arcsin", arcsin, pass_args=True)
ARCTAN_HANDLER = DisableAbleCommandHandler("arctan", arctan, pass_args=True)
ABS_HANDLER = DisableAbleCommandHandler("abs", abs, pass_args=True)
LOG_HANDLER = DisableAbleCommandHandler("log", log, pass_args=True)
dispatcher.add_handler(SIMPLIFY_HANDLER)
dispatcher.add_handler(FACTOR_HANDLER)
dispatcher.add_handler(DERIVE_HANDLER)
dispatcher.add_handler(INTEGRATE_HANDLER)
dispatcher.add_handler(ZEROES_HANDLER)
dispatcher.add_handler(TANGENT_HANDLER)
dispatcher.add_handler(AREA_HANDLER)
dispatcher.add_handler(COS_HANDLER)
dispatcher.add_handler(SIN_HANDLER)
dispatcher.add_handler(TAN_HANDLER)
dispatcher.add_handler(ARCCOS_HANDLER)
dispatcher.add_handler(ARCSIN_HANDLER)
dispatcher.add_handler(ARCTAN_HANDLER)
dispatcher.add_handler(ABS_HANDLER)
dispatcher.add_handler(LOG_HANDLER)
__mod_name__ = "Math"
__command_list__ = ["math","factor","derive","integrate","zeroes","tangent","area","cos","sin","tan","arccos","arcsin","arctan","abs","log"]
__handlers__ = [
SIMPLIFY_HANDLER,FACTOR_HANDLER,DERIVE_HANDLER,INTEGRATE_HANDLER,TANGENT_HANDLER,ZEROES_HANDLER,AREA_HANDLER,COS_HANDLER,SIN_HANDLER,TAN_HANDLER,ARCCOS_HANDLER,ARCSIN_HANDLER,ARCTAN_HANDLER,ABS_HANDLER,LOG_HANDLER
]
| [((128, 19, 128, 78), 'emilia.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (), '', False, 'from emilia.modules.disable import DisableAbleCommandHandler\n'), ((129, 17, 129, 76), 'emilia.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (), '', False, 'from emilia.modules.disable import DisableAbleCommandHandler\n'), ((130, 17, 130, 76), 'emilia.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (), '', False, 'from emilia.modules.disable import DisableAbleCommandHandler\n'), ((131, 20, 131, 85), 'emilia.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (), '', False, 'from emilia.modules.disable import DisableAbleCommandHandler\n'), ((132, 17, 132, 76), 'emilia.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (), '', False, 'from emilia.modules.disable import DisableAbleCommandHandler\n'), ((133, 18, 133, 79), 'emilia.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (), '', False, 'from emilia.modules.disable import DisableAbleCommandHandler\n'), ((134, 15, 134, 70), 'emilia.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (), '', False, 'from emilia.modules.disable import DisableAbleCommandHandler\n'), ((135, 14, 135, 67), 'emilia.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (), '', False, 'from emilia.modules.disable import DisableAbleCommandHandler\n'), ((136, 14, 136, 67), 'emilia.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (), '', False, 'from emilia.modules.disable import DisableAbleCommandHandler\n'), ((137, 14, 137, 67), 'emilia.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (), '', False, 'from emilia.modules.disable import DisableAbleCommandHandler\n'), ((138, 17, 138, 76), 'emilia.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (), '', False, 'from emilia.modules.disable import DisableAbleCommandHandler\n'), ((139, 17, 139, 76), 'emilia.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (), '', False, 'from emilia.modules.disable import DisableAbleCommandHandler\n'), ((140, 17, 140, 76), 'emilia.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (), '', False, 'from emilia.modules.disable import DisableAbleCommandHandler\n'), ((141, 14, 141, 67), 'emilia.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (), '', False, 'from emilia.modules.disable import DisableAbleCommandHandler\n'), ((142, 14, 142, 67), 'emilia.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (), '', False, 'from emilia.modules.disable import DisableAbleCommandHandler\n'), ((144, 0, 144, 40), 'emilia.dispatcher.add_handler', 'dispatcher.add_handler', ({(144, 23, 144, 39): 'SIMPLIFY_HANDLER'}, {}), '(SIMPLIFY_HANDLER)', False, 'from emilia import dispatcher\n'), ((145, 0, 145, 38), 'emilia.dispatcher.add_handler', 'dispatcher.add_handler', ({(145, 23, 145, 37): 'FACTOR_HANDLER'}, {}), '(FACTOR_HANDLER)', False, 'from emilia import dispatcher\n'), ((146, 0, 146, 38), 'emilia.dispatcher.add_handler', 'dispatcher.add_handler', ({(146, 23, 146, 37): 'DERIVE_HANDLER'}, {}), '(DERIVE_HANDLER)', False, 'from emilia import dispatcher\n'), ((147, 0, 147, 41), 'emilia.dispatcher.add_handler', 'dispatcher.add_handler', ({(147, 23, 147, 40): 'INTEGRATE_HANDLER'}, {}), '(INTEGRATE_HANDLER)', False, 'from emilia import dispatcher\n'), ((148, 0, 148, 38), 
'emilia.dispatcher.add_handler', 'dispatcher.add_handler', ({(148, 23, 148, 37): 'ZEROES_HANDLER'}, {}), '(ZEROES_HANDLER)', False, 'from emilia import dispatcher\n'), ((149, 0, 149, 39), 'emilia.dispatcher.add_handler', 'dispatcher.add_handler', ({(149, 23, 149, 38): 'TANGENT_HANDLER'}, {}), '(TANGENT_HANDLER)', False, 'from emilia import dispatcher\n'), ((150, 0, 150, 36), 'emilia.dispatcher.add_handler', 'dispatcher.add_handler', ({(150, 23, 150, 35): 'AREA_HANDLER'}, {}), '(AREA_HANDLER)', False, 'from emilia import dispatcher\n'), ((151, 0, 151, 35), 'emilia.dispatcher.add_handler', 'dispatcher.add_handler', ({(151, 23, 151, 34): 'COS_HANDLER'}, {}), '(COS_HANDLER)', False, 'from emilia import dispatcher\n'), ((152, 0, 152, 35), 'emilia.dispatcher.add_handler', 'dispatcher.add_handler', ({(152, 23, 152, 34): 'SIN_HANDLER'}, {}), '(SIN_HANDLER)', False, 'from emilia import dispatcher\n'), ((153, 0, 153, 35), 'emilia.dispatcher.add_handler', 'dispatcher.add_handler', ({(153, 23, 153, 34): 'TAN_HANDLER'}, {}), '(TAN_HANDLER)', False, 'from emilia import dispatcher\n'), ((154, 0, 154, 38), 'emilia.dispatcher.add_handler', 'dispatcher.add_handler', ({(154, 23, 154, 37): 'ARCCOS_HANDLER'}, {}), '(ARCCOS_HANDLER)', False, 'from emilia import dispatcher\n'), ((155, 0, 155, 38), 'emilia.dispatcher.add_handler', 'dispatcher.add_handler', ({(155, 23, 155, 37): 'ARCSIN_HANDLER'}, {}), '(ARCSIN_HANDLER)', False, 'from emilia import dispatcher\n'), ((156, 0, 156, 38), 'emilia.dispatcher.add_handler', 'dispatcher.add_handler', ({(156, 23, 156, 37): 'ARCTAN_HANDLER'}, {}), '(ARCTAN_HANDLER)', False, 'from emilia import dispatcher\n'), ((157, 0, 157, 35), 'emilia.dispatcher.add_handler', 'dispatcher.add_handler', ({(157, 23, 157, 34): 'ABS_HANDLER'}, {}), '(ABS_HANDLER)', False, 'from emilia import dispatcher\n'), ((158, 0, 158, 35), 'emilia.dispatcher.add_handler', 'dispatcher.add_handler', ({(158, 23, 158, 34): 'LOG_HANDLER'}, {}), '(LOG_HANDLER)', False, 'from emilia import dispatcher\n')] |
matteobjornsson/serverless-rock-paper-scissors | services/IAm.py | 32b6f11644c59dc3bb159ee9e1118fed26a3983d | #
# Created on Thu Apr 22 2021
# Matteo Bjornsson
#
import boto3
from botocore.exceptions import ClientError
import logging
logging.basicConfig(filename="rps.log", level=logging.INFO)
iam_resource = boto3.resource("iam")
sts_client = boto3.client("sts")
def create_role(
iam_role_name: str, assume_role_policy_json: str, policy_arns: list
) -> iam_resource.Role:
"""
Create an IAM role with a given policy.
:param assume_role_policy_json: A json string that represents the assume
role policy defining what resources are allowed to assume the role.
:param policy_arns: a list of strings representing existing policy arns to
also attach to the role
:return: IAM role object
This method was adapted from the create_iam_role_for_lambda() method found here:
https://docs.aws.amazon.com/code-samples/latest/catalog/python-lambda-boto_client_examples-lambda_basics.py.html
"""
try:
role = iam_resource.create_role(
RoleName=iam_role_name,
AssumeRolePolicyDocument=assume_role_policy_json,
)
# wait for the creation to complete
iam_resource.meta.client.get_waiter("role_exists").wait(RoleName=iam_role_name)
# attach the additional supplied policies
for arn in policy_arns:
role.attach_policy(PolicyArn=arn)
except ClientError as error:
if error.response["Error"]["Code"] == "EntityAlreadyExists":
role = iam_resource.Role(iam_role_name)
logging.warning("The role %s already exists. Using it.", iam_role_name)
return role
else:
logging.error(error.response["Error"]["Message"])
logging.exception(
"Couldn't create role %s or attach policy %s.",
iam_role_name,
str(policy_arns),
)
raise
else:
logging.info("Created IAM role %s.", role.name)
logging.info("Attached policies %s to role %s.", policy_arns, role.name)
return role
def create_policy(policy_name: str, policy_json: str) -> iam_resource.Policy:
"""
Create an IAM policy of given name and json description.
Policies define permissions in AWS and can be associated with IAM roles.
    :param policy_json: must be a valid policy json string
:return: IAM Policy object
"""
try:
policy = iam_resource.create_policy(
PolicyName=policy_name, PolicyDocument=policy_json
)
except ClientError as error:
if error.response["Error"]["Code"] == "EntityAlreadyExists":
policy = get_policy_by_name(policy_name)
logging.warning("The policy %s already exists. Using it.", policy.arn)
return policy
else:
logging.error(error.response["Error"]["Message"])
logging.exception("Couldn't create policy %s", policy_name)
raise
else:
logging.info("Created Policy '%s'", policy_name)
return policy
def get_policy_by_name(policy_name: str) -> iam_resource.Policy:
"""
Get an existing policy by name.
:return: IAM Policy object
"""
# sts provides the account number of the current credentials
account_id = sts_client.get_caller_identity()["Account"]
# policy arns consist of an account id and policy name
policy_arn = f"arn:aws:iam::{account_id}:policy/{policy_name}"
# policies are created in the Python SDK via their arn
policy = iam_resource.Policy(policy_arn)
return policy
def delete_role(iam_role) -> dict:
"""
Delete a role.
:param iam_role: this parameter is an IAM role object, such as returned
by create_role()
"""
try:
# remove all policies before deleting role
for policy in iam_role.attached_policies.all():
policy.detach_role(RoleName=iam_role.name)
response = iam_role.delete()
except ClientError as error:
logging.error(error.response["Error"]["Message"])
logging.error("Couldn't delete role %s", iam_role.name)
else:
logging.info("Deleted role '%s'", iam_role.name)
return response
def delete_policy(iam_policy) -> dict:
"""
    Delete a policy.
:param iam_policy: this parameter is an IAM policy object, such as returned
by create_policy()
"""
try:
response = iam_policy.delete()
except ClientError as error:
logging.error(error.response["Error"]["Message"])
logging.error("Couldn't delete policy %s", iam_policy.arn)
else:
logging.info("Deleted policy '%s'", iam_policy.arn)
return response
if __name__ == "__main__":
# brief functionality test with delete() cleanup at end
policy_json_file = "./policy/lambda_policy.json"
with open(policy_json_file) as file:
policy_json = file.read()
policy_name = "test_policy"
policy = create_policy(policy_name, policy_json)
print("new policy arn: ", policy.arn)
policy.delete()
| [((9, 0, 9, 59), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((11, 15, 11, 36), 'boto3.resource', 'boto3.resource', ({(11, 30, 11, 35): '"""iam"""'}, {}), "('iam')", False, 'import boto3\n'), ((12, 13, 12, 32), 'boto3.client', 'boto3.client', ({(12, 26, 12, 31): '"""sts"""'}, {}), "('sts')", False, 'import boto3\n'), ((55, 8, 55, 55), 'logging.info', 'logging.info', ({(55, 21, 55, 43): '"""Created IAM role %s."""', (55, 45, 55, 54): 'role.name'}, {}), "('Created IAM role %s.', role.name)", False, 'import logging\n'), ((56, 8, 56, 80), 'logging.info', 'logging.info', ({(56, 21, 56, 55): '"""Attached policies %s to role %s."""', (56, 57, 56, 68): 'policy_arns', (56, 70, 56, 79): 'role.name'}, {}), "('Attached policies %s to role %s.', policy_arns, role.name)", False, 'import logging\n'), ((81, 8, 81, 56), 'logging.info', 'logging.info', ({(81, 21, 81, 42): '"""Created Policy \'%s\'"""', (81, 44, 81, 55): 'policy_name'}, {}), '("Created Policy \'%s\'", policy_name)', False, 'import logging\n'), ((114, 8, 114, 56), 'logging.info', 'logging.info', ({(114, 21, 114, 40): '"""Deleted role \'%s\'"""', (114, 42, 114, 55): 'iam_role.name'}, {}), '("Deleted role \'%s\'", iam_role.name)', False, 'import logging\n'), ((130, 8, 130, 59), 'logging.info', 'logging.info', ({(130, 21, 130, 42): '"""Deleted policy \'%s\'"""', (130, 44, 130, 58): 'iam_policy.arn'}, {}), '("Deleted policy \'%s\'", iam_policy.arn)', False, 'import logging\n'), ((111, 8, 111, 57), 'logging.error', 'logging.error', ({(111, 22, 111, 56): "error.response['Error']['Message']"}, {}), "(error.response['Error']['Message'])", False, 'import logging\n'), ((112, 8, 112, 63), 'logging.error', 'logging.error', ({(112, 22, 112, 47): '"""Couldn\'t delete role %s"""', (112, 49, 112, 62): 'iam_role.name'}, {}), '("Couldn\'t delete role %s", iam_role.name)', False, 'import logging\n'), ((127, 8, 127, 57), 'logging.error', 'logging.error', ({(127, 22, 127, 56): "error.response['Error']['Message']"}, {}), "(error.response['Error']['Message'])", False, 'import logging\n'), ((128, 8, 128, 66), 'logging.error', 'logging.error', ({(128, 22, 128, 49): '"""Couldn\'t delete policy %s"""', (128, 51, 128, 65): 'iam_policy.arn'}, {}), '("Couldn\'t delete policy %s", iam_policy.arn)', False, 'import logging\n'), ((44, 12, 44, 83), 'logging.warning', 'logging.warning', ({(44, 28, 44, 67): '"""The role %s already exists. Using it."""', (44, 69, 44, 82): 'iam_role_name'}, {}), "('The role %s already exists. Using it.', iam_role_name)", False, 'import logging\n'), ((47, 12, 47, 61), 'logging.error', 'logging.error', ({(47, 26, 47, 60): "error.response['Error']['Message']"}, {}), "(error.response['Error']['Message'])", False, 'import logging\n'), ((74, 12, 74, 82), 'logging.warning', 'logging.warning', ({(74, 28, 74, 69): '"""The policy %s already exists. Using it."""', (74, 71, 74, 81): 'policy.arn'}, {}), "('The policy %s already exists. Using it.', policy.arn)", False, 'import logging\n'), ((77, 12, 77, 61), 'logging.error', 'logging.error', ({(77, 26, 77, 60): "error.response['Error']['Message']"}, {}), "(error.response['Error']['Message'])", False, 'import logging\n'), ((78, 12, 78, 71), 'logging.exception', 'logging.exception', ({(78, 30, 78, 57): '"""Couldn\'t create policy %s"""', (78, 59, 78, 70): 'policy_name'}, {}), '("Couldn\'t create policy %s", policy_name)', False, 'import logging\n')] |
babatana/stograde | stograde/common/run_status.py | c1c447e99c44c23cef9dd857e669861f3708ae77 | from enum import auto, Enum
class RunStatus(Enum):
SUCCESS = auto()
CALLED_PROCESS_ERROR = auto()
FILE_NOT_FOUND = auto()
PROCESS_LOOKUP_ERROR = auto()
TIMEOUT_EXPIRED = auto()
| [((5, 14, 5, 20), 'enum.auto', 'auto', ({}, {}), '()', False, 'from enum import auto, Enum\n'), ((6, 27, 6, 33), 'enum.auto', 'auto', ({}, {}), '()', False, 'from enum import auto, Enum\n'), ((7, 21, 7, 27), 'enum.auto', 'auto', ({}, {}), '()', False, 'from enum import auto, Enum\n'), ((8, 27, 8, 33), 'enum.auto', 'auto', ({}, {}), '()', False, 'from enum import auto, Enum\n'), ((9, 22, 9, 28), 'enum.auto', 'auto', ({}, {}), '()', False, 'from enum import auto, Enum\n')] |
shenghuiliuu/recsys | recsys/__init__.py | d706d1ae2558816c1e11ca790baeb7748200b404 |
__all__ = ['cross_validation',
'metrics',
'datasets',
'recommender']
| [] |
CostanzoPablo/audiomate | audiomate/annotations/label_list.py | 080402eadaa81f77f64c8680510a2de64bc18e74 | import collections
import copy
import intervaltree
from .label import Label
class LabelList:
"""
Represents a list of labels which describe an utterance.
An utterance can have multiple label-lists.
Args:
idx (str): An unique identifier for the label-list
within a corpus for one utterance.
labels (list): The list containing the
:py:class:`audiomate.annotations.Label`.
Attributes:
utterance (Utterance): The utterance this label-list is belonging to.
label_tree (IntervalTree): The interval-tree storing the labels.
Example:
>>> label_list = LabelList(idx='transcription', labels=[
>>> Label('this', 0, 2),
>>> Label('is', 2, 4),
>>> Label('timmy', 4, 8)
>>> ])
"""
__slots__ = ['idx', 'label_tree', 'utterance']
def __init__(self, idx='default', labels=None):
self.idx = idx
self.utterance = None
self.label_tree = intervaltree.IntervalTree()
if labels is not None:
self.update(labels)
def __eq__(self, other):
data_this = (self.idx, self.label_tree)
data_other = (other.idx, other.label_tree)
return data_this == data_other
def __iter__(self):
for interval in self.label_tree:
yield interval.data
def __len__(self):
return self.label_tree.__len__()
def __copy__(self):
# utterance is ignored intentionally,
# since it is kind of a weak ref
return LabelList(
idx=self.idx,
labels=[iv.data for iv in self.label_tree]
)
def __deepcopy__(self, memo):
# utterance is ignored intentionally,
# since it is kind of a weak ref
return LabelList(
idx=self.idx,
labels=copy.deepcopy([iv.data for iv in self.label_tree], memo)
)
@property
def labels(self):
""" Return list of labels. """
return list(self)
@property
def start(self):
""" Return start of the earliest starting label (lower bound). """
return self.label_tree.begin()
@property
def end(self):
""" Return end of the lastly ending label (upper bound). """
return self.label_tree.end()
@property
def total_length(self):
"""
Return the cumulative length of all labels
(Number of characters).
"""
return sum(label.length for label in self.labels)
#
# Alteration
#
def add(self, label):
"""
Add a label to the end of the list.
Args:
label (Label): The label to add.
"""
label.label_list = self
self.label_tree.addi(label.start, label.end, label)
def addl(self, value, start=0.0, end=float('inf')):
""" Shortcut for ``add(Label(value, start, end))``. """
self.add(Label(value, start=start, end=end))
def update(self, labels):
"""
Add a list of labels to the end of the list.
Args:
labels (list): Labels to add.
"""
ivs = []
for label in labels:
label.label_list = self
ivs.append(intervaltree.Interval(label.start, label.end, label))
self.label_tree.update(ivs)
def apply(self, fn):
"""
Apply the given function `fn` to every label in this label list.
`fn` is a function of one argument that receives the current label
which can then be edited in place.
Args:
fn (func): Function to apply to every label
Example:
>>> ll = LabelList(labels=[
... Label('a_label', 1.0, 2.0),
... Label('another_label', 2.0, 3.0)
... ])
>>> def shift_labels(label):
... label.start += 1.0
... label.end += 1.0
...
>>> ll.apply(shift_labels)
>>> ll.labels
[Label(a_label, 2.0, 3.0), Label(another_label, 3.0, 4.0)]
"""
for label in self.labels:
fn(label)
def merge_overlaps(self, threshold=0.0):
"""
Merge overlapping labels with the same value.
Two labels are considered overlapping,
if ``l2.start - l1.end < threshold``.
Args:
threshold (float): Maximal distance between two labels
to be considered as overlapping.
(default: 0.0)
Example:
>>> ll = LabelList(labels=[
... Label('a_label', 1.0, 2.0),
... Label('a_label', 1.5, 2.7),
... Label('b_label', 1.0, 2.0),
... ])
            >>> ll.merge_overlaps()
>>> ll.labels
[
Label('a_label', 1.0, 2.7),
Label('b_label', 1.0, 2.0),
]
"""
updated_labels = []
all_intervals = self.label_tree.copy()
# recursivly find a group of overlapping labels with the same value
def recursive_overlaps(interval):
range_start = interval.begin - threshold
range_end = interval.end + threshold
direct_overlaps = all_intervals.overlap(range_start, range_end)
all_overlaps = [interval]
all_intervals.discard(interval)
for overlap in direct_overlaps:
if overlap.data.value == interval.data.value:
all_overlaps.extend(recursive_overlaps(overlap))
return all_overlaps
# For every remaining interval
# - Find overlapping intervals recursively
# - Remove them
# - Create a concatenated new label
while not all_intervals.is_empty():
next_interval = list(all_intervals)[0]
overlapping = recursive_overlaps(next_interval)
ov_start = float('inf')
ov_end = 0.0
ov_value = next_interval.data.value
for overlap in overlapping:
ov_start = min(ov_start, overlap.begin)
ov_end = max(ov_end, overlap.end)
all_intervals.discard(overlap)
updated_labels.append(Label(
ov_value,
ov_start,
ov_end
))
# Replace the old labels with the updated ones
self.label_tree.clear()
self.update(updated_labels)
#
# Statistics
#
def label_total_duration(self):
"""
Return for each distinct label value the total duration of
all occurrences.
Returns:
dict: A dictionary containing for every label-value (key)
the total duration in seconds (value).
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3, 5),
>>> Label('b', 5, 8),
>>> Label('a', 8, 10),
>>> Label('b', 10, 14),
>>> Label('a', 15, 18.5)
>>> ])
>>> ll.label_total_duration()
            {'a': 7.5, 'b': 7.0}
"""
durations = collections.defaultdict(float)
for label in self:
durations[label.value] += label.duration
return durations
def label_values(self):
"""
        Return a list of all occurring label values.
Returns:
list: Lexicographically sorted list (str) of label values.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14),
>>> Label('d', 15, 18)
>>> ])
>>> ll.label_values()
['a', 'b', 'c', 'd']
"""
all_labels = {l.value for l in self}
return sorted(all_labels)
def label_count(self):
"""
Return for each label the number of occurrences within the list.
Returns:
dict: A dictionary containing for every label-value (key)
the number of occurrences (value).
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('a', 7.2, 10.5),
>>> Label('b', 10.5, 14),
>>> Label('a', 15, 18)
>>> ])
>>> ll.label_count()
            {'a': 3, 'b': 2}
"""
occurrences = collections.defaultdict(int)
for label in self:
occurrences[label.value] += 1
return occurrences
def all_tokens(self, delimiter=' '):
"""
Return a list of all tokens occurring in the label-list.
Args:
delimiter (str): The delimiter used to split labels into tokens.
See :meth:`audiomate.annotations.Label.tokenized`
Returns:
:class:`set`: A set of distinct tokens.
"""
tokens = set()
for label in self:
tokens = tokens.union(set(label.tokenized(delimiter=delimiter)))
return tokens
#
# Query Label Values
#
def join(self, delimiter=' ', overlap_threshold=0.1):
"""
Return a string with all labels concatenated together.
The order of the labels is defined by the start of the label.
If the overlapping between two labels is greater than
``overlap_threshold``, an Exception is thrown.
Args:
delimiter (str): A string to join two consecutive labels.
overlap_threshold (float): Maximum overlap between two
consecutive labels.
Returns:
str: A string with all labels concatenated together.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('c', start=7.0, end=10.2),
>>> Label('d', start=10.3, end=14.0)
>>> ])
>>> ll.join(' - ')
'a - b - c - d'
"""
sorted_by_start = sorted(self.labels)
concat_values = []
last_label_end = None
for label in sorted_by_start:
if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0):
concat_values.append(label.value)
last_label_end = label.end
else:
raise ValueError('Labels overlap, not able to define the correct order')
return delimiter.join(concat_values)
def tokenized(self, delimiter=' ', overlap_threshold=0.1):
"""
        Return an ordered list of tokens based on all labels.
        Joins all tokens from all labels (``label.tokenized()``).
If the overlapping between two labels is greater than
``overlap_threshold``, an Exception is thrown.
Args:
delimiter (str): The delimiter used to split labels into tokens.
(default: space)
overlap_threshold (float): Maximum overlap between two
consecutive labels.
Returns:
str: A list containing tokens of all labels ordered according
to the label order.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a d q', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('c a', start=7.0, end=10.2),
>>> Label('f g', start=10.3, end=14.0)
>>> ])
>>> ll.tokenized(delimiter=' ', overlap_threshold=0.1)
['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g']
"""
sorted_by_start = sorted(self.labels)
tokens = []
last_label_end = None
for label in sorted_by_start:
if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0):
tokens.extend(label.tokenized(delimiter=delimiter))
last_label_end = label.end
else:
raise ValueError('Labels overlap, not able to define the correct order')
return tokens
#
# Restructuring
#
def separated(self):
"""
Create a separate Label-List for every distinct label-value.
Returns:
dict: A dictionary with distinct label-values as keys. Every value
is a LabelList containing only labels with the same value.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('a', start=7.0, end=10.2),
>>> Label('b', start=10.3, end=14.0)
>>> ])
            >>> s = ll.separated()
>>> s['a'].labels
[Label('a', start=0, end=4), Label('a', start=7.0, end=10.2)]
>>> s['b'].labels
[Label('b', start=3.95, end=6.0), Label('b', start=10.3, end=14.0)]
"""
separated_lls = collections.defaultdict(LabelList)
for label in self.labels:
separated_lls[label.value].add(label)
for ll in separated_lls.values():
ll.idx = self.idx
return separated_lls
def labels_in_range(self, start, end, fully_included=False):
"""
Return a list of labels, that are within the given range.
Also labels that only overlap are included.
Args:
start(float): Start-time in seconds.
end(float): End-time in seconds.
fully_included(bool): If ``True``, only labels fully included
in the range are returned. Otherwise
also overlapping ones are returned.
(default ``False``)
Returns:
list: List of labels in the range.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14)
>>>])
>>> ll.labels_in_range(6.2, 10.1)
[Label('b', 5.1, 8.9), Label('c', 7.2, 10.5)]
"""
if fully_included:
intervals = self.label_tree.envelop(start, end)
else:
intervals = self.label_tree.overlap(start, end)
return [iv.data for iv in intervals]
def ranges(self, yield_ranges_without_labels=False, include_labels=None):
"""
Generate all ranges of the label-list. A range is defined
as a part of the label-list for which the same labels are defined.
Args:
yield_ranges_without_labels(bool): If True also yields ranges for
which no labels are defined.
include_labels(list): If not empty, only the label values in
the list will be considered.
Returns:
generator: A generator which yields one range
(tuple start/end/list-of-labels) at a time.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14)
>>>])
>>> ranges = ll.ranges()
>>> next(ranges)
            (3.2, 4.5, [<audiomate.annotations.Label at 0x1090527c8>])
>>> next(ranges)
(4.5, 5.1, [])
>>> next(ranges)
            (5.1, 7.2, [<audiomate.annotations.label.Label at 0x1090484c8>])
"""
tree_copy = self.label_tree.copy()
# Remove labels not included
if include_labels is not None:
for iv in list(tree_copy):
if iv.data.value not in include_labels:
tree_copy.remove(iv)
def reduce(x, y):
x.append(y)
return x
# Split labels when overlapping and merge equal ranges to a list of labels
tree_copy.split_overlaps()
tree_copy.merge_equals(data_reducer=reduce, data_initializer=[])
intervals = sorted(tree_copy)
last_end = intervals[0].begin
# yield range by range
for iv in intervals:
# yield an empty range if necessary
if yield_ranges_without_labels and iv.begin > last_end:
yield (last_end, iv.begin, [])
yield (iv.begin, iv.end, iv.data)
last_end = iv.end
def split(self, cutting_points, shift_times=False, overlap=0.0):
"""
Split the label-list into x parts and return them as new label-lists.
x is defined by the number of cutting-points
(``x == len(cutting_points) + 1``).
The result is a list of label-lists corresponding to each part.
Label-list 0 contains labels between ``0`` and ``cutting_points[0]``.
Label-list 1 contains labels between ``cutting_points[0]`` and
``cutting_points[1]``. And so on.
Args:
cutting_points(list): List of floats defining the points in seconds,
                                  where the label-list is split.
shift_times(bool): If True, start and end-time are shifted in
                               split label-lists. So the start is relative
to the cutting point and not to the beginning
of the original label-list.
overlap(float): Amount of overlap in seconds. This amount is
subtracted from a start-cutting-point, and added
to a end-cutting-point.
Returns:
            list: A list of :class:`audiomate.annotations.LabelList`.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>> Label('c', 11, 15),
>>>])
>>>
>>> res = ll.split([4.1, 8.9, 12.0])
>>> len(res)
4
>>> res[0].labels
[Label('a', 0.0, 4.1)]
>>> res[1].labels
[
Label('a', 4.1, 5.0),
Label('b', 5.0, 8.9)
]
>>> res[2].labels
[
Label('b', 8.9, 10.0),
Label('c', 11.0, 12.0)
]
>>> res[3].labels
[Label('c', 12.0, 15.0)]
If ``shift_times = True``, the times are adjusted to be relative
to the cutting-points for every label-list but the first.
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>>])
>>>
>>> res = ll.split([4.6])
>>> len(res)
            2
>>> res[0].labels
[Label('a', 0.0, 4.6)]
>>> res[1].labels
[
Label('a', 0.0, 0.4),
Label('b', 0.4, 5.4)
]
"""
if len(cutting_points) == 0:
raise ValueError('At least one cutting-point is needed!')
# we have to loop in sorted order
cutting_points = sorted(cutting_points)
splits = []
iv_start = 0.0
for i in range(len(cutting_points) + 1):
if i < len(cutting_points):
iv_end = cutting_points[i]
else:
iv_end = float('inf')
# get all intervals intersecting range
intervals = self.label_tree.overlap(
iv_start - overlap,
iv_end + overlap
)
cp_splits = LabelList(idx=self.idx)
# Extract labels from intervals with updated times
for iv in intervals:
label = copy.deepcopy(iv.data)
label.start = max(0, iv_start - overlap, label.start)
label.end = min(iv_end + overlap, label.end)
if shift_times:
orig_start = max(0, iv_start - overlap)
label.start -= orig_start
label.end -= orig_start
cp_splits.add(label)
splits.append(cp_splits)
iv_start = iv_end
return splits
#
# Convenience Constructors
#
@classmethod
def create_single(cls, value, idx='default'):
"""
Create a label-list with a single label
containing the given value.
"""
return LabelList(idx=idx, labels=[
Label(value=value)
])
@classmethod
def with_label_values(cls, values, idx='default'):
"""
Create a new label-list containing labels with the given values.
All labels will have default start/end values of 0 and ``inf``.
Args:
values(list): List of values(str) that should be created and
appended to the label-list.
idx(str): The idx of the label-list.
Returns:
(LabelList): New label-list.
Example:
>>> ll = LabelList.with_label_values(['a', 'x', 'z'], idx='letters')
>>> ll.idx
'letters'
>>> ll.labels
[
Label('a', 0, inf),
Label('x', 0, inf),
Label('z', 0, inf),
]
"""
ll = LabelList(idx=idx)
for label_value in values:
ll.add(Label(label_value))
return ll
| [((38, 26, 38, 53), 'intervaltree.IntervalTree', 'intervaltree.IntervalTree', ({}, {}), '()', False, 'import intervaltree\n'), ((247, 20, 247, 50), 'collections.defaultdict', 'collections.defaultdict', ({(247, 44, 247, 49): 'float'}, {}), '(float)', False, 'import collections\n'), ((296, 22, 296, 50), 'collections.defaultdict', 'collections.defaultdict', ({(296, 46, 296, 49): 'int'}, {}), '(int)', False, 'import collections\n'), ((430, 24, 430, 58), 'collections.defaultdict', 'collections.defaultdict', ({(430, 48, 430, 57): 'LabelList'}, {}), '(LabelList)', False, 'import collections\n'), ((68, 19, 68, 75), 'copy.deepcopy', 'copy.deepcopy', ({(68, 33, 68, 68): '[iv.data for iv in self.label_tree]', (68, 70, 68, 74): 'memo'}, {}), '([iv.data for iv in self.label_tree], memo)', False, 'import copy\n'), ((123, 23, 123, 75), 'intervaltree.Interval', 'intervaltree.Interval', ({(123, 45, 123, 56): 'label.start', (123, 58, 123, 67): 'label.end', (123, 69, 123, 74): 'label'}, {}), '(label.start, label.end, label)', False, 'import intervaltree\n'), ((631, 24, 631, 46), 'copy.deepcopy', 'copy.deepcopy', ({(631, 38, 631, 45): 'iv.data'}, {}), '(iv.data)', False, 'import copy\n')] |
RubyMarsden/Crayfish | src/views/age_results_widget.py | 33bbb1248beec2fc40eee59e462711dd8cbc33da | import matplotlib
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QHBoxLayout, QDialog, QPushButton, QWidget, QVBoxLayout, QLabel
matplotlib.use('QT5Agg')
import matplotlib.pyplot as plt
from models.data_key import DataKey
from utils import ui_utils
class AgeResultsWidget(QWidget):
def __init__(self, results_dialog):
QWidget.__init__(self)
self.results_dialog = results_dialog
layout = QHBoxLayout()
layout.addLayout(self._create_widget())
self.setLayout(layout)
results_dialog.sample_tree.tree.currentItemChanged.connect(lambda i, j: self.replot_graph())
results_dialog.configuration_changed.connect(self.replot_graph)
def _create_widget(self):
layout = QVBoxLayout()
layout.addWidget(QLabel("Sample and spot name"))
layout.addWidget(self._create_age_graph_and_point_selection())
return layout
def _create_age_graph_and_point_selection(self):
graph_and_points = QWidget()
layout = QVBoxLayout()
fig = plt.figure()
self.axes = plt.axes()
graph_widget, self.canvas = ui_utils.create_figure_widget(fig, self)
layout.addWidget(graph_widget)
graph_and_points.setLayout(layout)
return graph_and_points
###############
### Actions ###
###############
def replot_graph(self):
current_spot = self.results_dialog.sample_tree.current_spot()
config = self.results_dialog.configuration_widget.current_config
if config and current_spot:
self.plot_cps_graph(current_spot, config)
def plot_cps_graph(self, spot, config):
axis = self.axes
axis.clear()
if spot is None:
return
axis.spines['top'].set_visible(False)
axis.spines['right'].set_visible(False)
xs = []
ys = []
errors = []
if DataKey.AGES not in spot.data[config]:
# TODO plot words on graph
return
ages = spot.data[config][DataKey.AGES]
if len(ages) != 0:
for i, age in enumerate(ages):
if isinstance(age, str):
continue
x = i + 1
y, dy = age
xs.append(x)
if y is None:
ys.append(0)
errors.append(0)
else:
ys.append(y)
errors.append(dy)
else:
# TODO plot some text
return
weighted_age, age_st_dev = spot.data[config][DataKey.WEIGHTED_AGE]
if isinstance(weighted_age, str):
string = "No weighted age"
else:
string = f"Weighted age: {weighted_age:.0f}, 1σ: {age_st_dev:.0f}"
axis.errorbar(xs, ys, yerr=errors, linestyle="none", marker='o')
axis.text(0.5, 1, string, transform=axis.transAxes, horizontalalignment="center")
axis.set_xlabel("Scan number")
axis.set_ylabel("Age (ka)")
self.canvas.draw()
| [((5, 0, 5, 24), 'matplotlib.use', 'matplotlib.use', ({(5, 15, 5, 23): '"""QT5Agg"""'}, {}), "('QT5Agg')", False, 'import matplotlib\n'), ((14, 8, 14, 30), 'PyQt5.QtWidgets.QWidget.__init__', 'QWidget.__init__', ({(14, 25, 14, 29): 'self'}, {}), '(self)', False, 'from PyQt5.QtWidgets import QHBoxLayout, QDialog, QPushButton, QWidget, QVBoxLayout, QLabel\n'), ((18, 17, 18, 30), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ({}, {}), '()', False, 'from PyQt5.QtWidgets import QHBoxLayout, QDialog, QPushButton, QWidget, QVBoxLayout, QLabel\n'), ((26, 17, 26, 30), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ({}, {}), '()', False, 'from PyQt5.QtWidgets import QHBoxLayout, QDialog, QPushButton, QWidget, QVBoxLayout, QLabel\n'), ((34, 27, 34, 36), 'PyQt5.QtWidgets.QWidget', 'QWidget', ({}, {}), '()', False, 'from PyQt5.QtWidgets import QHBoxLayout, QDialog, QPushButton, QWidget, QVBoxLayout, QLabel\n'), ((35, 17, 35, 30), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ({}, {}), '()', False, 'from PyQt5.QtWidgets import QHBoxLayout, QDialog, QPushButton, QWidget, QVBoxLayout, QLabel\n'), ((37, 14, 37, 26), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((38, 20, 38, 30), 'matplotlib.pyplot.axes', 'plt.axes', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((40, 36, 40, 76), 'utils.ui_utils.create_figure_widget', 'ui_utils.create_figure_widget', ({(40, 66, 40, 69): 'fig', (40, 71, 40, 75): 'self'}, {}), '(fig, self)', False, 'from utils import ui_utils\n'), ((27, 25, 27, 55), 'PyQt5.QtWidgets.QLabel', 'QLabel', ({(27, 32, 27, 54): '"""Sample and spot name"""'}, {}), "('Sample and spot name')", False, 'from PyQt5.QtWidgets import QHBoxLayout, QDialog, QPushButton, QWidget, QVBoxLayout, QLabel\n')] |
EnergyModels/OCAES | examples/single_run/ocaes_single_run.py | d848d9fa621767e036824110de87450d524b7687 | import pandas as pd
from OCAES import ocaes
# ----------------------
# create and run model
# ----------------------
data = pd.read_csv('timeseries_inputs_2019.csv')
inputs = ocaes.get_default_inputs()
# inputs['C_well'] = 5000.0
# inputs['X_well'] = 50.0
# inputs['L_well'] = 50.0
# inputs['X_cmp'] = 0
# inputs['X_exp'] = 0
model = ocaes(data, inputs)
df, s = model.get_full_results()
revenue, LCOE, COVE, avoided_emissions = model.post_process(s)
s['revenue'] = revenue
s['LCOE'] = LCOE
s['COVE'] = COVE
s['avoided_emissions'] = avoided_emissions
df.to_csv('results_timeseries.csv')
s.to_csv('results_values.csv')
print(model.calculate_LCOE(s))
# ----------------------
# create plots using built-in functions
# ----------------------
model.plot_overview()
model.plot_power_energy()
| [((7, 7, 7, 48), 'pandas.read_csv', 'pd.read_csv', ({(7, 19, 7, 47): '"""timeseries_inputs_2019.csv"""'}, {}), "('timeseries_inputs_2019.csv')", True, 'import pandas as pd\n'), ((8, 9, 8, 35), 'OCAES.ocaes.get_default_inputs', 'ocaes.get_default_inputs', ({}, {}), '()', False, 'from OCAES import ocaes\n'), ((14, 8, 14, 27), 'OCAES.ocaes', 'ocaes', ({(14, 14, 14, 18): 'data', (14, 20, 14, 26): 'inputs'}, {}), '(data, inputs)', False, 'from OCAES import ocaes\n')] |
am-ivanov/dace | tests/transformations/local_storage_test.py | c35f0b3cecc04a2c9fb668bd42a72045891e7a42 | import unittest
import dace
import numpy as np
from dace.transformation.dataflow import MapTiling, OutLocalStorage
N = dace.symbol('N')
@dace.program
def arange():
out = np.ndarray([N], np.int32)
for i in dace.map[0:N]:
with dace.tasklet:
o >> out[i]
o = i
return out
class LocalStorageTests(unittest.TestCase):
def test_even(self):
sdfg = arange.to_sdfg()
sdfg.apply_transformations([MapTiling, OutLocalStorage],
options=[{
'tile_sizes': [8]
}, {}])
self.assertTrue(
np.array_equal(sdfg(N=16), np.arange(16, dtype=np.int32)))
def test_uneven(self):
# For testing uneven decomposition, use longer buffer and ensure
# it's not filled over
output = np.ones(20, np.int32)
sdfg = arange.to_sdfg()
sdfg.apply_transformations([MapTiling, OutLocalStorage],
options=[{
'tile_sizes': [5]
}, {}])
dace.propagate_memlets_sdfg(sdfg)
sdfg(N=16, __return=output)
self.assertTrue(
np.array_equal(output[:16], np.arange(16, dtype=np.int32)))
self.assertTrue(np.array_equal(output[16:], np.ones(4, np.int32)))
if __name__ == '__main__':
unittest.main()
| [((6, 4, 6, 20), 'dace.symbol', 'dace.symbol', ({(6, 16, 6, 19): '"""N"""'}, {}), "('N')", False, 'import dace\n'), ((11, 10, 11, 35), 'numpy.ndarray', 'np.ndarray', ({(11, 21, 11, 24): '[N]', (11, 26, 11, 34): 'np.int32'}, {}), '([N], np.int32)', True, 'import numpy as np\n'), ((46, 4, 46, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((32, 17, 32, 38), 'numpy.ones', 'np.ones', ({(32, 25, 32, 27): '20', (32, 29, 32, 37): 'np.int32'}, {}), '(20, np.int32)', True, 'import numpy as np\n'), ((38, 8, 38, 41), 'dace.propagate_memlets_sdfg', 'dace.propagate_memlets_sdfg', ({(38, 36, 38, 40): 'sdfg'}, {}), '(sdfg)', False, 'import dace\n'), ((27, 39, 27, 68), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((41, 40, 41, 69), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((42, 52, 42, 72), 'numpy.ones', 'np.ones', ({(42, 60, 42, 61): '(4)', (42, 63, 42, 71): 'np.int32'}, {}), '(4, np.int32)', True, 'import numpy as np\n')] |
jayvdb/astropy | astropy/io/fits/hdu/streaming.py | bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import os
from .base import _BaseHDU, BITPIX2DTYPE
from .hdulist import HDUList
from .image import PrimaryHDU
from astropy.io.fits.file import _File
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import fileobj_name
class StreamingHDU:
"""
A class that provides the capability to stream data to a FITS file
instead of requiring data to all be written at once.
The following pseudocode illustrates its use::
header = astropy.io.fits.Header()
for all the cards you need in the header:
header[key] = (value, comment)
shdu = astropy.io.fits.StreamingHDU('filename.fits', header)
for each piece of data:
shdu.write(data)
shdu.close()
"""
def __init__(self, name, header):
"""
Construct a `StreamingHDU` object given a file name and a header.
Parameters
----------
name : file path, file object, or file like object
The file to which the header and data will be streamed. If opened,
the file object must be opened in a writeable binary mode such as
'wb' or 'ab+'.
header : `Header` instance
The header object associated with the data to be written
to the file.
Notes
-----
The file will be opened and the header appended to the end of
the file. If the file does not already exist, it will be
created, and if the header represents a Primary header, it
will be written to the beginning of the file. If the file
does not exist and the provided header is not a Primary
header, a default Primary HDU will be inserted at the
beginning of the file and the provided header will be added as
        the first extension. If the file already exists, but the
provided header represents a Primary header, the header will
be modified to an image extension header and appended to the
end of the file.
"""
if isinstance(name, gzip.GzipFile):
raise TypeError('StreamingHDU not supported for GzipFile objects.')
self._header = header.copy()
# handle a file object instead of a file name
filename = fileobj_name(name) or ''
# Check if the file already exists. If it does not, check to see
# if we were provided with a Primary Header. If not we will need
# to prepend a default PrimaryHDU to the file before writing the
# given header.
newfile = False
if filename:
if not os.path.exists(filename) or os.path.getsize(filename) == 0:
newfile = True
elif (hasattr(name, 'len') and name.len == 0):
newfile = True
if newfile:
if 'SIMPLE' not in self._header:
hdulist = HDUList([PrimaryHDU()])
hdulist.writeto(name, 'exception')
else:
# This will not be the first extension in the file so we
# must change the Primary header provided into an image
# extension header.
if 'SIMPLE' in self._header:
self._header.set('XTENSION', 'IMAGE', 'Image extension',
after='SIMPLE')
del self._header['SIMPLE']
if 'PCOUNT' not in self._header:
dim = self._header['NAXIS']
if dim == 0:
dim = ''
else:
dim = str(dim)
self._header.set('PCOUNT', 0, 'number of parameters',
after='NAXIS' + dim)
if 'GCOUNT' not in self._header:
self._header.set('GCOUNT', 1, 'number of groups',
after='PCOUNT')
self._ffo = _File(name, 'append')
# TODO : Fix this once the HDU writing API is cleaned up
tmp_hdu = _BaseHDU()
# Passing self._header as an argument to _BaseHDU() will cause its
# values to be modified in undesired ways...need to have a better way
# of doing this
tmp_hdu._header = self._header
self._header_offset = tmp_hdu._writeheader(self._ffo)[0]
self._data_offset = self._ffo.tell()
self._size = self.size
if self._size != 0:
self.writecomplete = False
else:
self.writecomplete = True
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def write(self, data):
"""
Write the given data to the stream.
Parameters
----------
data : ndarray
Data to stream to the file.
Returns
-------
writecomplete : int
Flag that when `True` indicates that all of the required
data has been written to the stream.
Notes
-----
Only the amount of data specified in the header provided to the class
constructor may be written to the stream. If the provided data would
cause the stream to overflow, an `OSError` exception is
raised and the data is not written. Once sufficient data has been
written to the stream to satisfy the amount specified in the header,
the stream is padded to fill a complete FITS block and no more data
will be accepted. An attempt to write more data after the stream has
been filled will raise an `OSError` exception. If the
dtype of the input data does not match what is expected by the header,
a `TypeError` exception is raised.
"""
size = self._ffo.tell() - self._data_offset
if self.writecomplete or size + data.nbytes > self._size:
raise OSError('Attempt to write more data to the stream than the '
'header specified.')
if BITPIX2DTYPE[self._header['BITPIX']] != data.dtype.name:
raise TypeError('Supplied data does not match the type specified '
'in the header.')
if data.dtype.str[0] != '>':
# byteswap little endian arrays before writing
output = data.byteswap()
else:
output = data
self._ffo.writearray(output)
if self._ffo.tell() - self._data_offset == self._size:
# the stream is full so pad the data to the next FITS block
self._ffo.write(_pad_length(self._size) * '\0')
self.writecomplete = True
self._ffo.flush()
return self.writecomplete
@property
def size(self):
"""
Return the size (in bytes) of the data portion of the HDU.
"""
size = 0
naxis = self._header.get('NAXIS', 0)
if naxis > 0:
simple = self._header.get('SIMPLE', 'F')
random_groups = self._header.get('GROUPS', 'F')
if simple == 'T' and random_groups == 'T':
groups = 1
else:
groups = 0
size = 1
for idx in range(groups, naxis):
size = size * self._header['NAXIS' + str(idx + 1)]
bitpix = self._header['BITPIX']
gcount = self._header.get('GCOUNT', 1)
pcount = self._header.get('PCOUNT', 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
def close(self):
"""
Close the physical FITS file.
"""
self._ffo.close()
| [((116, 20, 116, 41), 'astropy.io.fits.file._File', '_File', ({(116, 26, 116, 30): 'name', (116, 32, 116, 40): '"""append"""'}, {}), "(name, 'append')", False, 'from astropy.io.fits.file import _File\n'), ((71, 19, 71, 37), 'astropy.io.fits.util.fileobj_name', 'fileobj_name', ({(71, 32, 71, 36): 'name'}, {}), '(name)', False, 'from astropy.io.fits.util import fileobj_name\n'), ((81, 19, 81, 43), 'os.path.exists', 'os.path.exists', ({(81, 34, 81, 42): 'filename'}, {}), '(filename)', False, 'import os\n'), ((81, 47, 81, 72), 'os.path.getsize', 'os.path.getsize', ({(81, 63, 81, 71): 'filename'}, {}), '(filename)', False, 'import os\n'), ((189, 28, 189, 51), 'astropy.io.fits.header._pad_length', '_pad_length', ({(189, 40, 189, 50): 'self._size'}, {}), '(self._size)', False, 'from astropy.io.fits.header import _pad_length\n')] |
groupe-conseil-nutshimit-nippour/django-geoprisma | geoprisma/tests/test_templatetags.py | 4732fdb8a0684eb4d7fd50aa43e11b454ee71d08 | import django
from django.test import TestCase
from django.template import Template, Context
class genericObj(object):
"""
A generic object for testing templatetags
"""
def __init__(self):
self.name = "test"
self.status = "ready"
def getOption(self, optionName):
if optionName == "name":
return self.name
elif optionName == "status":
return self.status
def getName(self):
return self.name
def render(template_string, context_dict=None):
"""
A shortcut for testing template output.
"""
if context_dict is None:
context_dict = {}
c = Context(context_dict)
t = Template(template_string)
return t.render(c).strip()
class object_extrasTests(TestCase):
def test_callMethod(self):
genObj = genericObj()
template = """
{% load object_extras %}
{{ obj|args:"name"|call:"getOption" }}
"""
context = {
'obj': genObj
}
self.assertEqual(render(template, context), "test")
template = """
{% load object_extras %}
{{ obj|call:"getName" }}
"""
context = {
'obj': genObj
}
self.assertEqual(render(template, context), "test")
def test_check_type(self):
genObj = genericObj()
template = """
{% load object_extras %}
{{ obj|obj_type:"genericObj" }}
"""
context = {
'obj': genObj
}
self.assertEqual(render(template, context), "True")
template = """
{% load object_extras %}
{{ obj|obj_type:"notexist" }}
"""
context = {
'obj': genObj
}
self.assertEqual(render(template, context), "False")
class static_extrasTests(TestCase):
def setUp(self):
self.widgetTypeSetJs = set()
self.widgetTypeSetJs.add('queryonclick')
self.widgetTypeSetCss = set()
self.widgetTypeSetCss.add('geoexttoolbar')
def test_getJsStatics(self):
template = """
{% load staticfiles %}
{% load static_extras %}
{% getJsStatics widgetTypeSet as widget_js %}
{% for static_path in widget_js %}
<script src="{% static static_path %}" type="text/javascript"></script>
{% endfor %}
"""
context = {
'widgetTypeSet': self.widgetTypeSetJs
}
out = '<script src="/static/geoprisma/widgets/queryonclick/js/QueryOnClick.js" type="text/javascript"></script>'
self.assertEqual(render(template, context), out)
def test_getCssStatics(self):
template = """
{% load staticfiles %}
{% load static_extras %}
{% getCssStatics widgetTypeSet as widget_css %}
{% for static_path in widget_css %}
<link rel="stylesheet" type="text/css" href="{% static static_path %}" />
{% endfor %}
"""
context = {
'widgetTypeSet': self.widgetTypeSetCss
}
out = '<link rel="stylesheet" type="text/css" href="/static/geoprisma/widgets/geoexttoolbar/css/GeoExtToolbar.css" />'
self.assertEqual(render(template, context), out)
def test_template_exist(self):
template = """
{% load static_extras %}
{{ "geoprisma/widgets/queryonclick/queryonclick.html"|template_exists }}
"""
self.assertEqual(render(template), "True")
template = """
{% load static_extras %}
{{ "geoprisma/widgets/queryonclick/queryonclicknotexist.html"|template_exists }}
"""
self.assertEqual(render(template), "False")
| [((31, 8, 31, 29), 'django.template.Context', 'Context', ({(31, 16, 31, 28): 'context_dict'}, {}), '(context_dict)', False, 'from django.template import Template, Context\n'), ((32, 8, 32, 33), 'django.template.Template', 'Template', ({(32, 17, 32, 32): 'template_string'}, {}), '(template_string)', False, 'from django.template import Template, Context\n')] |
acidburn0zzz/ggrc-core | src/ggrc_workflows/models/task_group.py | 386781d08172102eb51030b65db8212974651628 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""A module containing the workflow TaskGroup model."""
from sqlalchemy import or_
from ggrc import db
from ggrc.login import get_current_user
from ggrc.models.associationproxy import association_proxy
from ggrc.models.mixins import (
Titled, Slugged, Described, Timeboxed, WithContact
)
from ggrc.models.reflection import AttributeInfo
from ggrc.models.reflection import PublishOnly
from ggrc.models import all_models
from ggrc_workflows.models.task_group_object import TaskGroupObject
class TaskGroup(
WithContact, Timeboxed, Described, Titled, Slugged, db.Model):
"""Workflow TaskGroup model."""
__tablename__ = 'task_groups'
_title_uniqueness = False
workflow_id = db.Column(
db.Integer,
db.ForeignKey('workflows.id', ondelete="CASCADE"),
nullable=False,
)
lock_task_order = db.Column(db.Boolean(), nullable=True)
task_group_objects = db.relationship(
'TaskGroupObject', backref='task_group', cascade='all, delete-orphan')
objects = association_proxy(
'task_group_objects', 'object', 'TaskGroupObject')
task_group_tasks = db.relationship(
'TaskGroupTask', backref='task_group', cascade='all, delete-orphan')
cycle_task_groups = db.relationship(
'CycleTaskGroup', backref='task_group')
sort_index = db.Column(
db.String(length=250), default="", nullable=False)
_publish_attrs = [
'workflow',
'task_group_objects',
PublishOnly('objects'),
'task_group_tasks',
'lock_task_order',
'sort_index',
# Intentionally do not include `cycle_task_groups`
# 'cycle_task_groups',
]
_aliases = {
"title": "Summary",
"description": "Details",
"contact": {
"display_name": "Assignee",
"mandatory": True,
"filter_by": "_filter_by_contact",
},
"secondary_contact": None,
"start_date": None,
"end_date": None,
"workflow": {
"display_name": "Workflow",
"mandatory": True,
"filter_by": "_filter_by_workflow",
},
"task_group_objects": {
"display_name": "Objects",
"type": AttributeInfo.Type.SPECIAL_MAPPING,
"filter_by": "_filter_by_objects",
},
}
def copy(self, _other=None, **kwargs):
columns = [
'title', 'description', 'workflow', 'sort_index', 'modified_by',
'context'
]
if kwargs.get('clone_people', False) and getattr(self, "contact"):
columns.append("contact")
else:
kwargs["contact"] = get_current_user()
target = self.copy_into(_other, columns, **kwargs)
if kwargs.get('clone_objects', False):
self.copy_objects(target, **kwargs)
if kwargs.get('clone_tasks', False):
self.copy_tasks(target, **kwargs)
return target
def copy_objects(self, target, **kwargs):
# pylint: disable=unused-argument
for task_group_object in self.task_group_objects:
target.task_group_objects.append(task_group_object.copy(
task_group=target,
context=target.context,
))
return target
def copy_tasks(self, target, **kwargs):
for task_group_task in self.task_group_tasks:
target.task_group_tasks.append(task_group_task.copy(
None,
task_group=target,
context=target.context,
clone_people=kwargs.get("clone_people", False),
))
return target
@classmethod
def _filter_by_workflow(cls, predicate):
from ggrc_workflows.models import Workflow
return Workflow.query.filter(
(Workflow.id == cls.workflow_id) &
(predicate(Workflow.slug) | predicate(Workflow.title))
).exists()
@classmethod
def _filter_by_objects(cls, predicate):
parts = []
for model_name in all_models.__all__:
model = getattr(all_models, model_name)
query = getattr(model, "query", None)
field = getattr(model, "slug", getattr(model, "email", None))
if query is None or field is None or not hasattr(model, "id"):
continue
parts.append(query.filter(
(TaskGroupObject.object_type == model_name) &
(model.id == TaskGroupObject.object_id) &
predicate(field)
).exists())
return TaskGroupObject.query.filter(
(TaskGroupObject.task_group_id == cls.id) &
or_(*parts)
).exists()
| [((35, 23, 36, 76), 'ggrc.db.relationship', 'db.relationship', (), '', False, 'from ggrc import db\n'), ((38, 12, 39, 56), 'ggrc.models.associationproxy.association_proxy', 'association_proxy', ({(39, 6, 39, 26): '"""task_group_objects"""', (39, 28, 39, 36): '"""object"""', (39, 38, 39, 55): '"""TaskGroupObject"""'}, {}), "('task_group_objects', 'object', 'TaskGroupObject')", False, 'from ggrc.models.associationproxy import association_proxy\n'), ((41, 21, 42, 74), 'ggrc.db.relationship', 'db.relationship', (), '', False, 'from ggrc import db\n'), ((44, 22, 45, 45), 'ggrc.db.relationship', 'db.relationship', (), '', False, 'from ggrc import db\n'), ((30, 6, 30, 55), 'ggrc.db.ForeignKey', 'db.ForeignKey', (), '', False, 'from ggrc import db\n'), ((33, 30, 33, 42), 'ggrc.db.Boolean', 'db.Boolean', ({}, {}), '()', False, 'from ggrc import db\n'), ((48, 6, 48, 27), 'ggrc.db.String', 'db.String', (), '', False, 'from ggrc import db\n'), ((53, 6, 53, 28), 'ggrc.models.reflection.PublishOnly', 'PublishOnly', ({(53, 18, 53, 27): '"""objects"""'}, {}), "('objects')", False, 'from ggrc.models.reflection import PublishOnly\n'), ((93, 26, 93, 44), 'ggrc.login.get_current_user', 'get_current_user', ({}, {}), '()', False, 'from ggrc.login import get_current_user\n'), ((150, 8, 150, 19), 'sqlalchemy.or_', 'or_', ({(150, 12, 150, 18): '*parts'}, {}), '(*parts)', False, 'from sqlalchemy import or_\n')] |
DanielNoord/DuolingoPomodoro | src/tests/app_functions/menu/test_change_auto_login.py | 307b386daf3216fb9ba86f983f0e39f6647ffd64 | import pytest
import rumps
from src.app_functions.menu.change_auto_login import change_auto_login
@pytest.fixture(name="basic_app")
def create_app():
"""Creates a basic app object with some variables to pass to functions
Returns:
rumps.App: Basic app
"""
app = rumps.App("TestApp")
app.settings = {}
return app
def test_setting_is_true(mocker, basic_app):
"""Check if setting is changed correctly if True"""
basic_app.settings["auto_login"] = True
mock_function = mocker.patch("src.app_functions.menu.change_auto_login.update_menu")
mocker.patch("src.app_functions.menu.change_auto_login.save_settings")
change_auto_login(basic_app)
assert basic_app.settings["auto_login"] is False
mock_function.assert_called_once_with(basic_app)
def test_setting_is_false(mocker, basic_app):
"""Check if setting is changed correctly if false"""
basic_app.settings["auto_login"] = False
mock_function = mocker.patch("src.app_functions.menu.change_auto_login.update_menu")
mocker.patch("src.app_functions.menu.change_auto_login.save_settings")
change_auto_login(basic_app)
assert basic_app.settings["auto_login"] is True
mock_function.assert_called_once_with(basic_app)
| [((6, 1, 6, 33), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((13, 10, 13, 30), 'rumps.App', 'rumps.App', ({(13, 20, 13, 29): '"""TestApp"""'}, {}), "('TestApp')", False, 'import rumps\n'), ((23, 4, 23, 32), 'src.app_functions.menu.change_auto_login.change_auto_login', 'change_auto_login', ({(23, 22, 23, 31): 'basic_app'}, {}), '(basic_app)', False, 'from src.app_functions.menu.change_auto_login import change_auto_login\n'), ((33, 4, 33, 32), 'src.app_functions.menu.change_auto_login.change_auto_login', 'change_auto_login', ({(33, 22, 33, 31): 'basic_app'}, {}), '(basic_app)', False, 'from src.app_functions.menu.change_auto_login import change_auto_login\n')] |
H0merJayS1mpson/deepobscustom | deepobs/tensorflow/testproblems/cifar100_vgg19.py | e85816ce42466326dac18841c58b79f87a4a1a7c | # -*- coding: utf-8 -*-
"""VGG 19 architecture for CIFAR-100."""
import tensorflow as tf
from ._vgg import _vgg
from ..datasets.cifar100 import cifar100
from .testproblem import TestProblem
class cifar100_vgg19(TestProblem):
"""DeepOBS test problem class for the VGG 19 network on Cifar-100.
The CIFAR-100 images are resized to ``224`` by ``224`` to fit the input
dimension of the original VGG network, which was designed for ImageNet.
Details about the architecture can be found in the `original paper`_.
VGG 19 consists of 19 weight layers, of mostly convolutions. The model uses
cross-entroy loss. A weight decay is used on the weights (but not the biases)
which defaults to ``5e-4``.
.. _original paper: https://arxiv.org/abs/1409.1556
Args:
batch_size (int): Batch size to use.
weight_decay (float): Weight decay factor. Weight decay (L2-regularization)
is used on the weights but not the biases.
Defaults to ``5e-4``.
Attributes:
dataset: The DeepOBS data set class for Cifar-100.
train_init_op: A tensorflow operation initializing the test problem for the
training phase.
train_eval_init_op: A tensorflow operation initializing the test problem for
evaluating on training data.
test_init_op: A tensorflow operation initializing the test problem for
evaluating on test data.
losses: A tf.Tensor of shape (batch_size, ) containing the per-example loss
values.
regularizer: A scalar tf.Tensor containing a regularization term.
accuracy: A scalar tf.Tensor containing the mini-batch mean accuracy.
"""
def __init__(self, batch_size, weight_decay=5e-4):
"""Create a new VGG 19 test problem instance on Cifar-100.
Args:
batch_size (int): Batch size to use.
weight_decay (float): Weight decay factor. Weight decay (L2-regularization)
is used on the weights but not the biases.
Defaults to ``5e-4``.
"""
super(cifar100_vgg19, self).__init__(batch_size, weight_decay)
def set_up(self):
"""Set up the VGG 19 test problem on Cifar-100."""
self.dataset = cifar100(self._batch_size)
self.train_init_op = self.dataset.train_init_op
self.train_eval_init_op = self.dataset.train_eval_init_op
self.valid_init_op = self.dataset.valid_init_op
self.test_init_op = self.dataset.test_init_op
training = tf.equal(self.dataset.phase, "train")
x, y = self.dataset.batch
linear_outputs = _vgg(
x,
training,
variant=19,
num_outputs=100,
weight_decay=self._weight_decay,
)
self.losses = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=y, logits=linear_outputs
)
y_pred = tf.argmax(linear_outputs, 1)
y_correct = tf.argmax(y, 1)
correct_prediction = tf.equal(y_pred, y_correct)
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
self.regularizer = tf.losses.get_regularization_loss()
| [((63, 19, 63, 56), 'tensorflow.equal', 'tf.equal', ({(63, 28, 63, 46): 'self.dataset.phase', (63, 48, 63, 55): '"""train"""'}, {}), "(self.dataset.phase, 'train')", True, 'import tensorflow as tf\n'), ((73, 22, 75, 9), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', (), '', True, 'import tensorflow as tf\n'), ((76, 17, 76, 45), 'tensorflow.argmax', 'tf.argmax', ({(76, 27, 76, 41): 'linear_outputs', (76, 43, 76, 44): '1'}, {}), '(linear_outputs, 1)', True, 'import tensorflow as tf\n'), ((77, 20, 77, 35), 'tensorflow.argmax', 'tf.argmax', ({(77, 30, 77, 31): 'y', (77, 33, 77, 34): '1'}, {}), '(y, 1)', True, 'import tensorflow as tf\n'), ((78, 29, 78, 56), 'tensorflow.equal', 'tf.equal', ({(78, 38, 78, 44): 'y_pred', (78, 46, 78, 55): 'y_correct'}, {}), '(y_pred, y_correct)', True, 'import tensorflow as tf\n'), ((81, 27, 81, 62), 'tensorflow.losses.get_regularization_loss', 'tf.losses.get_regularization_loss', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((79, 39, 79, 78), 'tensorflow.cast', 'tf.cast', ({(79, 47, 79, 65): 'correct_prediction', (79, 67, 79, 77): 'tf.float32'}, {}), '(correct_prediction, tf.float32)', True, 'import tensorflow as tf\n')] |
TheHumanGoogle/Hackerrank-python-solution | write-a-function.py | ab2fa515444d7493340d7c7fbb88c3a090a3a8f5 | def is_leap(year):
leap=False
if year%400==0:
leap=True
elif year%4==0 and year%100!=0:
leap=True
else:
leap=False
return leap
year = int(input())
| [] |
byshyk/shortio | shortio/utils.py | 054014b3936495c86d2e2cd6a61c3cee9ab9b0f2 | """Contains utility functions."""
BIN_MODE_ARGS = {'mode', 'buffering', }
TEXT_MODE_ARGS = {'mode', 'buffering', 'encoding', 'errors', 'newline'}
def split_args(args):
"""Splits args into two groups: open args and other args.
Open args are used by ``open`` function. Other args are used by
``load``/``dump`` functions.
Args:
args: Keyword args to split.
Returns:
open_args: Arguments for ``open``.
other_args: Arguments for ``load``/``dump``.
"""
mode_args = BIN_MODE_ARGS if 'b' in args['mode'] else TEXT_MODE_ARGS
open_args = {}
other_args = {}
for arg, value in args.items():
if arg in mode_args:
open_args[arg] = value
else:
other_args[arg] = value
return open_args, other_args
def read_wrapper(load, **base_kwargs):
"""Wraps ``load`` function to avoid context manager boilerplate.
Args:
load: Function that takes the return of ``open``.
**base_kwargs: Base arguments that ``open``/``load`` take.
Returns:
Wrapper for ``load``.
"""
def wrapped(file, **kwargs):
open_args, load_args = split_args({**base_kwargs, **kwargs})
with open(file, **open_args) as f:
return load(f, **load_args)
return wrapped
def write_wrapper(dump, **base_kwargs):
"""Wraps ``dump`` function to avoid context manager boilerplate.
Args:
dump: Function that takes the return of ``open`` and data to dump.
**base_kwargs: Base arguments that ``open``/``dump`` take.
Returns:
Wrapper for ``dump``.
"""
def wrapped(file, obj, **kwargs):
open_args, dump_args = split_args({**base_kwargs, **kwargs})
with open(file, **open_args) as f:
dump(obj, f, **dump_args)
return wrapped
| [] |
sobolevn/paasta | paasta_tools/async_utils.py | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | import asyncio
import functools
import time
import weakref
from collections import defaultdict
from typing import AsyncIterable
from typing import Awaitable
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import TypeVar
T = TypeVar("T")
# NOTE: this method is not thread-safe due to lack of locking while checking
# and updating the cache
def async_ttl_cache(
ttl: Optional[float] = 300,
cleanup_self: bool = False,
*,
cache: Optional[Dict] = None,
) -> Callable[
[Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]] # wrapped # inner
]:
async def call_or_get_from_cache(cache, async_func, args_for_key, args, kwargs):
# Please note that anything which is put into `key` will be in the
# cache forever, potentially causing memory leaks. The most common
# case is the `self` arg pointing to a huge object. To mitigate that
        # we're using `args_for_key`, which is supposed not to contain any huge
# objects.
key = functools._make_key(args_for_key, kwargs, typed=False)
try:
future, last_update = cache[key]
if ttl is not None and time.time() - last_update > ttl:
raise KeyError
except KeyError:
future = asyncio.ensure_future(async_func(*args, **kwargs))
# set the timestamp to +infinity so that we always wait on the in-flight request.
cache[key] = (future, float("Inf"))
try:
value = await future
except Exception:
# Only update the cache if it's the same future we awaited and
# it hasn't already been updated by another coroutine
# Note also that we use get() in case the key was deleted from the
# cache by another coroutine
if cache.get(key) == (future, float("Inf")):
del cache[key]
raise
else:
if cache.get(key) == (future, float("Inf")):
cache[key] = (future, time.time())
return value
if cleanup_self:
instance_caches: Dict = cache if cache is not None else defaultdict(dict)
def on_delete(w):
del instance_caches[w]
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(self, *args, **kwargs):
w = weakref.ref(self, on_delete)
self_cache = instance_caches[w]
return await call_or_get_from_cache(
self_cache, wrapped, args, (self,) + args, kwargs
)
return inner
else:
cache2: Dict = cache if cache is not None else {} # Should be Dict[Any, T] but that doesn't work.
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(*args, **kwargs):
return await call_or_get_from_cache(cache2, wrapped, args, args, kwargs)
return inner
return outer
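# Illustrative usage sketch (the coroutine below is hypothetical, not part of
# this module):
#
#     @async_ttl_cache(ttl=60)
#     async def fetch_instance_status(instance_id):
#         ...
#
# Calls with the same arguments share one in-flight future, and a completed
# result is reused for up to 60 seconds before the coroutine runs again.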
async def aiter_to_list(aiter: AsyncIterable[T],) -> List[T]:
return [x async for x in aiter]
def async_timeout(
seconds: int = 10,
) -> Callable[
[Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]] # wrapped # inner
]:
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(*args, **kwargs):
return await asyncio.wait_for(wrapped(*args, **kwargs), timeout=seconds)
return inner
return outer
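# Illustrative usage sketch (hypothetical coroutine name):
#
#     @async_timeout(seconds=5)
#     async def poll_api():
#         ...
#
# Awaiting poll_api() raises asyncio.TimeoutError (via asyncio.wait_for) if
# the wrapped call takes longer than 5 seconds.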
| [((15, 4, 15, 16), 'typing.TypeVar', 'TypeVar', ({(15, 12, 15, 15): '"""T"""'}, {}), "('T')", False, 'from typing import TypeVar\n'), ((34, 14, 34, 68), 'functools._make_key', 'functools._make_key', (), '', False, 'import functools\n'), ((99, 9, 99, 33), 'functools.wraps', 'functools.wraps', ({(99, 25, 99, 32): 'wrapped'}, {}), '(wrapped)', False, 'import functools\n'), ((60, 64, 60, 81), 'collections.defaultdict', 'defaultdict', ({(60, 76, 60, 80): 'dict'}, {}), '(dict)', False, 'from collections import defaultdict\n'), ((66, 13, 66, 37), 'functools.wraps', 'functools.wraps', ({(66, 29, 66, 36): 'wrapped'}, {}), '(wrapped)', False, 'import functools\n'), ((80, 13, 80, 37), 'functools.wraps', 'functools.wraps', ({(80, 29, 80, 36): 'wrapped'}, {}), '(wrapped)', False, 'import functools\n'), ((68, 20, 68, 48), 'weakref.ref', 'weakref.ref', ({(68, 32, 68, 36): 'self', (68, 38, 68, 47): 'on_delete'}, {}), '(self, on_delete)', False, 'import weakref\n'), ((56, 38, 56, 49), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((37, 35, 37, 46), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
MTI830PyTraders/pytrade | util/dataset.py | 33ea3e756019c999e9c3d78fca89cd72addf6ab2 | #!/usr/bin/python
''' generate dataset '''
import csv
import argparse
import numpy as np
import sklearn.metrics
import theanets
from sklearn.metrics import accuracy_score
import logging
from trendStrategy import OptTrendStrategy, TrendStrategy
from util import visu
def compare(stock, field='orders', strategy="TrendStrategy_predicted", best=OptTrendStrategy.__name__):
best_fname="{0}_{1}_{2}.csv".format(stock, best, field)
predicted_fname="{0}_{1}_{2}.csv".format(stock, strategy, field)
print "comparing",best_fname,predicted_fname
best_data = np.loadtxt(best_fname, usecols=[1], delimiter=',')
predicted_data = np.loadtxt(predicted_fname, usecols=[1], delimiter=',')
min_size = min(len(best_data), len(predicted_data))
title = "%s vs %s" %(best, strategy)
visu.compare(best_data[-min_size:], predicted_data[-min_size:], title)
def load_dataset(stock, ratio=0.8, name=OptTrendStrategy.__name__):
''' return train, valid (x,y) '''
orders = np.loadtxt("{0}_{1}_orders.csv".format(stock, name), usecols=[1], delimiter=',')
orders[orders==-1]=0
features = np.loadtxt("{0}_input.csv".format(stock), delimiter=',')
if len(orders)!=len(features):
logging.error("len(orders)!=len(features) -> %s!=%s" %(len(orders),len(features)))
features = features.astype('f')
orders = orders.astype('i')
pos = round(len(features)*ratio)
train = (features[:pos], orders[:pos])
valid = (features[pos:], orders[pos:])
return train, valid
def evaluate(exp, dataset):
y_true = dataset[1]
y_pred = exp.network.predict(dataset[0])
print(sklearn.metrics.confusion_matrix(y_true, y_pred))
print('accuracy:',accuracy_score(y_true, y_pred))
def train_strategy(stock, ratio=0.8, min_improvement=0.001):
train, valid = load_dataset(stock)
n, n_input = train[0].shape
exp = theanets.Experiment(
theanets.Classifier,
layers=(n_input, n_input*2, 2),
)
exp.train(train, valid, min_improvement=min_improvement,
algo='sgd',
learning_rate=0.01,
momentum=0.5,
hidden_l1=0.001,
weight_l2=0.001,
num_updates=100
)
print('training:')
evaluate(exp, train)
print('validation:')
evaluate(exp, valid)
exp.save('%s.nn' %stock)
return exp
def load_strategy(name, verbose=False):
print("loading %s trained strategy" %name)
train, valid = load_dataset(name)
n, n_input = train[0].shape
exp = theanets.Experiment(
theanets.Classifier,
layers=(n_input, n_input*2, 2),
)
exp.load('%s.nn' %name)
if verbose:
print('training:')
evaluate(exp, train)
print('validation:')
evaluate(exp, valid)
return exp
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--stock', '-s', default="TSLA", help='stock')
parser.add_argument('--ratio', '-r', default=0.8, type=int, help='train/valid ratio')
parser.add_argument('--min', '-m', default=0.001, type=int, help='min improvement (stop learning)')
parser.add_argument('--field', default='orders', help='compare field')
args = parser.parse_args()
if args.field:
compare(args.stock, args.field)
train, valid = load_dataset(args.stock)
exp = train_strategy(args.stock, args.ratio, args.min)
exp = load_strategy(args.stock, True)
| [] |
manvhah/sporco | examples/scripts/sc/bpdn.py | 9237d7fc37e75089a2a65ebfe02b7491410da7d4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Basis Pursuit DeNoising
=======================
This example demonstrates the use of class :class:`.admm.bpdn.BPDN` to solve the Basis Pursuit DeNoising (BPDN) problem :cite:`chen-1998-atomic`
$$\mathrm{argmin}_\mathbf{x} \; (1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2 + \lambda \| \mathbf{x} \|_1 \;,$$
where $D$ is the dictionary, $\mathbf{x}$ is the sparse representation, and $\mathbf{s}$ is the signal to be represented. In this example the BPDN problem is used to estimate the reference sparse representation that generated a signal from a noisy version of the signal.
"""
from __future__ import print_function
from builtins import input
import numpy as np
from sporco.admm import bpdn
from sporco import util
from sporco import plot
"""
Configure problem size, sparsity, and noise level.
"""
N = 512 # Signal size
M = 4*N # Dictionary size
L = 32 # Number of non-zero coefficients in generator
sigma = 0.5 # Noise level
"""
Construct random dictionary, reference random sparse representation, and test signal consisting of the synthesis of the reference sparse representation with additive Gaussian noise.
"""
# Construct random dictionary and random sparse coefficients
np.random.seed(12345)
D = np.random.randn(N, M)
x0 = np.zeros((M, 1))
si = np.random.permutation(list(range(0, M-1)))
x0[si[0:L]] = np.random.randn(L, 1)
# Construct reference and noisy signal
s0 = D.dot(x0)
s = s0 + sigma*np.random.randn(N,1)
"""
Set BPDN solver class options.
"""
opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500,
'RelStopTol': 1e-3, 'AutoRho': {'RsdlTarget': 1.0}})
"""
Select regularization parameter $\lambda$ by evaluating the error in recovering the sparse representation over a logarithmically spaced grid. (The reference representation is assumed to be known, which is not realistic in a real application.) A function is defined that evaluates the BPDN recovery error for a specified $\lambda$, and this function is evaluated in parallel by :func:`sporco.util.grid_search`.
"""
# Function computing reconstruction error at lmbda
def evalerr(prm):
lmbda = prm[0]
b = bpdn.BPDN(D, s, lmbda, opt)
x = b.solve()
return np.sum(np.abs(x-x0))
# Parallel evaluation of error function on lmbda grid
lrng = np.logspace(1, 2, 20)
sprm, sfvl, fvmx, sidx = util.grid_search(evalerr, (lrng,))
lmbda = sprm[0]
print('Minimum ℓ1 error: %5.2f at 𝜆 = %.2e' % (sfvl, lmbda))
"""
Once the best $\lambda$ has been determined, run BPDN with verbose display of ADMM iteration statistics.
"""
# Initialise and run BPDN object for best lmbda
opt['Verbose'] = True
b = bpdn.BPDN(D, s, lmbda, opt)
x = b.solve()
print("BPDN solve time: %.2fs" % b.timer.elapsed('solve'))
"""
Plot comparison of reference and recovered representations.
"""
plot.plot(np.hstack((x0, x)), title='Sparse representation',
lgnd=['Reference', 'Reconstructed'])
"""
Plot lmbda error curve, functional value, residuals, and rho
"""
its = b.getitstat()
fig = plot.figure(figsize=(15, 10))
plot.subplot(2, 2, 1)
plot.plot(fvmx, x=lrng, ptyp='semilogx', xlbl='$\lambda$',
ylbl='Error', fig=fig)
plot.subplot(2, 2, 2)
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)
plot.subplot(2, 2, 3)
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T,
ptyp='semilogy', xlbl='Iterations', ylbl='Residual',
lgnd=['Primal', 'Dual'], fig=fig)
plot.subplot(2, 2, 4)
plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig)
fig.show()
# Wait for enter on keyboard
input()
| [((44, 0, 44, 21), 'numpy.random.seed', 'np.random.seed', ({(44, 15, 44, 20): '(12345)'}, {}), '(12345)', True, 'import numpy as np\n'), ((45, 4, 45, 25), 'numpy.random.randn', 'np.random.randn', ({(45, 20, 45, 21): 'N', (45, 23, 45, 24): 'M'}, {}), '(N, M)', True, 'import numpy as np\n'), ((46, 5, 46, 21), 'numpy.zeros', 'np.zeros', ({(46, 14, 46, 20): '(M, 1)'}, {}), '((M, 1))', True, 'import numpy as np\n'), ((48, 14, 48, 35), 'numpy.random.randn', 'np.random.randn', ({(48, 30, 48, 31): 'L', (48, 33, 48, 34): '1'}, {}), '(L, 1)', True, 'import numpy as np\n'), ((59, 6, 60, 72), 'sporco.admm.bpdn.BPDN.Options', 'bpdn.BPDN.Options', ({(59, 24, 60, 71): "{'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': 0.001, 'AutoRho': {\n 'RsdlTarget': 1.0}}"}, {}), "({'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': \n 0.001, 'AutoRho': {'RsdlTarget': 1.0}})", False, 'from sporco.admm import bpdn\n'), ((76, 7, 76, 28), 'numpy.logspace', 'np.logspace', ({(76, 19, 76, 20): '1', (76, 22, 76, 23): '2', (76, 25, 76, 27): '20'}, {}), '(1, 2, 20)', True, 'import numpy as np\n'), ((77, 25, 77, 59), 'sporco.util.grid_search', 'util.grid_search', ({(77, 42, 77, 49): 'evalerr', (77, 51, 77, 58): '(lrng,)'}, {}), '(evalerr, (lrng,))', False, 'from sporco import util\n'), ((89, 4, 89, 31), 'sporco.admm.bpdn.BPDN', 'bpdn.BPDN', ({(89, 14, 89, 15): 'D', (89, 17, 89, 18): 's', (89, 20, 89, 25): 'lmbda', (89, 27, 89, 30): 'opt'}, {}), '(D, s, lmbda, opt)', False, 'from sporco.admm import bpdn\n'), ((108, 6, 108, 35), 'sporco.plot.figure', 'plot.figure', (), '', False, 'from sporco import plot\n'), ((109, 0, 109, 21), 'sporco.plot.subplot', 'plot.subplot', ({(109, 13, 109, 14): '(2)', (109, 16, 109, 17): '(2)', (109, 19, 109, 20): '(1)'}, {}), '(2, 2, 1)', False, 'from sporco import plot\n'), ((110, 0, 111, 32), 'sporco.plot.plot', 'plot.plot', (), '', False, 'from sporco import plot\n'), ((112, 0, 112, 21), 'sporco.plot.subplot', 'plot.subplot', ({(112, 13, 112, 14): '(2)', (112, 16, 112, 17): '(2)', (112, 19, 112, 20): '(2)'}, {}), '(2, 2, 2)', False, 'from sporco import plot\n'), ((113, 0, 113, 68), 'sporco.plot.plot', 'plot.plot', (), '', False, 'from sporco import plot\n'), ((114, 0, 114, 21), 'sporco.plot.subplot', 'plot.subplot', ({(114, 13, 114, 14): '(2)', (114, 16, 114, 17): '(2)', (114, 19, 114, 20): '(3)'}, {}), '(2, 2, 3)', False, 'from sporco import plot\n'), ((118, 0, 118, 21), 'sporco.plot.subplot', 'plot.subplot', ({(118, 13, 118, 14): '(2)', (118, 16, 118, 17): '(2)', (118, 19, 118, 20): '(4)'}, {}), '(2, 2, 4)', False, 'from sporco import plot\n'), ((119, 0, 119, 72), 'sporco.plot.plot', 'plot.plot', (), '', False, 'from sporco import plot\n'), ((125, 0, 125, 7), 'builtins.input', 'input', ({}, {}), '()', False, 'from builtins import input\n'), ((70, 8, 70, 35), 'sporco.admm.bpdn.BPDN', 'bpdn.BPDN', ({(70, 18, 70, 19): 'D', (70, 21, 70, 22): 's', (70, 24, 70, 29): 'lmbda', (70, 31, 70, 34): 'opt'}, {}), '(D, s, lmbda, opt)', False, 'from sporco.admm import bpdn\n'), ((99, 10, 99, 28), 'numpy.hstack', 'np.hstack', ({(99, 20, 99, 27): '(x0, x)'}, {}), '((x0, x))', True, 'import numpy as np\n'), ((52, 15, 52, 35), 'numpy.random.randn', 'np.random.randn', ({(52, 31, 52, 32): 'N', (52, 33, 52, 34): '(1)'}, {}), '(N, 1)', True, 'import numpy as np\n'), ((72, 18, 72, 30), 'numpy.abs', 'np.abs', ({(72, 25, 72, 29): '(x - x0)'}, {}), '(x - x0)', True, 'import numpy as np\n'), ((115, 10, 115, 51), 'numpy.vstack', 'np.vstack', ({(115, 20, 115, 50): '(its.PrimalRsdl, its.DualRsdl)'}, {}), 
'((its.PrimalRsdl, its.DualRsdl))', True, 'import numpy as np\n')] |
tadartefactorist/mask | saleor-env/lib/python3.7/site-packages/snowballstemmer/nepali_stemmer.py | 7967dd4ad39e3d26ac516719faefb40e00a8cbff | # This file was generated automatically by the Snowball to Python compiler
# http://snowballstem.org/
from .basestemmer import BaseStemmer
from .among import Among
class NepaliStemmer(BaseStemmer):
'''
This class was automatically generated by a Snowball to Python compiler
It implements the stemming algorithm defined by a snowball script.
'''
a_0 = [
Among(u"\u0932\u093E\u0907", -1, 1),
Among(u"\u0932\u093E\u0908", -1, 1),
Among(u"\u0938\u0901\u0917", -1, 1),
Among(u"\u0938\u0902\u0917", -1, 1),
Among(u"\u092E\u093E\u0930\u094D\u092B\u0924", -1, 1),
Among(u"\u0930\u0924", -1, 1),
Among(u"\u0915\u093E", -1, 2),
Among(u"\u092E\u093E", -1, 1),
Among(u"\u0926\u094D\u0935\u093E\u0930\u093E", -1, 1),
Among(u"\u0915\u093F", -1, 2),
Among(u"\u092A\u091B\u093F", -1, 1),
Among(u"\u0915\u0940", -1, 2),
Among(u"\u0932\u0947", -1, 1),
Among(u"\u0915\u0948", -1, 2),
Among(u"\u0938\u0901\u0917\u0948", -1, 1),
Among(u"\u092E\u0948", -1, 1),
Among(u"\u0915\u094B", -1, 2)
]
a_1 = [
Among(u"\u0901", -1, -1),
Among(u"\u0902", -1, -1),
Among(u"\u0948", -1, -1)
]
a_2 = [
Among(u"\u0901", -1, 1),
Among(u"\u0902", -1, 1),
Among(u"\u0948", -1, 2)
]
a_3 = [
Among(u"\u0925\u093F\u090F", -1, 1),
Among(u"\u091B", -1, 1),
Among(u"\u0907\u091B", 1, 1),
Among(u"\u090F\u091B", 1, 1),
Among(u"\u093F\u091B", 1, 1),
Among(u"\u0947\u091B", 1, 1),
Among(u"\u0928\u0947\u091B", 5, 1),
Among(u"\u0939\u0941\u0928\u0947\u091B", 6, 1),
Among(u"\u0907\u0928\u094D\u091B", 1, 1),
Among(u"\u093F\u0928\u094D\u091B", 1, 1),
Among(u"\u0939\u0941\u0928\u094D\u091B", 1, 1),
Among(u"\u090F\u0915\u093E", -1, 1),
Among(u"\u0907\u090F\u0915\u093E", 11, 1),
Among(u"\u093F\u090F\u0915\u093E", 11, 1),
Among(u"\u0947\u0915\u093E", -1, 1),
Among(u"\u0928\u0947\u0915\u093E", 14, 1),
Among(u"\u0926\u093E", -1, 1),
Among(u"\u0907\u0926\u093E", 16, 1),
Among(u"\u093F\u0926\u093E", 16, 1),
Among(u"\u0926\u0947\u0916\u093F", -1, 1),
Among(u"\u092E\u093E\u0925\u093F", -1, 1),
Among(u"\u090F\u0915\u0940", -1, 1),
Among(u"\u0907\u090F\u0915\u0940", 21, 1),
Among(u"\u093F\u090F\u0915\u0940", 21, 1),
Among(u"\u0947\u0915\u0940", -1, 1),
Among(u"\u0926\u0947\u0916\u0940", -1, 1),
Among(u"\u0925\u0940", -1, 1),
Among(u"\u0926\u0940", -1, 1),
Among(u"\u091B\u0941", -1, 1),
Among(u"\u090F\u091B\u0941", 28, 1),
Among(u"\u0947\u091B\u0941", 28, 1),
Among(u"\u0928\u0947\u091B\u0941", 30, 1),
Among(u"\u0928\u0941", -1, 1),
Among(u"\u0939\u0930\u0941", -1, 1),
Among(u"\u0939\u0930\u0942", -1, 1),
Among(u"\u091B\u0947", -1, 1),
Among(u"\u0925\u0947", -1, 1),
Among(u"\u0928\u0947", -1, 1),
Among(u"\u090F\u0915\u0948", -1, 1),
Among(u"\u0947\u0915\u0948", -1, 1),
Among(u"\u0928\u0947\u0915\u0948", 39, 1),
Among(u"\u0926\u0948", -1, 1),
Among(u"\u0907\u0926\u0948", 41, 1),
Among(u"\u093F\u0926\u0948", 41, 1),
Among(u"\u090F\u0915\u094B", -1, 1),
Among(u"\u0907\u090F\u0915\u094B", 44, 1),
Among(u"\u093F\u090F\u0915\u094B", 44, 1),
Among(u"\u0947\u0915\u094B", -1, 1),
Among(u"\u0928\u0947\u0915\u094B", 47, 1),
Among(u"\u0926\u094B", -1, 1),
Among(u"\u0907\u0926\u094B", 49, 1),
Among(u"\u093F\u0926\u094B", 49, 1),
Among(u"\u092F\u094B", -1, 1),
Among(u"\u0907\u092F\u094B", 52, 1),
Among(u"\u092D\u092F\u094B", 52, 1),
Among(u"\u093F\u092F\u094B", 52, 1),
Among(u"\u0925\u093F\u092F\u094B", 55, 1),
Among(u"\u0926\u093F\u092F\u094B", 55, 1),
Among(u"\u0925\u094D\u092F\u094B", 52, 1),
Among(u"\u091B\u094C", -1, 1),
Among(u"\u0907\u091B\u094C", 59, 1),
Among(u"\u090F\u091B\u094C", 59, 1),
Among(u"\u093F\u091B\u094C", 59, 1),
Among(u"\u0947\u091B\u094C", 59, 1),
Among(u"\u0928\u0947\u091B\u094C", 63, 1),
Among(u"\u092F\u094C", -1, 1),
Among(u"\u0925\u093F\u092F\u094C", 65, 1),
Among(u"\u091B\u094D\u092F\u094C", 65, 1),
Among(u"\u0925\u094D\u092F\u094C", 65, 1),
Among(u"\u091B\u0928\u094D", -1, 1),
Among(u"\u0907\u091B\u0928\u094D", 69, 1),
Among(u"\u090F\u091B\u0928\u094D", 69, 1),
Among(u"\u093F\u091B\u0928\u094D", 69, 1),
Among(u"\u0947\u091B\u0928\u094D", 69, 1),
Among(u"\u0928\u0947\u091B\u0928\u094D", 73, 1),
Among(u"\u0932\u093E\u0928\u094D", -1, 1),
Among(u"\u091B\u093F\u0928\u094D", -1, 1),
Among(u"\u0925\u093F\u0928\u094D", -1, 1),
Among(u"\u092A\u0930\u094D", -1, 1),
Among(u"\u0907\u0938\u094D", -1, 1),
Among(u"\u0925\u093F\u0907\u0938\u094D", 79, 1),
Among(u"\u091B\u0938\u094D", -1, 1),
Among(u"\u0907\u091B\u0938\u094D", 81, 1),
Among(u"\u090F\u091B\u0938\u094D", 81, 1),
Among(u"\u093F\u091B\u0938\u094D", 81, 1),
Among(u"\u0947\u091B\u0938\u094D", 81, 1),
Among(u"\u0928\u0947\u091B\u0938\u094D", 85, 1),
Among(u"\u093F\u0938\u094D", -1, 1),
Among(u"\u0925\u093F\u0938\u094D", 87, 1),
Among(u"\u091B\u0947\u0938\u094D", -1, 1),
Among(u"\u0939\u094B\u0938\u094D", -1, 1)
]
def __r_remove_category_1(self):
# (, line 53
# [, line 54
self.ket = self.cursor
# substring, line 54
among_var = self.find_among_b(NepaliStemmer.a_0)
if among_var == 0:
return False
# ], line 54
self.bra = self.cursor
if among_var == 1:
# (, line 58
# delete, line 58
if not self.slice_del():
return False
elif among_var == 2:
# (, line 59
# or, line 59
try:
v_1 = self.limit - self.cursor
try:
# (, line 59
# or, line 59
try:
v_2 = self.limit - self.cursor
try:
# literal, line 59
if not self.eq_s_b(u"\u090F"):
raise lab3()
raise lab2()
except lab3: pass
self.cursor = self.limit - v_2
# literal, line 59
if not self.eq_s_b(u"\u0947"):
raise lab1()
except lab2: pass
# (, line 59
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# delete, line 59
if not self.slice_del():
return False
except lab0: pass
return True
def __r_check_category_2(self):
# (, line 63
# [, line 64
self.ket = self.cursor
# substring, line 64
if self.find_among_b(NepaliStemmer.a_1) == 0:
return False
# ], line 64
self.bra = self.cursor
return True
def __r_remove_category_2(self):
# (, line 69
# [, line 70
self.ket = self.cursor
# substring, line 70
among_var = self.find_among_b(NepaliStemmer.a_2)
if among_var == 0:
return False
# ], line 70
self.bra = self.cursor
if among_var == 1:
# (, line 71
# or, line 71
try:
v_1 = self.limit - self.cursor
try:
# literal, line 71
if not self.eq_s_b(u"\u092F\u094C"):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
try:
# literal, line 71
if not self.eq_s_b(u"\u091B\u094C"):
raise lab2()
raise lab0()
except lab2: pass
self.cursor = self.limit - v_1
try:
# literal, line 71
if not self.eq_s_b(u"\u0928\u094C"):
raise lab3()
raise lab0()
except lab3: pass
self.cursor = self.limit - v_1
# literal, line 71
if not self.eq_s_b(u"\u0925\u0947"):
return False
except lab0: pass
# delete, line 71
if not self.slice_del():
return False
elif among_var == 2:
# (, line 72
# literal, line 72
if not self.eq_s_b(u"\u0924\u094D\u0930"):
return False
# delete, line 72
if not self.slice_del():
return False
return True
def __r_remove_category_3(self):
# (, line 76
# [, line 77
self.ket = self.cursor
# substring, line 77
if self.find_among_b(NepaliStemmer.a_3) == 0:
return False
# ], line 77
self.bra = self.cursor
# (, line 79
# delete, line 79
if not self.slice_del():
return False
return True
def _stem(self):
# (, line 85
# backwards, line 86
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 86
# do, line 87
v_1 = self.limit - self.cursor
try:
# call remove_category_1, line 87
if not self.__r_remove_category_1():
raise lab0()
except lab0: pass
self.cursor = self.limit - v_1
# do, line 88
v_2 = self.limit - self.cursor
try:
# (, line 88
# repeat, line 89
try:
while True:
try:
v_3 = self.limit - self.cursor
try:
# (, line 89
# do, line 89
v_4 = self.limit - self.cursor
try:
# (, line 89
# and, line 89
v_5 = self.limit - self.cursor
# call check_category_2, line 89
if not self.__r_check_category_2():
raise lab5()
self.cursor = self.limit - v_5
# call remove_category_2, line 89
if not self.__r_remove_category_2():
raise lab5()
except lab5: pass
self.cursor = self.limit - v_4
# call remove_category_3, line 89
if not self.__r_remove_category_3():
raise lab4()
raise lab3()
except lab4: pass
self.cursor = self.limit - v_3
raise lab2()
except lab3: pass
except lab2: pass
except lab1: pass
self.cursor = self.limit - v_2
self.cursor = self.limit_backward
return True
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
| [] |
MountainField/uspec | tests/auto_test_class_creation_spec.py | a4f8908b1a3af519d9d2ce7b85a4b4cca7b85883 | # -*- coding: utf-8 -*-
# =================================================================
# uspec
#
# Copyright (c) 2020 Takahide Nogayama
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
# =================================================================
from __future__ import unicode_literals, print_function, division
import unittest
import uspec
from uspec import describe, context, it
###################################
class TestGame(unittest.TestCase): pass
with describe("Game", test_class=TestGame):
assert test_class is TestGame
@it("hoge")
def _(self):
self.assertTrue(True)
assert TestGame is not None
##################################
TEST_CLASS_NAME_GAME2 = None
with describe("Game2"):
TEST_CLASS_NAME_GAME2 = test_class.__name__
@it("hoge")
def _(self):
self.assertTrue(True)
assert TEST_CLASS_NAME_GAME2 in globals()
##################################
def wrap():
global TEST_CLASS_NAME_GAME3
with describe("Game3"):
TEST_CLASS_NAME_GAME3 = locals()["test_class"].__name__
@it("hoge")
def _(self):
self.assertTrue(True)
wrap()
assert TEST_CLASS_NAME_GAME3 in globals()
if __name__ == '__main__':
import unittest
unittest.main(verbosity=2)
| [((23, 5, 23, 42), 'uspec.describe', 'describe', (), '', False, 'from uspec import describe, context, it\n'), ((27, 5, 27, 15), 'uspec.it', 'it', ({(27, 8, 27, 14): '"""hoge"""'}, {}), "('hoge')", False, 'from uspec import describe, context, it\n'), ((36, 5, 36, 22), 'uspec.describe', 'describe', ({(36, 14, 36, 21): '"""Game2"""'}, {}), "('Game2')", False, 'from uspec import describe, context, it\n'), ((39, 5, 39, 15), 'uspec.it', 'it', ({(39, 8, 39, 14): '"""hoge"""'}, {}), "('hoge')", False, 'from uspec import describe, context, it\n'), ((65, 4, 65, 30), 'unittest.main', 'unittest.main', (), '', False, 'import unittest\n'), ((50, 9, 50, 26), 'uspec.describe', 'describe', ({(50, 18, 50, 25): '"""Game3"""'}, {}), "('Game3')", False, 'from uspec import describe, context, it\n'), ((54, 9, 54, 19), 'uspec.it', 'it', ({(54, 12, 54, 18): '"""hoge"""'}, {}), "('hoge')", False, 'from uspec import describe, context, it\n')] |
Matthewk01/Snake-AI | main.py | d5f211334436676966f17bb6dbfea8aba61ee6b4 | import pygame
from game.game_logic.game import Game
import matplotlib.pyplot as plt
def main():
scores_history = []
GAME_COUNT = 2
for i in range(GAME_COUNT):
game = Game(400, "Snake AI")
score = game.start()
scores_history.append(score)
print("Game:", i)
plt.ylim(0, 36)
plt.plot(range(len(scores_history)), scores_history)
plt.ylabel('Snake length')
plt.xlabel('Game count')
plt.show()
if __name__ == "__main__":
main()
| [((15, 4, 15, 19), 'matplotlib.pyplot.ylim', 'plt.ylim', ({(15, 13, 15, 14): '(0)', (15, 16, 15, 18): '(36)'}, {}), '(0, 36)', True, 'import matplotlib.pyplot as plt\n'), ((17, 4, 17, 30), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(17, 15, 17, 29): '"""Snake length"""'}, {}), "('Snake length')", True, 'import matplotlib.pyplot as plt\n'), ((18, 4, 18, 28), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(18, 15, 18, 27): '"""Game count"""'}, {}), "('Game count')", True, 'import matplotlib.pyplot as plt\n'), ((19, 4, 19, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((10, 15, 10, 36), 'game.game_logic.game.Game', 'Game', ({(10, 20, 10, 23): '400', (10, 25, 10, 35): '"""Snake AI"""'}, {}), "(400, 'Snake AI')", False, 'from game.game_logic.game import Game\n')] |
ctuning/inference_results_v1.1 | closed/Intel/code/resnet50/openvino-cpu/src/tools/create_image_list.py | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | import os
import sys
from glob import glob
def create_list(images_dir, output_file, img_ext=".jpg"):
ImgList = os.listdir(images_dir)
val_list = []
for img in ImgList:
img,ext = img.split(".")
val_list.append(img)
with open(os.path.join(images_dir, output_file),'w') as fid:
for line in val_list[:-1]:
fid.write(line + "\n")
fid.write(val_list[-1])
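# Illustrative call (paths are placeholders): create_list("/data/val_images",
# "image_list.txt") writes one extension-less image name per line to
# /data/val_images/image_list.txt.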
def main():
if len(sys.argv) < 2:
print("Requires images directory")
sys.exit(1)
elif len(sys.argv) < 3:
images_dir = sys.argv[1]
output_file = "image_list.txt"
else:
images_dir = sys.argv[1]
output_file = sys.argv[2]
create_list(images_dir, output_file)
if __name__=="__main__":
main() | [((7, 14, 7, 36), 'os.listdir', 'os.listdir', ({(7, 25, 7, 35): 'images_dir'}, {}), '(images_dir)', False, 'import os\n'), ((24, 8, 24, 19), 'sys.exit', 'sys.exit', ({(24, 17, 24, 18): '(1)'}, {}), '(1)', False, 'import sys\n'), ((15, 14, 15, 51), 'os.path.join', 'os.path.join', ({(15, 27, 15, 37): 'images_dir', (15, 39, 15, 50): 'output_file'}, {}), '(images_dir, output_file)', False, 'import os\n')] |
honchardev/Fun | AI/others/churn/churn_2.py | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# src: http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/
# In[ ]:
# Customer churn rate is a business term that describes how intensively
# customers leave a company or stop paying for its goods or services.
# It is a key metric for many companies, because acquiring new customers
# is often much more expensive than retaining existing ones (in some
# cases 5 to 20 times more expensive).
# Example use cases:
# 1. mobile carriers, cable TV operators, and companies that process
#    credit card payments
# 2. casinos use predictive models to find the ideal conditions on the
#    floor that keep blackjack players at the table
# 3. airlines can offer customers who have complaints an upgrade to a
#    first-class ticket
# Effective customer retention thus comes down to a task in which, using
# the available data, we need to distinguish customers who are about to
# leave from those who are not.
# In[ ]:
# dataset src: https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv
# In[88]:
# Load libraries
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support
from sklearn.model_selection import KFold, train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
# In[3]:
# Load dataset
raw_churn_df = pd.read_csv('churn.csv')
# In[17]:
display(raw_churn_df.shape)
display(raw_churn_df.head(), raw_churn_df.tail())
display(raw_churn_df.columns.values)
display(raw_churn_df.dtypes)
display(raw_churn_df.isnull().sum())
# In[78]:
# Isolate target data
y = raw_churn_df['Churn?']
X = raw_churn_df.drop('Churn?', axis=1)
# In[79]:
# Drop irrelevant features
features_to_drop = ['State', 'Area Code', 'Phone']
X = X.drop(features_to_drop, axis=1)
# In[80]:
# Encode yes/no with 1/0 values
X["Int'l Plan"] = X["Int'l Plan"].map({'no': 0, 'yes': 1})
X["VMail Plan"] = X["VMail Plan"].map({'no': 0, 'yes': 1})
# In[81]:
# Scale everything
std_scaler = StandardScaler(with_mean=True)
X = std_scaler.fit_transform(X)
display(X.shape)
# In[90]:
# Perform CV for SVM, random forest and kNN
def try_clf(X, y, clf_nofit):
X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42)
clf = clf_nofit.fit(X_tr, y_tr)
y_pred = clf.predict(X_val)
display(clf_nofit.__class__.__name__)
display(accuracy_score(y_val, y_pred))
display(confusion_matrix(y_val, y_pred))
display("prec, rec, f1, support", precision_recall_fscore_support(y_val, y_pred))
try_clf(X, y, SVC(gamma='scale'))
try_clf(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1))
try_clf(X, y, KNeighborsClassifier())
# std scaler with_mean=False accuracies:
# 0.9256594724220624
# 0.9484412470023981
# 0.8896882494004796
# std scaler with_mean=True accuracies:
# 0.9256594724220624
# 0.9496402877697842
# 0.8896882494004796
# In[86]:
# Recall
# What fraction of the actual churns did the model predict correctly?
# Precision
# What fraction of the predicted churns were actual churns?
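# In terms of the confusion matrix (with "churn" as the positive class):
#   recall = TP / (TP + FN)
#   precision = TP / (TP + FP)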
# In[101]:
# # Predict probabilities
# def try_probab(X, y, clf_nofit):
# X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42)
# clf = clf_nofit.fit(X_tr, y_tr)
# y_prob = clf.predict_proba(X_val)
# # for i in range(len(X)):
# # display("y_true={0}, Predicted={1}".format(y[i], y_prob[i]))
# display(pd.value_counts(y_prob[:, 1]))
# try_probab(X, y, SVC(gamma='scale', probability=True))
# # try_probab(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1))
# # try_probab(X, y, KNeighborsClassifier())
# # for i in range(len(Xnew)):
# # print("X=%s, Predicted=%s" % (Xnew[i], ynew[i]))
# In[ ]:
# todo: calibration and discrimination
# https://github.com/ghuiber/churn/blob/master/churn_measurements.py
# from churn_measurements import calibration, discrimination
| [((67, 15, 67, 39), 'pandas.read_csv', 'pd.read_csv', ({(67, 27, 67, 38): '"""churn.csv"""'}, {}), "('churn.csv')", True, 'import pandas as pd\n'), ((116, 13, 116, 43), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', (), '', False, 'from sklearn.preprocessing import StandardScaler\n'), ((128, 31, 128, 70), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import KFold, train_test_split\n'), ((142, 14, 142, 32), 'sklearn.svm.SVC', 'SVC', (), '', False, 'from sklearn.svm import SVC\n'), ((143, 14, 143, 65), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', (), '', False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((144, 14, 144, 36), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ({}, {}), '()', False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((136, 12, 136, 41), 'sklearn.metrics.accuracy_score', 'accuracy_score', ({(136, 27, 136, 32): 'y_val', (136, 34, 136, 40): 'y_pred'}, {}), '(y_val, y_pred)', False, 'from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support\n'), ((138, 12, 138, 43), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', ({(138, 29, 138, 34): 'y_val', (138, 36, 138, 42): 'y_pred'}, {}), '(y_val, y_pred)', False, 'from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support\n'), ((140, 38, 140, 84), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', ({(140, 70, 140, 75): 'y_val', (140, 77, 140, 83): 'y_pred'}, {}), '(y_val, y_pred)', False, 'from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support\n')] |
rajatariya21/airbyte | airbyte-integrations/connectors/source-google-sheets/google_sheets_source/models/spreadsheet.py | 11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d | # MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
from typing import List, Optional
from pydantic import BaseModel, Extra, Field
class SpreadsheetProperties(BaseModel):
class Config:
extra = Extra.allow
title: Optional[str] = None
class SheetProperties(BaseModel):
class Config:
extra = Extra.allow
title: Optional[str] = None
class CellData(BaseModel):
class Config:
extra = Extra.allow
formattedValue: Optional[str] = None
class RowData(BaseModel):
class Config:
extra = Extra.allow
values: Optional[List[CellData]] = None
class GridData(BaseModel):
class Config:
extra = Extra.allow
rowData: Optional[List[RowData]] = None
class Sheet(BaseModel):
class Config:
extra = Extra.allow
data: Optional[List[GridData]] = None
properties: Optional[SheetProperties] = None
class Spreadsheet(BaseModel):
class Config:
extra = Extra.allow
spreadsheetId: str
sheets: List[Sheet]
properties: Optional[SpreadsheetProperties] = None
| [] |
Dnewman9/Python-Trivia-API | pytrivia/trivia.py | 0af7f999cc4ab278fb0ac6fd64733ab168984e60 | """
A simple python api wrapper for https://opentdb.com/
"""
from aiohttp import ClientSession
from requests import get
from pytrivia.__helpers import decode_dict, get_token, make_request
from pytrivia.enums import *
class Trivia:
def __init__(self, with_token: bool):
"""
Initialize an instance of the Trivia class
        :param with_token: If True then the instance will use a session token
"""
self.token = get_token() if with_token else None
def request(self, num_questions: int, category: Category = None,
diffculty: Diffculty = None, type_: Type = None) -> dict:
"""
Send an api request to https://opentdb.com/
Limitations:
Only 1 Category can be requested per API Call.
To get questions from any category, don't specify a category.
A Maximum of 50 Questions can be retrieved per call.
:param num_questions: the number of questions,
must be between 1 and 50 (inclusive)
:param category: the category of the question. None for any category
:param diffculty: the diffculty of the question. None for any diffculty
:param type_: the type of the question. None for any type
:return: the api call response
:rtype: dict
:raises: ValueError when the num_questions parameter is less than 1
or greater than 50
"""
result = get(
self.__url(num_questions, category, diffculty, type_)).json()
if result['response_code'] in (3, 4):
self.token = get_token()
return self.request(num_questions, category, diffculty, type_)
else:
return decode_dict(result)
async def request_async(self, session: ClientSession, close_session: bool,
num_questions: int, category: Category = None,
diffculty: Diffculty = None,
type_: Type = None) -> dict:
"""
Send an api request to https://opentdb.com/
Limitations:
Only 1 Category can be requested per API Call.
To get questions from any category, don't specify a category.
A Maximum of 50 Questions can be retrieved per call.
:param session: an Aiohttp client session.
:param close_session: True to close the session after the request.
:param num_questions: the number of questions,
must be between 1 and 50 (inclusive)
:param category: the category of the question. None for any category
:param diffculty: the diffculty of the question. None for any diffculty
:param type_: the type of the question. None for any type
:return: the api call response
:rtype: dict
:raises: ValueError when the num_questions parameter is less than 1
or greater than 50
:raises ClientResponseError if the HTTP response code isn't 200
"""
try:
return await self.__request(
session, num_questions, category, diffculty, type_)
finally:
if close_session:
session.close()
async def __request(self, session: ClientSession, num_questions: int,
category: Category = None, diffculty: Diffculty = None,
type_: Type = None) -> dict:
"""
Helper method for the async request.
"""
resp = await make_request(
session, self.__url(num_questions, category, diffculty, type_))
result = await resp.json()
if result['response_code'] in (3, 4):
self.token = get_token()
return await self.__request(
session, num_questions, category, diffculty, type_)
else:
return decode_dict(result)
def __url(self, num_questions, category, diffculty, type_):
"""
Helper method to generate request url.
"""
if num_questions < 1 or num_questions > 50:
raise ValueError
url = 'https://opentdb.com/api.php?amount={}&encode=base64'.format(
num_questions)
if category is not None:
url += '&category={}'.format(category.value)
if diffculty is not None:
url += '&difficulty={}'.format(diffculty.value)
if type_ is not None:
url += '&type={}'.format(type_.value)
if self.token is not None:
url += '&token={}'.format(self.token)
return url
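# Illustrative usage sketch (the enum member names below are assumptions and
# may not match pytrivia.enums exactly):
#
#     trivia = Trivia(with_token=True)
#     result = trivia.request(10, Category.Books, Diffculty.Hard, Type.Boolean)
#     print(result['response_code'])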
| [((18, 21, 18, 32), 'pytrivia.__helpers.get_token', 'get_token', ({}, {}), '()', False, 'from pytrivia.__helpers import decode_dict, get_token, make_request\n'), ((48, 25, 48, 36), 'pytrivia.__helpers.get_token', 'get_token', ({}, {}), '()', False, 'from pytrivia.__helpers import decode_dict, get_token, make_request\n'), ((51, 19, 51, 38), 'pytrivia.__helpers.decode_dict', 'decode_dict', ({(51, 31, 51, 37): 'result'}, {}), '(result)', False, 'from pytrivia.__helpers import decode_dict, get_token, make_request\n'), ((103, 25, 103, 36), 'pytrivia.__helpers.get_token', 'get_token', ({}, {}), '()', False, 'from pytrivia.__helpers import decode_dict, get_token, make_request\n'), ((107, 19, 107, 38), 'pytrivia.__helpers.decode_dict', 'decode_dict', ({(107, 31, 107, 37): 'result'}, {}), '(result)', False, 'from pytrivia.__helpers import decode_dict, get_token, make_request\n')] |
py-ranoid/practical-nlp | utils.py | 514fd4da3b72f26597d91cdb89704a849bf6b36d | import requests
import tarfile
import os
def download_file(url, directory):
local_filename = os.path.join(directory, url.split('/')[-1])
print ("Downloading %s --> %s"%(url, local_filename))
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
return local_filename
def extract_tar(fpath):
fname_dir, fname = os.path.split(fpath)
dest_path = os.path.join(fname_dir,fname.split('.')[0])
print ("Extracting %s --> %s"%(fpath, dest_path))
if fname.endswith("tar.gz"):
tar = tarfile.open(fpath, "r:gz")
tar.extractall(path=fname_dir)
tar.close()
elif fname.endswith("tar"):
tar = tarfile.open(fname, "r:")
tar.extractall(path=fname_dir)
tar.close()
return dest_path
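# Illustrative chaining sketch (URL and paths are placeholders):
#
#     archive = download_file("http://example.com/data.tar.gz", "./downloads")
#     dest = extract_tar(archive)   # -> "./downloads/data"
#     list_files(dest)              # list_files is defined below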
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f)) | [((16, 23, 16, 43), 'os.path.split', 'os.path.split', ({(16, 37, 16, 42): 'fpath'}, {}), '(fpath)', False, 'import os\n'), ((30, 29, 30, 47), 'os.walk', 'os.walk', ({(30, 37, 30, 46): 'startpath'}, {}), '(startpath)', False, 'import os\n'), ((8, 9, 8, 39), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((20, 14, 20, 41), 'tarfile.open', 'tarfile.open', ({(20, 27, 20, 32): 'fpath', (20, 34, 20, 40): '"""r:gz"""'}, {}), "(fpath, 'r:gz')", False, 'import tarfile\n'), ((24, 14, 24, 39), 'tarfile.open', 'tarfile.open', ({(24, 27, 24, 32): 'fname', (24, 34, 24, 38): '"""r:"""'}, {}), "(fname, 'r:')", False, 'import tarfile\n'), ((33, 37, 33, 59), 'os.path.basename', 'os.path.basename', ({(33, 54, 33, 58): 'root'}, {}), '(root)', False, 'import os\n')] |
yostudios/Spritemapper | spritecss/config.py | 277cb76a14be639b6d7fa3191bc427409e72ad69 | import shlex
from os import path
from itertools import imap, ifilter
from urlparse import urljoin
from .css import CSSParser, iter_events
def parse_config_stmt(line, prefix="spritemapper."):
line = line.strip()
if line.startswith(prefix) and "=" in line:
(key, value) = line.split("=", 1)
return (key[len(prefix):].strip(), value.strip())
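# For example (illustrative): parse_config_stmt("spritemapper.padding = 2 2")
# returns ("padding", "2 2"); lines without the "spritemapper." prefix or an
# "=" sign fall through and return None.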
def iter_config_stmts(data):
return ifilter(None, imap(parse_config_stmt, data.splitlines()))
def iter_css_config(parser):
for ev in iter_events(parser, lexemes=("comment",)):
for v in iter_config_stmts(ev.comment):
yield v
class CSSConfig(object):
def __init__(self, parser=None, base=None, root=None, fname=None):
if fname and root is None:
root = path.dirname(fname)
self.root = root
self._data = dict(base) if base else {}
if parser is not None:
self._data.update(iter_css_config(parser))
def __iter__(self):
# this is mostly so you can go CSSConfig(base=CSSConfig(..))
return self._data.iteritems()
@classmethod
def from_file(cls, fname):
with open(fname, "rb") as fp:
return cls(CSSParser.from_file(fp), fname=fname)
def normpath(self, p):
"""Normalize a possibly relative path *p* to the root."""
return path.normpath(path.join(self.root, p))
def absurl(self, p):
"""Make an absolute reference to *p* from any configured base URL."""
base = self.base_url
if base:
p = urljoin(base, p)
return p
@property
def base_url(self):
return self._data.get("base_url")
@property
def sprite_dirs(self):
if "sprite_dirs" not in self._data:
return
elif self._data.get("output_image"):
raise RuntimeError("cannot have sprite_dirs "
"when output_image is set")
sdirs = shlex.split(self._data["sprite_dirs"])
return map(self.normpath, sdirs)
@property
def output_image(self):
if "output_image" in self._data:
return self.normpath(self._data["output_image"])
@property
def is_mapping_recursive(self):
rv = self._data.get("recursive")
if rv and self._data.get("output_image"):
raise RuntimeError("cannot have recursive spritemapping "
"when output_image is set")
elif rv is None:
return not self._data.get("output_image")
else:
return bool(rv)
@property
def padding(self):
return self._data.get("padding", (1, 1))
@property
def anneal_steps(self):
return int(self._data.get("anneal_steps", 9200))
def get_spritemap_out(self, dn):
"Get output image filename for spritemap directory *dn*."
if "output_image" in self._data:
return self.output_image
return dn + ".png"
def get_spritemap_url(self, fname):
"Get output image URL for spritemap *fname*."
return self.absurl(path.relpath(fname, self.root))
def get_css_out(self, fname):
"Get output image filename for spritemap directory *fname*."
(dirn, base) = path.split(fname)
if "output_css" in self._data:
(base, ext) = path.splitext(base)
names = dict(filename=fname, dirname=dirn,
basename=base, extension=ext)
return self.normpath(self._data["output_css"].format(**names))
else:
return path.join(dirn, "sm_" + base)
def print_config(fname):
from pprint import pprint
from .css import CSSParser
with open(fname, "rb") as fp:
print "%s\n%s\n" % (fname, "=" * len(fname))
pprint(dict(iter_css_config(CSSParser.read_file(fp))))
print
def main():
import sys
for fn in sys.argv[1:]:
print_config(fn)
if __name__ == "__main__":
main()
| [] |
DanielTakeshi/debridement-code | plotting/make_bar_graph.py | d1a946d1fa3c60b60284c977ecb2d6584e524ae2 | """ A bar graph.
(c) September 2017 by Daniel Seita
"""
import argparse
from collections import defaultdict
from keras.models import Sequential
from keras.layers import Dense, Activation
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sys
np.set_printoptions(suppress=True, linewidth=200)
# Some matplotlib settings.
plt.style.use('seaborn-darkgrid')
titlesize = 21
labelsize = 17
legendsize = 15
ticksize = 15
bar_width = 0.80
opacity = 1.0
error_config = {'ecolor': '0.0', 'linewidth':3.0}
def deprecated():
"""
This is a deprecated method, only to show how to possibly combine these into
    one plot. However, I find this unwieldy.
"""
fig, ax = plt.subplots()
bar_width = 0.80
opacity = 0.5
error_config = {'ecolor': '0.3'}
rects1 = plt.bar(np.array([0,1]), means_lin, bar_width,
alpha=opacity,
color='b',
yerr=std_lin,
error_kw=error_config,
label='Lin')
rects2 = plt.bar(np.array([3,4,5,6,7]), means_rfs, bar_width,
alpha=opacity,
color='r',
yerr=std_rfs,
error_kw=error_config,
label='RF')
rects3 = plt.bar(np.array([9,10]), means_dnn, bar_width,
alpha=opacity,
color='y',
yerr=std_dnn,
error_kw=error_config,
label='DNN')
plt.xticks(np.arange(11) + bar_width / 2,
('A','B','','D','E','F','G','','','J','K'))
plt.xlabel('Group')
plt.ylabel('Scores')
plt.title('Scores by group and gender')
plt.tight_layout()
plt.legend()
plt.savefig('figures/validation_set_results.png')
def plot(results, vv):
lin_mean = []
lin_std = []
lin_keys = []
rfs_mean = []
rfs_std = []
rfs_keys = []
dnn_mean = []
dnn_std = []
dnn_keys = []
sorted_keys = sorted(results.keys())
for key in sorted_keys:
info = [ss['loss'] for ss in results[key]]
if 'Lin' in key:
lin_mean.append(np.mean(info))
lin_std.append(np.std(info))
lin_keys.append(key)
elif 'RFs' in key:
rfs_mean.append(np.mean(info))
rfs_std.append(np.std(info))
rfs_keys.append(key)
elif 'DNN' in key:
dnn_mean.append(np.mean(info))
dnn_std.append(np.std(info))
dnn_keys.append(key)
print("\nlin_mean: {}".format(lin_mean))
print("lin_std: {}".format(lin_std))
print("lin_keys: {}".format(lin_keys))
print("\nrfs_mean: {}".format(rfs_mean))
print("rfs_std: {}".format(rfs_std))
print("rfs_keys: {}".format(rfs_keys))
print("\nDNN results:")
for (mean,std,key) in zip(dnn_mean,dnn_std,dnn_keys):
print("{:.2f}\t{:.2f}\t{}".format(mean,std,key))
# sys.exit()
# Use this to determine which DNN models should be here.
dnn_threshold = 3.0
real_index = 0
for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)):
if mean > dnn_threshold:
continue
real_index += 1
# Gah! Now I can finally make the bar chart. I think it's easiest to have it
# split across three different subplots, one per algorithm category.
width_ratio = [len(lin_keys),len(rfs_keys),real_index]
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16,5),
gridspec_kw={'width_ratios':width_ratio})
for ii,(mean,std,key) in enumerate(zip(lin_mean,lin_std,lin_keys)):
ax[0].bar(np.array([ii]), mean, bar_width,
alpha=opacity,
yerr=std,
error_kw=error_config,
label=key[4:])
for ii,(mean,std,key) in enumerate(zip(rfs_mean,rfs_std,rfs_keys)):
ax[1].bar(np.array([ii]), mean, bar_width,
alpha=opacity,
yerr=std,
error_kw=error_config,
label=key[4:])
real_index = 0
for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)):
if mean > dnn_threshold:
continue
ax[2].bar(np.array([real_index]), mean, bar_width,
alpha=opacity,
yerr=std,
error_kw=error_config,
label=key[4:])
real_index += 1
# Some rather tedious but necessary stuff to make it publication-quality.
ax[0].set_title('Linear', fontsize=titlesize)
ax[1].set_title('Random Forests', fontsize=titlesize)
ax[2].set_title('Deep Neural Networks', fontsize=titlesize)
ax[0].set_ylabel('Average Squared $L_2$, 10-Fold CV', fontsize=labelsize)
for i in range(3):
ax[i].set_xlabel('Algorithm', fontsize=labelsize)
ax[i].set_ylim([0.0,9.0])
ax[i].tick_params(axis='y', labelsize=ticksize)
ax[i].set_xticklabels([])
ax[0].legend(loc="best", ncol=1, prop={'size':legendsize})
ax[1].legend(loc="best", ncol=2, prop={'size':legendsize})
ax[2].legend(loc="best", ncol=3, prop={'size':legendsize})
plt.tight_layout()
plt.savefig('figures/validation_set_results_v'+vv+'.png')
if __name__ == "__main__":
pp = argparse.ArgumentParser()
pp.add_argument('--version', type=int)
pp.add_argument('--kfolds', type=int, default=10)
args = pp.parse_args()
assert args.version is not None
VERSION = str(args.version).zfill(2)
file_name = 'results/results_kfolds10_v'+VERSION+'.npy'
results = np.load(file_name)[()]
print("results has keys: {}".format(results.keys()))
plot(results, VERSION)
| [((11, 0, 11, 21), 'matplotlib.use', 'matplotlib.use', ({(11, 15, 11, 20): '"""Agg"""'}, {}), "('Agg')", False, 'import matplotlib\n'), ((15, 0, 15, 49), 'numpy.set_printoptions', 'np.set_printoptions', (), '', True, 'import numpy as np\n'), ((18, 0, 18, 33), 'matplotlib.pyplot.style.use', 'plt.style.use', ({(18, 14, 18, 32): '"""seaborn-darkgrid"""'}, {}), "('seaborn-darkgrid')", True, 'import matplotlib.pyplot as plt\n'), ((33, 14, 33, 28), 'matplotlib.pyplot.subplots', 'plt.subplots', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((59, 4, 59, 23), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(59, 15, 59, 22): '"""Group"""'}, {}), "('Group')", True, 'import matplotlib.pyplot as plt\n'), ((60, 4, 60, 24), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(60, 15, 60, 23): '"""Scores"""'}, {}), "('Scores')", True, 'import matplotlib.pyplot as plt\n'), ((61, 4, 61, 43), 'matplotlib.pyplot.title', 'plt.title', ({(61, 14, 61, 42): '"""Scores by group and gender"""'}, {}), "('Scores by group and gender')", True, 'import matplotlib.pyplot as plt\n'), ((62, 4, 62, 22), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((63, 4, 63, 16), 'matplotlib.pyplot.legend', 'plt.legend', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((64, 4, 64, 53), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(64, 16, 64, 52): '"""figures/validation_set_results.png"""'}, {}), "('figures/validation_set_results.png')", True, 'import matplotlib.pyplot as plt\n'), ((115, 14, 116, 68), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as plt\n'), ((157, 4, 157, 22), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((158, 4, 158, 61), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(158, 16, 158, 60): "('figures/validation_set_results_v' + vv + '.png')"}, {}), "('figures/validation_set_results_v' + vv + '.png')", True, 'import matplotlib.pyplot as plt\n'), ((162, 9, 162, 34), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((38, 21, 38, 36), 'numpy.array', 'np.array', ({(38, 30, 38, 35): '[0, 1]'}, {}), '([0, 1])', True, 'import numpy as np\n'), ((44, 21, 44, 42), 'numpy.array', 'np.array', ({(44, 30, 44, 41): '[3, 4, 5, 6, 7]'}, {}), '([3, 4, 5, 6, 7])', True, 'import numpy as np\n'), ((50, 21, 50, 37), 'numpy.array', 'np.array', ({(50, 30, 50, 36): '[9, 10]'}, {}), '([9, 10])', True, 'import numpy as np\n'), ((170, 14, 170, 32), 'numpy.load', 'np.load', ({(170, 22, 170, 31): 'file_name'}, {}), '(file_name)', True, 'import numpy as np\n'), ((57, 15, 57, 28), 'numpy.arange', 'np.arange', ({(57, 25, 57, 27): '(11)'}, {}), '(11)', True, 'import numpy as np\n'), ((119, 18, 119, 32), 'numpy.array', 'np.array', ({(119, 27, 119, 31): '[ii]'}, {}), '([ii])', True, 'import numpy as np\n'), ((125, 18, 125, 32), 'numpy.array', 'np.array', ({(125, 27, 125, 31): '[ii]'}, {}), '([ii])', True, 'import numpy as np\n'), ((134, 18, 134, 40), 'numpy.array', 'np.array', ({(134, 27, 134, 39): '[real_index]'}, {}), '([real_index])', True, 'import numpy as np\n'), ((82, 28, 82, 41), 'numpy.mean', 'np.mean', ({(82, 36, 82, 40): 'info'}, {}), '(info)', True, 'import numpy as np\n'), ((83, 27, 83, 39), 'numpy.std', 'np.std', ({(83, 34, 83, 38): 'info'}, {}), '(info)', True, 'import numpy as np\n'), ((86, 28, 86, 41), 'numpy.mean', 'np.mean', ({(86, 36, 86, 40): 'info'}, {}), '(info)', True, 'import numpy as np\n'), 
((87, 27, 87, 39), 'numpy.std', 'np.std', ({(87, 34, 87, 38): 'info'}, {}), '(info)', True, 'import numpy as np\n'), ((90, 28, 90, 41), 'numpy.mean', 'np.mean', ({(90, 36, 90, 40): 'info'}, {}), '(info)', True, 'import numpy as np\n'), ((91, 27, 91, 39), 'numpy.std', 'np.std', ({(91, 34, 91, 38): 'info'}, {}), '(info)', True, 'import numpy as np\n')] |
tzengerink/groceries-api | setup.py | a22cc3503006b87b731b956f6341d730b143bf10 | #!/usr/bin/env python
from setuptools import find_packages, setup
import os
import re
ROOT = os.path.dirname(__file__)
VERSION_RE = re.compile(r'''__version__ = \'([0-9.]+)\'''')
def get_version():
init = open(os.path.join(ROOT, 'application', '__init__.py')).read()
return VERSION_RE.search(init).group(1)
setup(
name='groceries-api',
version=get_version(),
license='MIT',
packages=find_packages(),
include_package_data=True,
install_requires=[
'alembic==0.7.5.post2',
'APScheduler==3.1.0',
'Flask==0.10.1',
'Flask-Cors==2.0.0',
'Flask-SQLAlchemy==2.0',
'gunicorn==19.3.0',
'psycopg2==2.6.1',
'PyJWT==1.1.0',
'requests==2.8.1',
'six==1.9.0',
],
extras_require={
'dev': {
'coverage==3.7.1',
'coveralls==0.5',
'flake8==2.4.0',
'mock==1.0.1',
'pytest==2.7.0',
'tox==2.1.1',
},
},
)
| [((7, 7, 7, 32), 'os.path.dirname', 'os.path.dirname', ({(7, 23, 7, 31): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((8, 13, 8, 59), 're.compile', 're.compile', ({(8, 24, 8, 58): '"""__version__ = \\\\\'([0-9.]+)\\\\\'"""'}, {}), '("__version__ = \\\\\'([0-9.]+)\\\\\'")', False, 'import re\n'), ((20, 13, 20, 28), 'setuptools.find_packages', 'find_packages', ({}, {}), '()', False, 'from setuptools import find_packages, setup\n'), ((12, 16, 12, 64), 'os.path.join', 'os.path.join', ({(12, 29, 12, 33): 'ROOT', (12, 35, 12, 48): '"""application"""', (12, 50, 12, 63): '"""__init__.py"""'}, {}), "(ROOT, 'application', '__init__.py')", False, 'import os\n')] |
SuperM0use24/TT-CL-Edition | toontown/suit/DistributedLawbotBoss.py | fdad8394f0656ae122b687d603f72afafd220c65 | from direct.showbase.ShowBase import *
from direct.interval.IntervalGlobal import *
from toontown.battle.BattleProps import *
from direct.distributed.ClockDelta import *
from direct.showbase.PythonUtil import Functor
from direct.showbase.PythonUtil import StackTrace
from direct.gui.DirectGui import *
from panda3d.core import *
from libotp import *
from direct.fsm import FSM
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import ToontownBattleGlobals
import DistributedBossCog
from toontown.toonbase import TTLocalizer
import SuitDNA
from toontown.toon import Toon
from toontown.battle import BattleBase
from direct.directutil import Mopath
from direct.showutil import Rope
from toontown.distributed import DelayDelete
from toontown.battle import MovieToonVictory
from toontown.building import ElevatorUtils
from toontown.battle import RewardPanel
from toontown.toon import NPCToons
from direct.task import Task
import random
import math
from toontown.coghq import CogDisguiseGlobals
from toontown.building import ElevatorConstants
from toontown.toonbase import ToontownTimer
OneBossCog = None
class DistributedLawbotBoss(DistributedBossCog.DistributedBossCog, FSM.FSM):
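    """Client-side controller for the Lawbot boss (Chief Justice) fight.

    Mixes the shared DistributedBossCog behaviour with an FSM that drives the
    sequence of states: elevator entry, introduction, the first cog battle,
    the cannon/jury round, the evidence-throwing trial round, and the
    victory, defeat, reward and epilogue movies.
    """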
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotBoss')
debugPositions = False
def __init__(self, cr):
        self.notify.debug('----- __init__')
DistributedBossCog.DistributedBossCog.__init__(self, cr)
FSM.FSM.__init__(self, 'DistributedLawbotBoss')
self.lawyers = []
self.lawyerRequest = None
self.bossDamage = 0
self.attackCode = None
self.attackAvId = 0
self.recoverRate = 0
self.recoverStartTime = 0
self.bossDamageMovie = None
self.everThrownPie = 0
self.battleThreeMusicTime = 0
self.insidesANodePath = None
self.insidesBNodePath = None
self.strafeInterval = None
self.onscreenMessage = None
self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage
self.elevatorType = ElevatorConstants.ELEVATOR_CJ
self.gavels = {}
self.chairs = {}
self.cannons = {}
self.useCannons = 1
self.juryBoxIval = None
self.juryTimer = None
self.witnessToon = None
self.witnessToonOnstage = False
self.numToonJurorsSeated = 0
self.mainDoor = None
self.reflectedMainDoor = None
self.panFlashInterval = None
self.panDamage = ToontownGlobals.LawbotBossDefensePanDamage
if base.config.GetBool('lawbot-boss-cheat', 0):
self.panDamage = 25
self.evidenceHitSfx = None
self.toonUpSfx = None
self.bonusTimer = None
self.warningSfx = None
self.juryMovesSfx = None
self.baseColStashed = False
self.battleDifficulty = 0
self.bonusWeight = 0
self.numJurorsLocalToonSeated = 0
self.cannonIndex = -1
return
def announceGenerate(self):
global OneBossCog
self.notify.debug('----- announceGenerate')
DistributedBossCog.DistributedBossCog.announceGenerate(self)
self.setName(TTLocalizer.LawbotBossName)
nameInfo = TTLocalizer.BossCogNameWithDept % {'name': self._name,
'dept': SuitDNA.getDeptFullname(self.style.dept)}
self.setDisplayName(nameInfo)
self.piesRestockSfx = loader.loadSfx('phase_5/audio/sfx/LB_receive_evidence.ogg')
self.rampSlideSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_ramp_slide.ogg')
self.evidenceHitSfx = loader.loadSfx('phase_11/audio/sfx/LB_evidence_hit.ogg')
self.warningSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_GOON_tractor_beam_alarmed.ogg')
self.juryMovesSfx = loader.loadSfx('phase_11/audio/sfx/LB_jury_moves.ogg')
self.toonUpSfx = loader.loadSfx('phase_11/audio/sfx/LB_toonup.ogg')
self.strafeSfx = []
for i in xrange(10):
self.strafeSfx.append(loader.loadSfx('phase_3.5/audio/sfx/SA_shred.ogg'))
render.setTag('pieCode', str(ToontownGlobals.PieCodeNotBossCog))
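        # The collision geometry below is tagged with 'pieCode' values so a pie
        # splat can tell what it hit: the boss himself, his exposed insides, or
        # (later) the scale's defense/prosecution pans.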
insidesA = CollisionPolygon(Point3(4.0, -2.0, 5.0), Point3(-4.0, -2.0, 5.0), Point3(-4.0, -2.0, 0.5), Point3(4.0, -2.0, 0.5))
insidesANode = CollisionNode('BossZap')
insidesANode.addSolid(insidesA)
insidesANode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask)
self.insidesANodePath = self.axle.attachNewNode(insidesANode)
self.insidesANodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides))
self.insidesANodePath.stash()
insidesB = CollisionPolygon(Point3(-4.0, 2.0, 5.0), Point3(4.0, 2.0, 5.0), Point3(4.0, 2.0, 0.5), Point3(-4.0, 2.0, 0.5))
insidesBNode = CollisionNode('BossZap')
insidesBNode.addSolid(insidesB)
insidesBNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask)
self.insidesBNodePath = self.axle.attachNewNode(insidesBNode)
self.insidesBNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides))
self.insidesBNodePath.stash()
target = CollisionTube(0, -1, 4, 0, -1, 9, 3.5)
targetNode = CollisionNode('BossZap')
targetNode.addSolid(target)
targetNode.setCollideMask(ToontownGlobals.PieBitmask)
self.targetNodePath = self.pelvis.attachNewNode(targetNode)
self.targetNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossCog))
shield = CollisionTube(0, 1, 4, 0, 1, 7, 3.5)
shieldNode = CollisionNode('BossZap')
shieldNode.addSolid(shield)
shieldNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.CameraBitmask)
shieldNodePath = self.pelvis.attachNewNode(shieldNode)
disk = loader.loadModel('phase_9/models/char/bossCog-gearCollide')
disk.find('**/+CollisionNode').setName('BossZap')
disk.reparentTo(self.pelvis)
disk.setZ(0.8)
self.loadEnvironment()
self.__makeWitnessToon()
self.__loadMopaths()
localAvatar.chatMgr.chatInputSpeedChat.addCJMenu()
if OneBossCog != None:
self.notify.warning('Multiple BossCogs visible.')
OneBossCog = self
return
def disable(self):
global OneBossCog
self.notify.debug('----- disable')
DistributedBossCog.DistributedBossCog.disable(self)
self.request('Off')
self.unloadEnvironment()
self.__cleanupWitnessToon()
self.__unloadMopaths()
self.__clearOnscreenMessage()
taskMgr.remove(self.uniqueName('PieAdvice'))
self.__cleanupStrafe()
self.__cleanupJuryBox()
render.clearTag('pieCode')
self.targetNodePath.detachNode()
self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest)
self.lawyerRequest = None
self.betweenBattleMusic.stop()
self.promotionMusic.stop()
self.stingMusic.stop()
self.battleTwoMusic.stop()
self.battleThreeMusic.stop()
self.epilogueMusic.stop()
if self.juryTimer:
self.juryTimer.destroy()
del self.juryTimer
if self.bonusTimer:
self.bonusTimer.destroy()
del self.bonusTimer
localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu()
if OneBossCog == self:
OneBossCog = None
return
def delete(self):
self.notify.debug('----- delete')
DistributedBossCog.DistributedBossCog.delete(self)
def d_hitBoss(self, bossDamage):
self.notify.debug('----- d_hitBoss')
self.sendUpdate('hitBoss', [bossDamage])
def d_healBoss(self, bossHeal):
        self.notify.debug('----- d_healBoss')
self.sendUpdate('healBoss', [bossHeal])
def d_hitBossInsides(self):
self.notify.debug('----- d_hitBossInsides')
self.sendUpdate('hitBossInsides', [])
def d_hitDefensePan(self):
self.notify.debug('----- d_hitDefensePan')
self.sendUpdate('hitDefensePan', [])
def d_hitProsecutionPan(self):
self.notify.debug('----- d_hitProsecutionPan')
self.sendUpdate('hitProsecutionPan', [])
def d_hitToon(self, toonId):
self.notify.debug('----- d_hitToon')
self.sendUpdate('hitToon', [toonId])
def gotToon(self, toon):
stateName = self.state
if stateName == 'Elevator':
self.placeToonInElevator(toon)
def setLawyerIds(self, lawyerIds):
self.lawyers = []
self.cr.relatedObjectMgr.abortRequest(self.lawyerRequest)
self.lawyerRequest = self.cr.relatedObjectMgr.requestObjects(lawyerIds, allCallback=self.__gotLawyers)
def __gotLawyers(self, lawyers):
self.lawyerRequest = None
self.lawyers = lawyers
for i in xrange(len(self.lawyers)):
suit = self.lawyers[i]
suit.fsm.request('neutral')
suit.loop('neutral')
suit.setBossCogId(self.doId)
return
def setBossDamage(self, bossDamage, recoverRate, timestamp):
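        # Update from the AI: record the new damage and recovery rate, resync
        # the boss-damage movie to the new value, and restart the task that
        # slowly heals the boss while the recovery rate is non-zero.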
recoverStartTime = globalClockDelta.networkToLocalTime(timestamp)
self.bossDamage = bossDamage
self.recoverRate = recoverRate
self.recoverStartTime = recoverStartTime
taskName = 'RecoverBossDamage'
taskMgr.remove(taskName)
if self.bossDamageMovie:
if self.bossDamage >= self.bossMaxDamage:
self.notify.debug('finish the movie then transition to NearVictory')
self.bossDamageMovie.resumeUntil(self.bossDamageMovie.getDuration())
else:
self.bossDamageMovie.resumeUntil(self.bossDamage * self.bossDamageToMovie)
if self.recoverRate:
taskMgr.add(self.__recoverBossDamage, taskName)
self.makeScaleReflectDamage()
self.bossHealthBar.update(self.bossMaxDamage - bossDamage, self.bossMaxDamage)
def getBossDamage(self):
self.notify.debug('----- getBossDamage')
now = globalClock.getFrameTime()
elapsed = now - self.recoverStartTime
return max(self.bossDamage - self.recoverRate * elapsed / 60.0, 0)
def __recoverBossDamage(self, task):
self.notify.debug('----- __recoverBossDamage')
if self.bossDamageMovie:
self.bossDamageMovie.setT(self.getBossDamage() * self.bossDamageToMovie)
return Task.cont
def __walkToonToPromotion(self, toonId, delay, mopath, track, delayDeletes):
self.notify.debug('----- __walkToonToPromotion')
toon = base.cr.doId2do.get(toonId)
if toon:
destPos = toon.getPos()
self.placeToonInElevator(toon)
toon.wrtReparentTo(render)
ival = Sequence(Wait(delay), Func(toon.suit.setPlayRate, 1, 'walk'), Func(toon.suit.loop, 'walk'), toon.posInterval(1, Point3(0, 90, 20)), ParallelEndTogether(MopathInterval(mopath, toon), toon.posInterval(2, destPos, blendType='noBlend')), Func(toon.suit.loop, 'neutral'))
track.append(ival)
delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.__walkToonToPromotion'))
def __walkSuitToPoint(self, node, fromPos, toPos):
self.notify.debug('----- __walkSuitToPoint')
vector = Vec3(toPos - fromPos)
distance = vector.length()
time = distance / (ToontownGlobals.SuitWalkSpeed * 1.8)
return Sequence(Func(node.setPos, fromPos), Func(node.headsUp, toPos), node.posInterval(time, toPos))
def __makeRollToBattleTwoMovie(self):
startPos = Point3(ToontownGlobals.LawbotBossBattleOnePosHpr[0], ToontownGlobals.LawbotBossBattleOnePosHpr[1], ToontownGlobals.LawbotBossBattleOnePosHpr[2])
if self.arenaSide:
topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosB)
topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosB)
p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosB)
else:
topRampPos = Point3(*ToontownGlobals.LawbotBossTopRampPosA)
topRampTurnPos = Point3(*ToontownGlobals.LawbotBossTopRampTurnPosA)
p3Pos = Point3(*ToontownGlobals.LawbotBossP3PosA)
battlePos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2])
battleHpr = VBase3(ToontownGlobals.LawbotBossBattleTwoPosHpr[3], ToontownGlobals.LawbotBossBattleTwoPosHpr[4], ToontownGlobals.LawbotBossBattleTwoPosHpr[5])
bossTrack = Sequence()
self.notify.debug('calling setPosHpr')
myInterval = camera.posHprInterval(8, Point3(-22, -100, 35), Point3(-10, -13, 0), startPos=Point3(-22, -90, 35), startHpr=Point3(-10, -13, 0), blendType='easeInOut')
chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempJury1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0), Func(self.releaseToons, 1))
bossTrack.append(Func(self.getGeomNode().setH, 180))
track, hpr = self.rollBossToPoint(startPos, None, battlePos, None, 0)
bossTrack.append(track)
track, hpr = self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0)
self.makeToonsWait()
finalPodiumPos = Point3(self.podium.getX(), self.podium.getY(), self.podium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2])
finalReflectedPodiumPos = Point3(self.reflectedPodium.getX(), self.reflectedPodium.getY(), self.reflectedPodium.getZ() + ToontownGlobals.LawbotBossBattleTwoPosHpr[2])
return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), Parallel(self.podium.posInterval(5.0, finalPodiumPos), self.reflectedPodium.posInterval(5.0, finalReflectedPodiumPos), Func(self.stashBoss), self.posInterval(5.0, battlePos), Func(taskMgr.doMethodLater, 0.01, self.unstashBoss, 'unstashBoss')), name=self.uniqueName('BattleTwoMovie'))
def __makeRollToBattleThreeMovie(self):
startPos = Point3(ToontownGlobals.LawbotBossBattleTwoPosHpr[0], ToontownGlobals.LawbotBossBattleTwoPosHpr[1], ToontownGlobals.LawbotBossBattleTwoPosHpr[2])
battlePos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2])
battleHpr = VBase3(ToontownGlobals.LawbotBossBattleThreePosHpr[3], ToontownGlobals.LawbotBossBattleThreePosHpr[4], ToontownGlobals.LawbotBossBattleThreePosHpr[5])
bossTrack = Sequence()
myInterval = camera.posHprInterval(8, Point3(-22, -100, 35), Point3(-10, -13, 0), startPos=Point3(-22, -90, 35), startHpr=Point3(-10, -13, 0), blendType='easeInOut')
chatTrack = Sequence(Func(self.setChatAbsolute, TTLocalizer.LawbotBossTrialChat1, CFSpeech), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPos()), Func(camera.setHpr, 0, 0, 0), Func(self.releaseToons, 1))
bossTrack.append(Func(self.getGeomNode().setH, 180))
bossTrack.append(Func(self.loop, 'Ff_neutral'))
track, hpr = self.rollBossToPoint(startPos, None, battlePos, None, 0)
bossTrack.append(track)
track, hpr = self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0)
self.makeToonsWait()
return Sequence(chatTrack, bossTrack, Func(self.getGeomNode().setH, 0), name=self.uniqueName('BattleTwoMovie'))
def toNeutralMode(self):
if self.cr:
place = self.cr.playGame.getPlace()
if place and hasattr(place, 'fsm'):
place.setState('waitForBattle')
def makeToonsWait(self):
self.notify.debug('makeToonsWait')
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
toon.stopLookAround()
toon.stopSmooth()
if self.hasLocalToon():
self.toMovieMode()
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
toon.loop('neutral')
def makeEndOfBattleMovie(self, hasLocalToon):
name = self.uniqueName('Drop')
seq = Sequence(name=name)
seq += [Wait(0.0)]
if hasLocalToon:
seq += [Func(self.show),
Func(camera.reparentTo, localAvatar),
Func(camera.setPos, localAvatar.getOldCameraPos()),
Func(camera.setHpr, 0, 0, 0)]
seq.append(Func(self.setChatAbsolute, TTLocalizer.LawbotBossPassExam, CFSpeech))
seq.append(Wait(5.0))
seq.append(Func(self.clearChat))
return seq
def __makeBossDamageMovie(self):
        self.notify.debug('----- __makeBossDamageMovie')
startPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2])
startHpr = Point3(*ToontownGlobals.LawbotBossBattleThreeHpr)
bottomPos = Point3(*ToontownGlobals.LawbotBossBottomPos)
deathPos = Point3(*ToontownGlobals.LawbotBossDeathPos)
self.setPosHpr(startPos, startHpr)
bossTrack = Sequence()
bossTrack.append(Func(self.loop, 'Ff_neutral'))
track, hpr = self.rollBossToPoint(startPos, startHpr, bottomPos, None, 1)
bossTrack.append(track)
track, hpr = self.rollBossToPoint(bottomPos, startHpr, deathPos, None, 1)
bossTrack.append(track)
duration = bossTrack.getDuration()
return bossTrack
def __showOnscreenMessage(self, text):
        self.notify.debug('----- __showOnscreenMessage')
if self.onscreenMessage:
self.onscreenMessage.destroy()
self.onscreenMessage = None
self.onscreenMessage = DirectLabel(text=text, text_fg=VBase4(1, 1, 1, 1), text_align=TextNode.ACenter, relief=None, pos=(0, 0, 0.35), scale=0.1)
return
def __clearOnscreenMessage(self):
if self.onscreenMessage:
self.onscreenMessage.destroy()
self.onscreenMessage = None
return
def __showWaitingMessage(self, task):
self.notify.debug('----- __showWaitingMessage')
self.__showOnscreenMessage(TTLocalizer.BuildingWaitingForVictors)
def loadEnvironment(self):
self.notify.debug('----- loadEnvironment')
DistributedBossCog.DistributedBossCog.loadEnvironment(self)
self.geom = loader.loadModel('phase_11/models/lawbotHQ/LawbotCourtroom3')
self.geom.setPos(0, 0, -71.601)
self.geom.setScale(1)
self.elevatorEntrance = self.geom.find('**/elevator_origin')
self.elevatorEntrance.getChildren().detach()
self.elevatorEntrance.setScale(1)
elevatorModel = loader.loadModel('phase_11/models/lawbotHQ/LB_Elevator')
elevatorModel.reparentTo(self.elevatorEntrance)
self.setupElevator(elevatorModel)
self.promotionMusic = base.loader.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg')
self.betweenBattleMusic = base.loader.loadMusic('phase_9/audio/bgm/encntr_toon_winning.ogg')
self.battleTwoMusic = base.loader.loadMusic('phase_11/audio/bgm/LB_juryBG.ogg')
floor = self.geom.find('**/MidVaultFloor1')
if floor.isEmpty():
floor = self.geom.find('**/CR3_Floor')
self.evFloor = self.replaceCollisionPolysWithPlanes(floor)
self.evFloor.reparentTo(self.geom)
self.evFloor.setName('floor')
plane = CollisionPlane(Plane(Vec3(0, 0, 1), Point3(0, 0, -50)))
planeNode = CollisionNode('dropPlane')
planeNode.addSolid(plane)
planeNode.setCollideMask(ToontownGlobals.PieBitmask)
self.geom.attachNewNode(planeNode)
self.door3 = self.geom.find('**/SlidingDoor1/')
if self.door3.isEmpty():
self.door3 = self.geom.find('**/interior/CR3_Door')
self.mainDoor = self.geom.find('**/Door_1')
if not self.mainDoor.isEmpty():
itemsToHide = ['interior/Door_1']
            for itemName in itemsToHide:
                stuffToHide = self.geom.find('**/%s' % itemName)
if not stuffToHide.isEmpty():
self.notify.debug('found %s' % stuffToHide)
stuffToHide.wrtReparentTo(self.mainDoor)
else:
self.notify.debug('not found %s' % stuffToHide)
self.reflectedMainDoor = self.geom.find('**/interiorrefl/CR3_Door')
if not self.reflectedMainDoor.isEmpty():
itemsToHide = ['Reflections/Door_1']
            for itemName in itemsToHide:
                stuffToHide = self.geom.find('**/%s' % itemName)
if not stuffToHide.isEmpty():
self.notify.debug('found %s' % stuffToHide)
stuffToHide.wrtReparentTo(self.reflectedMainDoor)
else:
self.notify.debug('not found %s' % stuffToHide)
self.geom.reparentTo(render)
self.loadWitnessStand()
self.loadScale()
self.scaleNodePath.stash()
self.loadJuryBox()
self.loadPodium()
ug = self.geom.find('**/Reflections')
ug.setBin('ground', -10)
def loadJuryBox(self):
self.juryBox = self.geom.find('**/JuryBox')
juryBoxPos = self.juryBox.getPos()
newPos = juryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos)
if not self.debugPositions:
self.juryBox.setPos(newPos)
self.reflectedJuryBox = self.geom.find('**/JuryBox_Geo_Reflect')
reflectedJuryBoxPos = self.reflectedJuryBox.getPos()
newReflectedPos = reflectedJuryBoxPos - Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos)
if not self.debugPositions:
self.reflectedJuryBox.setPos(newReflectedPos)
if not self.reflectedJuryBox.isEmpty():
if self.debugPositions:
self.reflectedJuryBox.show()
self.reflectedJuryBox.setZ(self.reflectedJuryBox.getZ() + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2])
def loadPodium(self):
self.podium = self.geom.find('**/Podium')
newZ = self.podium.getZ() - ToontownGlobals.LawbotBossBattleTwoPosHpr[2]
if not self.debugPositions:
self.podium.setZ(newZ)
self.reflectedPodium = self.geom.find('**/Podium_Geo1_Refl')
reflectedZ = self.reflectedPodium.getZ()
if not self.debugPositions:
self.reflectedPodium.setZ(reflectedZ)
if not self.reflectedPodium.isEmpty():
if self.debugPositions:
self.reflectedPodium.show()
def loadCannons(self):
pass
def loadWitnessStand(self):
self.realWitnessStand = self.geom.find('**/WitnessStand')
if not self.realWitnessStand.isEmpty():
pass
self.reflectedWitnessStand = self.geom.find('**/Witnessstand_Geo_Reflect')
if not self.reflectedWitnessStand.isEmpty():
pass
colNode = self.realWitnessStand.find('**/witnessStandCollisions/Witnessstand_Collision')
colNode.setName('WitnessStand')
def loadScale(self):
self.useProgrammerScale = base.config.GetBool('want-injustice-scale-debug', 0)
if self.useProgrammerScale:
self.loadScaleOld()
else:
self.loadScaleNew()
def __debugScale(self):
prosecutionPanPos = self.prosecutionPanNodePath.getPos()
origin = Point3(0, 0, 0)
prosecutionPanRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionPanNodePath, origin)
panRenderPos = render.getRelativePoint(self.prosecutionPanNodePath, origin)
self.notify.debug('prosecutionPanPos = %s' % prosecutionPanPos)
self.notify.debug('prosecutionPanRelPos = %s' % prosecutionPanRelPos)
self.notify.debug('panRenderPos = %s' % panRenderPos)
prosecutionLocatorPos = self.prosecutionLocator.getPos()
prosecutionLocatorRelPos = self.scaleNodePath.getRelativePoint(self.prosecutionLocator, origin)
locatorRenderPos = render.getRelativePoint(self.prosecutionLocator, origin)
self.notify.debug('prosecutionLocatorPos = %s ' % prosecutionLocatorPos)
self.notify.debug('prosecutionLocatorRelPos = %s ' % prosecutionLocatorRelPos)
self.notify.debug('locatorRenderPos = %s' % locatorRenderPos)
beamPos = self.beamNodePath.getPos()
beamRelPos = self.scaleNodePath.getRelativePoint(self.beamNodePath, origin)
beamRenderPos = render.getRelativePoint(self.beamNodePath, origin)
self.notify.debug('beamPos = %s' % beamPos)
self.notify.debug('beamRelPos = %s' % beamRelPos)
self.notify.debug('beamRenderPos = %s' % beamRenderPos)
beamBoundsCenter = self.beamNodePath.getBounds().getCenter()
self.notify.debug('beamBoundsCenter = %s' % beamBoundsCenter)
beamLocatorBounds = self.beamLocator.getBounds()
beamLocatorPos = beamLocatorBounds.getCenter()
self.notify.debug('beamLocatorPos = %s' % beamLocatorPos)
def loadScaleNew(self):
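        # Build the "injustice scale" from its model: the beam carries the
        # defense and prosecution pans (parented at their locator nodes), the
        # pan collisions are tagged with pieCode values so evidence hits can be
        # told apart, and the pie/camera bits are masked off the base collision.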
self.scaleNodePath = loader.loadModel('phase_11/models/lawbotHQ/scale')
self.beamNodePath = self.scaleNodePath.find('**/scaleBeam')
self.defensePanNodePath = self.scaleNodePath.find('**/defensePan')
self.prosecutionPanNodePath = self.scaleNodePath.find('**/prosecutionPan')
self.defenseColNodePath = self.scaleNodePath.find('**/DefenseCol')
self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan))
self.prosecutionColNodePath = self.scaleNodePath.find('**/ProsecutionCol')
self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan))
self.standNodePath = self.scaleNodePath.find('**/scaleStand')
self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr)
self.defenseLocator = self.scaleNodePath.find('**/DefenseLocator')
defenseLocBounds = self.defenseLocator.getBounds()
defenseLocPos = defenseLocBounds.getCenter()
self.notify.debug('defenseLocatorPos = %s' % defenseLocPos)
self.defensePanNodePath.setPos(defenseLocPos)
self.defensePanNodePath.reparentTo(self.beamNodePath)
self.notify.debug('defensePanNodePath.getPos()=%s' % self.defensePanNodePath.getPos())
self.prosecutionLocator = self.scaleNodePath.find('**/ProsecutionLocator')
prosecutionLocBounds = self.prosecutionLocator.getBounds()
prosecutionLocPos = prosecutionLocBounds.getCenter()
self.notify.debug('prosecutionLocatorPos = %s' % prosecutionLocPos)
self.prosecutionPanNodePath.setPos(prosecutionLocPos)
self.prosecutionPanNodePath.reparentTo(self.beamNodePath)
self.beamLocator = self.scaleNodePath.find('**/StandLocator1')
beamLocatorBounds = self.beamLocator.getBounds()
beamLocatorPos = beamLocatorBounds.getCenter()
negBeamLocatorPos = -beamLocatorPos
self.notify.debug('beamLocatorPos = %s' % beamLocatorPos)
self.notify.debug('negBeamLocatorPos = %s' % negBeamLocatorPos)
self.beamNodePath.setPos(beamLocatorPos)
self.scaleNodePath.setScale(*ToontownGlobals.LawbotBossInjusticeScale)
self.scaleNodePath.wrtReparentTo(self.geom)
self.baseHighCol = self.scaleNodePath.find('**/BaseHighCol')
oldBitMask = self.baseHighCol.getCollideMask()
newBitMask = oldBitMask & ~ToontownGlobals.PieBitmask
newBitMask = newBitMask & ~ToontownGlobals.CameraBitmask
self.baseHighCol.setCollideMask(newBitMask)
self.defenseHighCol = self.scaleNodePath.find('**/DefenseHighCol')
self.defenseHighCol.stash()
self.defenseHighCol.setCollideMask(newBitMask)
self.baseTopCol = self.scaleNodePath.find('**/Scale_base_top_collision')
self.baseSideCol = self.scaleNodePath.find('**/Scale_base_side_col')
self.defenseLocator.hide()
self.prosecutionLocator.hide()
self.beamLocator.hide()
def loadScaleOld(self):
startingTilt = 0
self.scaleNodePath = NodePath('injusticeScale')
beamGeom = self.createBlock(0.25, 2, 0.125, -0.25, -2, -0.125, 0, 1.0, 0, 1.0)
self.beamNodePath = NodePath('scaleBeam')
self.beamNodePath.attachNewNode(beamGeom)
self.beamNodePath.setPos(0, 0, 3)
self.beamNodePath.reparentTo(self.scaleNodePath)
defensePanGeom = self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2, 0, 0, 1.0, 0.25)
self.defensePanNodePath = NodePath('defensePan')
self.defensePanNodePath.attachNewNode(defensePanGeom)
self.defensePanNodePath.setPos(0, -2, 0)
self.defensePanNodePath.reparentTo(self.beamNodePath)
defenseTube = CollisionTube(0, 0, -0.5, 0, 0, -1.5, 0.6)
defenseTube.setTangible(1)
defenseCollNode = CollisionNode('DefenseCol')
defenseCollNode.addSolid(defenseTube)
self.defenseColNodePath = self.defensePanNodePath.attachNewNode(defenseCollNode)
self.defenseColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeDefensePan))
prosecutionPanGeom = self.createBlock(0.5, 0.5, 0, -0.5, -0.5, -2, 1.0, 0, 0, 1.0)
self.prosecutionPanNodePath = NodePath('prosecutionPan')
self.prosecutionPanNodePath.attachNewNode(prosecutionPanGeom)
self.prosecutionPanNodePath.setPos(0, 2, 0)
self.prosecutionPanNodePath.reparentTo(self.beamNodePath)
prosecutionTube = CollisionTube(0, 0, -0.5, 0, 0, -1.5, 0.6)
prosecutionTube.setTangible(1)
prosecutionCollNode = CollisionNode(self.uniqueName('ProsecutionCol'))
prosecutionCollNode.addSolid(prosecutionTube)
self.prosecutionColNodePath = self.prosecutionPanNodePath.attachNewNode(prosecutionCollNode)
self.prosecutionColNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeProsecutionPan))
standGeom = self.createBlock(0.25, 0.25, 0, -0.25, -0.25, 3)
self.standNodePath = NodePath('scaleStand')
self.standNodePath.attachNewNode(standGeom)
self.standNodePath.reparentTo(self.scaleNodePath)
self.scaleNodePath.setPosHpr(*ToontownGlobals.LawbotBossInjusticePosHpr)
self.scaleNodePath.setScale(5.0)
self.scaleNodePath.wrtReparentTo(self.geom)
self.setScaleTilt(startingTilt)
def setScaleTilt(self, tilt):
self.beamNodePath.setP(tilt)
if self.useProgrammerScale:
self.defensePanNodePath.setP(-tilt)
self.prosecutionPanNodePath.setP(-tilt)
else:
self.defensePanNodePath.setP(-tilt)
self.prosecutionPanNodePath.setP(-tilt)
def stashBaseCol(self):
if not self.baseColStashed:
self.notify.debug('stashBaseCol')
self.baseTopCol.stash()
self.baseSideCol.stash()
self.baseColStashed = True
def unstashBaseCol(self):
if self.baseColStashed:
self.notify.debug('unstashBaseCol')
self.baseTopCol.unstash()
self.baseSideCol.unstash()
self.baseColStashed = False
def makeScaleReflectDamage(self):
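        # Tilt the scale in proportion to how far bossDamage has moved from its
        # starting value, and stash the base collision once damage passes 85%
        # of the maximum.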
diffDamage = self.bossDamage - ToontownGlobals.LawbotBossInitialDamage
diffDamage *= 1.0
if diffDamage >= 0:
percentDamaged = diffDamage / (ToontownGlobals.LawbotBossMaxDamage - ToontownGlobals.LawbotBossInitialDamage)
tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt
else:
percentDamaged = diffDamage / (ToontownGlobals.LawbotBossInitialDamage - 0)
tilt = percentDamaged * ToontownGlobals.LawbotBossWinningTilt
self.setScaleTilt(tilt)
if self.bossDamage < ToontownGlobals.LawbotBossMaxDamage * 0.85:
self.unstashBaseCol()
else:
self.stashBaseCol()
def unloadEnvironment(self):
self.notify.debug('----- unloadEnvironment')
DistributedBossCog.DistributedBossCog.unloadEnvironment(self)
self.geom.removeNode()
del self.geom
def __loadMopaths(self):
self.notify.debug('----- __loadMopaths')
self.toonsEnterA = Mopath.Mopath()
self.toonsEnterA.loadFile('phase_9/paths/bossBattle-toonsEnterA')
self.toonsEnterA.fFaceForward = 1
self.toonsEnterA.timeScale = 35
self.toonsEnterB = Mopath.Mopath()
self.toonsEnterB.loadFile('phase_9/paths/bossBattle-toonsEnterB')
self.toonsEnterB.fFaceForward = 1
self.toonsEnterB.timeScale = 35
def __unloadMopaths(self):
self.notify.debug('----- __unloadMopaths')
self.toonsEnterA.reset()
self.toonsEnterB.reset()
def enterOff(self):
self.notify.debug('----- enterOff')
DistributedBossCog.DistributedBossCog.enterOff(self)
if self.witnessToon:
self.witnessToon.clearChat()
def enterWaitForToons(self):
self.notify.debug('----- enterWaitForToons')
DistributedBossCog.DistributedBossCog.enterWaitForToons(self)
self.geom.hide()
self.witnessToon.removeActive()
def exitWaitForToons(self):
self.notify.debug('----- exitWaitForToons')
DistributedBossCog.DistributedBossCog.exitWaitForToons(self)
self.geom.show()
self.witnessToon.addActive()
def enterElevator(self):
self.notify.debug('----- enterElevator')
DistributedBossCog.DistributedBossCog.enterElevator(self)
self.witnessToon.removeActive()
self.reparentTo(render)
self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr)
self.happy = 1
self.raised = 1
self.forward = 1
self.doAnimate()
self.__hideWitnessToon()
if not self.mainDoor.isEmpty():
self.mainDoor.stash()
if not self.reflectedMainDoor.isEmpty():
self.reflectedMainDoor.stash()
camera.reparentTo(self.elevatorModel)
camera.setPosHpr(0, 30, 8, 180, 0, 0)
def exitElevator(self):
self.notify.debug('----- exitElevator')
DistributedBossCog.DistributedBossCog.exitElevator(self)
self.witnessToon.removeActive()
def enterIntroduction(self):
self.notify.debug('----- enterIntroduction')
self.reparentTo(render)
self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr)
self.stopAnimate()
self.__hideWitnessToon()
DistributedBossCog.DistributedBossCog.enterIntroduction(self)
base.playMusic(self.promotionMusic, looping=1, volume=0.9)
if not self.mainDoor.isEmpty():
self.mainDoor.stash()
if not self.reflectedMainDoor.isEmpty():
self.reflectedMainDoor.stash()
def exitIntroduction(self):
self.notify.debug('----- exitIntroduction')
DistributedBossCog.DistributedBossCog.exitIntroduction(self)
self.promotionMusic.stop()
if not self.mainDoor.isEmpty():
pass
if not self.reflectedMainDoor.isEmpty():
self.reflectedMainDoor.unstash()
if not self.elevatorEntrance.isEmpty():
pass
def enterBattleOne(self):
self.notify.debug('----- LawbotBoss.enterBattleOne ')
DistributedBossCog.DistributedBossCog.enterBattleOne(self)
self.reparentTo(render)
self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr)
self.clearChat()
self.loop('Ff_neutral')
self.notify.debug('self.battleANode = %s' % self.battleANode)
self.__hideWitnessToon()
if self.battleA == None or self.battleB == None:
pass
return
def exitBattleOne(self):
self.notify.debug('----- exitBattleOne')
DistributedBossCog.DistributedBossCog.exitBattleOne(self)
def stashBoss(self):
self.stash()
def unstashBoss(self, task):
self.unstash()
self.reparentTo(render)
def enterRollToBattleTwo(self):
self.notify.debug('----- enterRollToBattleTwo')
self.releaseToons(finalBattle=1)
self.stashBoss()
self.toonsToBattlePosition(self.involvedToons, self.battleANode)
self.stickBossToFloor()
intervalName = 'RollToBattleTwo'
seq = Sequence(self.__makeRollToBattleTwoMovie(), Func(self.__onToPrepareBattleTwo), name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9)
taskMgr.doMethodLater(0.01, self.unstashBoss, 'unstashBoss')
def __onToPrepareBattleTwo(self):
self.notify.debug('----- __onToPrepareBattleTwo')
self.unstickBoss()
self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr)
self.doneBarrier('RollToBattleTwo')
def exitRollToBattleTwo(self):
self.notify.debug('----- exitRollToBattleTwo')
self.unstickBoss()
intervalName = 'RollToBattleTwo'
self.clearInterval(intervalName)
self.betweenBattleMusic.stop()
def enterPrepareBattleTwo(self):
self.notify.debug('----- enterPrepareBattleTwo')
self.cleanupIntervals()
self.controlToons()
self.setToonsToNeutral(self.involvedToons)
self.clearChat()
self.reparentTo(render)
self.__showWitnessToon()
prepareBattleTwoMovie = self.__makePrepareBattleTwoMovie()
intervalName = 'prepareBattleTwo'
seq = Sequence(prepareBattleTwoMovie, name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
self.acceptOnce('doneChatPage', self.__showCannonsAppearing)
base.playMusic(self.stingMusic, looping=0, volume=1.0)
def __showCannonsAppearing(self, elapsedTime = 0):
allCannonsAppear = Sequence(Func(self.__positionToonsInFrontOfCannons), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.getOldCameraPosTwo()), Func(camera.lookAt, localAvatar))
multiCannons = Parallel()
index = 0
self.involvedToons.sort()
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
if index in self.cannons:
cannon = self.cannons[index]
cannonSeq = cannon.generateCannonAppearTrack(toon)
multiCannons.append(cannonSeq)
index += 1
else:
self.notify.warning('No cannon %d but we have a toon =%d' % (index, toonId))
allCannonsAppear.append(multiCannons)
intervalName = 'prepareBattleTwoCannonsAppear'
seq = Sequence(allCannonsAppear, Func(self.__onToBattleTwo), name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
def __onToBattleTwo(self, elapsedTime = 0):
self.notify.debug('----- __onToBattleTwo')
self.doneBarrier('PrepareBattleTwo')
taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage'))
def exitPrepareBattleTwo(self):
self.notify.debug('----- exitPrepareBattleTwo')
self.show()
taskMgr.remove(self.uniqueName('WaitingMessage'))
self.ignore('doneChatPage')
self.__clearOnscreenMessage()
self.stingMusic.stop()
def enterBattleTwo(self):
self.notify.debug('----- enterBattleTwo')
self.cleanupIntervals()
mult = ToontownBattleGlobals.getBossBattleCreditMultiplier(2)
localAvatar.inventory.setBattleCreditMultiplier(mult)
self.reparentTo(render)
self.setPosHpr(*ToontownGlobals.LawbotBossBattleTwoPosHpr)
self.clearChat()
self.witnessToon.clearChat()
self.releaseToons(finalBattle=1)
self.__showWitnessToon()
if not self.useCannons:
self.toonsToBattlePosition(self.toonsA, self.battleANode)
self.toonsToBattlePosition(self.toonsB, self.battleBNode)
base.playMusic(self.battleTwoMusic, looping=1, volume=0.9)
self.startJuryBoxMoving()
for index in xrange(len(self.cannons)):
cannon = self.cannons[index]
cannon.cannon.show()
def getChairParent(self):
return self.juryBox
def startJuryBoxMoving(self):
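        # Slide the jury box (and its reflection) from the holding position to
        # the courtroom over LawbotBossJuryBoxMoveTime seconds, with a looping
        # movement sound and an on-screen countdown timer.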
if self.juryBoxIval:
self.juryBoxIval.finish()
self.juryBoxIval = None
self.juryBox.setPos(-30, 0, -12.645)
self.reflectedJuryBox.setPos(-30, 0, 0)
curPos = self.juryBox.getPos()
endingAbsPos = Point3(curPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2])
curReflectedPos = self.reflectedJuryBox.getPos()
reflectedEndingAbsPos = Point3(curReflectedPos[0] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[0], curReflectedPos[1] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[1], curReflectedPos[2] + ToontownGlobals.LawbotBossJuryBoxRelativeEndPos[2])
self.juryBoxIval = Parallel(self.juryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, endingAbsPos), self.reflectedJuryBox.posInterval(ToontownGlobals.LawbotBossJuryBoxMoveTime, reflectedEndingAbsPos), SoundInterval(self.juryMovesSfx, node=self.chairs[2].nodePath, duration=ToontownGlobals.LawbotBossJuryBoxMoveTime, loop=1, volume=1.0))
self.juryBoxIval.start()
self.juryTimer = ToontownTimer.ToontownTimer()
self.juryTimer.posInTopRightCorner()
self.juryTimer.countdown(ToontownGlobals.LawbotBossJuryBoxMoveTime)
def exitBattleTwo(self):
self.notify.debug('----- exitBattleTwo')
intervalName = self.uniqueName('Drop')
self.clearInterval(intervalName)
self.cleanupBattles()
self.battleTwoMusic.stop()
localAvatar.inventory.setBattleCreditMultiplier(1)
if self.juryTimer:
self.juryTimer.destroy()
del self.juryTimer
self.juryTimer = None
for chair in self.chairs.values():
chair.stopCogsFlying()
return
def enterRollToBattleThree(self):
self.notify.debug('----- enterRollToBattleThree')
self.reparentTo(render)
self.stickBossToFloor()
intervalName = 'RollToBattleThree'
seq = Sequence(self.__makeRollToBattleThreeMovie(), Func(self.__onToPrepareBattleThree), name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9)
def __onToPrepareBattleThree(self):
self.notify.debug('----- __onToPrepareBattleThree')
self.unstickBoss()
self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr)
self.doneBarrier('RollToBattleThree')
def exitRollToBattleThree(self):
self.notify.debug('----- exitRollToBattleThree')
self.unstickBoss()
intervalName = 'RollToBattleThree'
self.clearInterval(intervalName)
self.betweenBattleMusic.stop()
def enterPrepareBattleThree(self):
self.notify.debug('----- enterPrepareBattleThree')
self.cleanupIntervals()
self.controlToons()
self.setToonsToNeutral(self.involvedToons)
self.clearChat()
self.reparentTo(render)
base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9)
self.__showWitnessToon()
prepareBattleThreeMovie = self.__makePrepareBattleThreeMovie()
self.acceptOnce('doneChatPage', self.__onToBattleThree)
intervalName = 'prepareBattleThree'
seq = Sequence(prepareBattleThreeMovie, name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
def __onToBattleThree(self, elapsed):
self.notify.debug('----- __onToBattleThree')
self.doneBarrier('PrepareBattleThree')
taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage'))
def exitPrepareBattleThree(self):
self.notify.debug('----- exitPrepareBattleThree')
self.show()
taskMgr.remove(self.uniqueName('WaitingMessage'))
self.ignore('doneChatPage')
intervalName = 'PrepareBattleThree'
self.clearInterval(intervalName)
self.__clearOnscreenMessage()
self.betweenBattleMusic.stop()
def enterBattleThree(self):
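        # Final round: toons throw evidence at the boss and the scale pans.
        # Hook up every pie-related event, pin the boss to the floor, and show
        # the witness toon plus the boss health bar.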
DistributedBossCog.DistributedBossCog.enterBattleThree(self)
self.scaleNodePath.unstash()
localAvatar.setPos(-3, 0, 0)
base.localAvatar.orbitalCamera.start()
self.clearChat()
self.witnessToon.clearChat()
self.reparentTo(render)
self.happy = 1
self.raised = 1
self.forward = 1
self.doAnimate()
self.accept('enterWitnessStand', self.__touchedWitnessStand)
self.accept('pieSplat', self.__pieSplat)
self.accept('localPieSplat', self.__localPieSplat)
self.accept('outOfPies', self.__outOfPies)
self.accept('begin-pie', self.__foundPieButton)
self.accept('enterDefenseCol', self.__enterDefenseCol)
self.accept('enterProsecutionCol', self.__enterProsecutionCol)
localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
taskMgr.doMethodLater(30, self.__howToGetPies, self.uniqueName('PieAdvice'))
self.stickBossToFloor()
self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr)
self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage
base.playMusic(self.battleThreeMusic, looping=1, volume=0.9)
self.__showWitnessToon()
diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty]
self.bossHealthBar.initialize(self.bossMaxDamage - self.bossDamage, self.bossMaxDamage)
if diffSettings[4]:
localAvatar.chatMgr.chatInputSpeedChat.removeCJMenu()
localAvatar.chatMgr.chatInputSpeedChat.addCJMenu(self.bonusWeight)
def __doneBattleThree(self):
self.notify.debug('----- __doneBattleThree')
self.setState('NearVictory')
self.unstickBoss()
def exitBattleThree(self):
self.notify.debug('----- exitBattleThree')
DistributedBossCog.DistributedBossCog.exitBattleThree(self)
NametagGlobals.setMasterArrowsOn(1)
bossDoneEventName = self.uniqueName('DestroyedBoss')
self.ignore(bossDoneEventName)
taskMgr.remove(self.uniqueName('StandUp'))
self.ignore('enterWitnessStand')
self.ignore('pieSplat')
self.ignore('localPieSplat')
self.ignore('outOfPies')
self.ignore('begin-pie')
self.ignore('enterDefenseCol')
self.ignore('enterProsecutionCol')
self.__clearOnscreenMessage()
taskMgr.remove(self.uniqueName('PieAdvice'))
localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
if self.bossDamageMovie:
self.bossDamageMovie.finish()
self.bossDamageMovie = None
self.unstickBoss()
taskName = 'RecoverBossDamage'
taskMgr.remove(taskName)
self.battleThreeMusicTime = self.battleThreeMusic.getTime()
self.battleThreeMusic.stop()
return
def enterNearVictory(self):
self.cleanupIntervals()
self.reparentTo(render)
self.setPos(*ToontownGlobals.LawbotBossDeathPos)
self.setHpr(*ToontownGlobals.LawbotBossBattleThreeHpr)
self.clearChat()
self.releaseToons(finalBattle=1)
self.accept('pieSplat', self.__finalPieSplat)
self.accept('localPieSplat', self.__localPieSplat)
self.accept('outOfPies', self.__outOfPies)
localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
self.happy = 0
self.raised = 0
self.forward = 1
self.doAnimate()
self.setDizzy(1)
base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)
def exitNearVictory(self):
self.notify.debug('----- exitNearVictory')
self.ignore('pieSplat')
self.ignore('localPieSplat')
self.ignore('outOfPies')
self.__clearOnscreenMessage()
taskMgr.remove(self.uniqueName('PieAdvice'))
localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
self.setDizzy(0)
self.battleThreeMusicTime = self.battleThreeMusic.getTime()
self.battleThreeMusic.stop()
def enterVictory(self):
self.notify.debug('----- enterVictory')
self.cleanupIntervals()
self.reparentTo(render)
self.setPosHpr(*ToontownGlobals.LawbotBossBattleThreePosHpr)
self.loop('neutral')
localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
self.clearChat()
self.witnessToon.clearChat()
self.controlToons()
self.setToonsToNeutral(self.involvedToons)
self.happy = 1
self.raised = 1
self.forward = 1
intervalName = 'VictoryMovie'
seq = Sequence(self.makeVictoryMovie(), Func(self.__continueVictory), name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
self.bossHealthBar.deinitialize()
base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)
def __continueVictory(self):
self.notify.debug('----- __continueVictory')
self.stopAnimate()
self.doneBarrier('Victory')
def exitVictory(self):
self.notify.debug('----- exitVictory')
self.stopAnimate()
self.unstash()
localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
self.battleThreeMusicTime = self.battleThreeMusic.getTime()
self.battleThreeMusic.stop()
def enterDefeat(self):
self.notify.debug('----- enterDefeat')
self.cleanupIntervals()
localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
self.reparentTo(render)
self.clearChat()
self.releaseToons(finalBattle=1)
self.happy = 0
self.raised = 0
self.forward = 1
intervalName = 'DefeatMovie'
seq = Sequence(self.makeDefeatMovie(), Func(self.__continueDefeat), name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)
def __continueDefeat(self):
self.notify.debug('----- __continueDefeat')
self.stopAnimate()
self.doneBarrier('Defeat')
def exitDefeat(self):
self.notify.debug('----- exitDefeat')
self.stopAnimate()
self.unstash()
localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
self.battleThreeMusicTime = self.battleThreeMusic.getTime()
self.battleThreeMusic.stop()
def enterReward(self):
self.cleanupIntervals()
self.clearChat()
self.witnessToon.clearChat()
self.stash()
self.stopAnimate()
self.controlToons()
panelName = self.uniqueName('reward')
self.rewardPanel = RewardPanel.RewardPanel(panelName)
victory, camVictory, skipper = MovieToonVictory.doToonVictory(1, self.involvedToons, self.toonRewardIds, self.toonRewardDicts, self.deathList, self.rewardPanel, allowGroupShot=0, uberList=self.uberList, noSkip=True)
ival = Sequence(Parallel(victory, camVictory), Func(self.__doneReward))
intervalName = 'RewardMovie'
delayDeletes = []
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.enterReward'))
ival.delayDeletes = delayDeletes
ival.start()
self.storeInterval(ival, intervalName)
base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)
def __doneReward(self):
self.notify.debug('----- __doneReward')
self.doneBarrier('Reward')
self.toWalkMode()
def exitReward(self):
self.notify.debug('----- exitReward')
intervalName = 'RewardMovie'
self.clearInterval(intervalName)
self.unstash()
self.rewardPanel.destroy()
del self.rewardPanel
self.battleThreeMusicTime = 0
self.battleThreeMusic.stop()
def enterEpilogue(self):
self.cleanupIntervals()
self.clearChat()
self.witnessToon.clearChat()
self.stash()
self.stopAnimate()
self.controlToons()
self.__showWitnessToon()
self.witnessToon.reparentTo(render)
self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessEpiloguePosHpr)
self.witnessToon.loop('Sit')
self.__arrangeToonsAroundWitnessToon()
camera.reparentTo(render)
camera.setPos(self.witnessToon, -9, 12, 6)
camera.lookAt(self.witnessToon, 0, 0, 3)
intervalName = 'EpilogueMovie'
seq = Sequence(self.makeEpilogueMovie(), name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
self.accept('doneChatPage', self.__doneEpilogue)
base.playMusic(self.epilogueMusic, looping=1, volume=0.9)
def __doneEpilogue(self, elapsedTime = 0):
self.notify.debug('----- __doneEpilogue')
intervalName = 'EpilogueMovieToonAnim'
self.clearInterval(intervalName)
track = Parallel(Sequence(Wait(0.5), Func(self.localToonToSafeZone)))
self.storeInterval(track, intervalName)
track.start()
def exitEpilogue(self):
self.notify.debug('----- exitEpilogue')
self.clearInterval('EpilogueMovieToonAnim')
self.unstash()
self.epilogueMusic.stop()
def enterFrolic(self):
self.notify.debug('----- enterFrolic')
self.setPosHpr(*ToontownGlobals.LawbotBossBattleOnePosHpr)
DistributedBossCog.DistributedBossCog.enterFrolic(self)
self.show()
def doorACallback(self, isOpen):
if self.insidesANodePath:
if isOpen:
self.insidesANodePath.unstash()
else:
self.insidesANodePath.stash()
def doorBCallback(self, isOpen):
if self.insidesBNodePath:
if isOpen:
self.insidesBNodePath.unstash()
else:
self.insidesBNodePath.stash()
def __toonsToPromotionPosition(self, toonIds, battleNode):
self.notify.debug('----- __toonsToPromotionPosition')
points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1]
for i in xrange(len(toonIds)):
toon = base.cr.doId2do.get(toonIds[i])
if toon:
toon.reparentTo(render)
pos, h = points[i]
toon.setPosHpr(battleNode, pos[0], pos[1] + 10, pos[2], h, 0, 0)
def __outOfPies(self):
self.notify.debug('----- outOfPies')
self.__showOnscreenMessage(TTLocalizer.LawbotBossNeedMoreEvidence)
taskMgr.doMethodLater(20, self.__howToGetPies, self.uniqueName('PieAdvice'))
def __howToGetPies(self, task):
self.notify.debug('----- __howToGetPies')
self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToGetEvidence)
def __howToThrowPies(self, task):
self.notify.debug('----- __howToThrowPies')
self.__showOnscreenMessage(TTLocalizer.LawbotBossHowToThrowPies)
def __foundPieButton(self):
self.everThrownPie = 1
self.__clearOnscreenMessage()
taskMgr.remove(self.uniqueName('PieAdvice'))
def __touchedWitnessStand(self, entry):
self.sendUpdate('touchWitnessStand', [])
self.__clearOnscreenMessage()
taskMgr.remove(self.uniqueName('PieAdvice'))
base.playSfx(self.piesRestockSfx)
if not self.everThrownPie:
taskMgr.doMethodLater(30, self.__howToThrowPies, self.uniqueName('PieAdvice'))
def __pieSplat(self, toon, pieCode):
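        # A pie hit was reported by the collision system.  The pieCode tag says
        # what was struck; only hits thrown by the local toon are reported to
        # the AI via the d_hit* updates.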
if pieCode == ToontownGlobals.PieCodeBossInsides:
if toon == localAvatar:
self.d_hitBossInsides()
self.flashRed()
elif pieCode == ToontownGlobals.PieCodeBossCog:
if toon == localAvatar:
self.d_hitBoss(1)
if self.dizzy:
self.flashRed()
self.doAnimate('hit', now=1)
elif pieCode == ToontownGlobals.PieCodeDefensePan:
self.flashRed()
self.flashPanBlue()
base.playSfx(self.evidenceHitSfx, node=self.defensePanNodePath, volume=0.25)
if toon == localAvatar:
self.d_hitBoss(self.panDamage)
elif pieCode == ToontownGlobals.PieCodeProsecutionPan:
self.flashGreen()
if toon == localAvatar:
pass
elif pieCode == ToontownGlobals.PieCodeLawyer:
pass
def __localPieSplat(self, pieCode, entry):
if pieCode == ToontownGlobals.PieCodeLawyer:
self.__lawyerGotHit(entry)
if pieCode != ToontownGlobals.PieCodeToon:
return
avatarDoId = entry.getIntoNodePath().getNetTag('avatarDoId')
if avatarDoId == '':
self.notify.warning('Toon %s has no avatarDoId tag.' % repr(entry.getIntoNodePath()))
return
doId = int(avatarDoId)
if doId != localAvatar.doId:
self.d_hitToon(doId)
def __lawyerGotHit(self, entry):
lawyerCol = entry.getIntoNodePath()
names = lawyerCol.getName().split('-')
lawyerDoId = int(names[1])
for lawyer in self.lawyers:
if lawyerDoId == lawyer.doId:
lawyer.sendUpdate('hitByToon', [])
def __finalPieSplat(self, toon, pieCode):
if pieCode != ToontownGlobals.PieCodeDefensePan:
return
self.sendUpdate('finalPieSplat', [])
self.ignore('pieSplat')
def cleanupAttacks(self):
self.notify.debug('----- cleanupAttacks')
self.__cleanupStrafe()
def __cleanupStrafe(self):
        self.notify.debug('----- __cleanupStrafe')
if self.strafeInterval:
self.strafeInterval.finish()
self.strafeInterval = None
return
def __cleanupJuryBox(self):
self.notify.debug('----- __cleanupJuryBox')
if self.juryBoxIval:
self.juryBoxIval.finish()
self.juryBoxIval = None
if self.juryBox:
self.juryBox.removeNode()
return
def doStrafe(self, side, direction):
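        # Gear-strafe attack: throw a fan of spinning gears out of one of the
        # side doors.  The damage level (getBossDamage) raises the gear count
        # and shortens the total throw time.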
gearRoot = self.rotateNode.attachNewNode('gearRoot')
if side == 0:
gearRoot.setPos(0, -7, 3)
gearRoot.setHpr(180, 0, 0)
door = self.doorA
else:
gearRoot.setPos(0, 7, 3)
door = self.doorB
gearRoot.setTag('attackCode', str(ToontownGlobals.BossCogStrafeAttack))
gearModel = self.getGearFrisbee()
gearModel.setScale(0.1)
t = self.getBossDamage() / 100.0
gearTrack = Parallel()
numGears = int(4 + 6 * t + 0.5)
time = 5.0 - 4.0 * t
spread = 60 * math.pi / 180.0
if direction == 1:
spread = -spread
dist = 50
rate = time / numGears
for i in xrange(numGears):
node = gearRoot.attachNewNode(str(i))
node.hide()
node.setPos(0, 0, 0)
gear = gearModel.instanceTo(node)
angle = (float(i) / (numGears - 1) - 0.5) * spread
x = dist * math.sin(angle)
y = dist * math.cos(angle)
h = random.uniform(-720, 720)
gearTrack.append(Sequence(Wait(i * rate), Func(node.show), Parallel(node.posInterval(1, Point3(x, y, 0), fluid=1), node.hprInterval(1, VBase3(h, 0, 0), fluid=1), Sequence(SoundInterval(self.strafeSfx[i], volume=0.2, node=self), duration=0)), Func(node.detachNode)))
seq = Sequence(Func(door.request, 'open'), Wait(0.7), gearTrack, Func(door.request, 'close'))
self.__cleanupStrafe()
self.strafeInterval = seq
seq.start()
def replaceCollisionPolysWithPlanes(self, model):
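        # Replace every CollisionPolygon found under 'model' with infinite
        # CollisionPlanes, merging planes that compare equal within a small
        # tolerance, and return them all on a single new collision node.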
newCollisionNode = CollisionNode('collisions')
newCollideMask = BitMask32(0)
planes = []
collList = model.findAllMatches('**/+CollisionNode')
if not collList:
collList = [model]
for cnp in collList:
cn = cnp.node()
if not isinstance(cn, CollisionNode):
self.notify.warning('Not a collision node: %s' % repr(cnp))
break
newCollideMask = newCollideMask | cn.getIntoCollideMask()
for i in xrange(cn.getNumSolids()):
solid = cn.getSolid(i)
if isinstance(solid, CollisionPolygon):
plane = Plane(solid.getPlane())
planes.append(plane)
else:
self.notify.warning('Unexpected collision solid: %s' % repr(solid))
newCollisionNode.addSolid(plane)
newCollisionNode.setIntoCollideMask(newCollideMask)
threshold = 0.1
planes.sort(lambda p1, p2: p1.compareTo(p2, threshold))
lastPlane = None
for plane in planes:
if lastPlane == None or plane.compareTo(lastPlane, threshold) != 0:
cp = CollisionPlane(plane)
newCollisionNode.addSolid(cp)
lastPlane = plane
return NodePath(newCollisionNode)
def makeIntroductionMovie(self, delayDeletes):
self.notify.debug('----- makeIntroductionMovie')
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
delayDeletes.append(DelayDelete.DelayDelete(toon, 'LawbotBoss.makeIntroductionMovie'))
track = Parallel()
bossAnimTrack = Sequence(
ActorInterval(self, 'Ff_speech', startTime=2, duration=10, loop=1),
ActorInterval(self, 'Ff_lookRt', duration=3),
ActorInterval(self, 'Ff_lookRt', duration=3, startTime=3, endTime=0),
ActorInterval(self, 'Ff_neutral', duration=2),
ActorInterval(self, 'Ff_speech', duration=7, loop=1))
track.append(bossAnimTrack)
attackToons = TTLocalizer.BossCogAttackToons
dialogTrack = Track(
(0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro0, CFSpeech)),
(5.6, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro1, CFSpeech)),
(12, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro2, CFSpeech)),
(18, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro3, CFSpeech)),
(22, Func(self.setChatAbsolute, TTLocalizer.LawbotBossTempIntro4, CFSpeech)),
(24, Sequence(
Func(self.clearChat),
self.loseCogSuits(self.toonsA + self.toonsB, render, (-2.798, -70, 10, 180, 0, 0)))),
(27, Sequence(
self.toonNormalEyes(self.involvedToons),
Func(self.loop, 'Ff_neutral'),
Func(self.setChatAbsolute, attackToons, CFSpeech))))
track.append(dialogTrack)
return Sequence(
Func(self.stickToonsToFloor),
track,
Func(self.unstickToons), name=self.uniqueName('Introduction'))
def walkToonsToBattlePosition(self, toonIds, battleNode):
self.notify.debug('walkToonsToBattlePosition-----------------------------------------------')
self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode))
ival = Parallel()
points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1]
self.notify.debug('walkToonsToBattlePosition: points = %s' % points[0][0])
for i in xrange(len(toonIds)):
toon = base.cr.doId2do.get(toonIds[i])
if toon:
pos, h = points[i]
origPos = pos
self.notify.debug('origPos = %s' % origPos)
self.notify.debug('batlleNode.getTransform = %s render.getTransform=%s' % (battleNode.getTransform(), render.getTransform()))
self.notify.debug('render.getScale()=%s battleNode.getScale()=%s' % (render.getScale(), battleNode.getScale()))
myCurPos = self.getPos()
self.notify.debug('myCurPos = %s' % self.getPos())
self.notify.debug('battleNode.parent() = %s' % battleNode.getParent())
self.notify.debug('battleNode.parent().getPos() = %s' % battleNode.getParent().getPos())
bnParent = battleNode.getParent()
battleNode.wrtReparentTo(render)
bnWorldPos = battleNode.getPos()
battleNode.wrtReparentTo(bnParent)
self.notify.debug('battle node world pos = %s' % bnWorldPos)
pos = render.getRelativePoint(battleNode, pos)
                self.notify.debug('walkToonsToBattlePosition: render.getRelativePoint result = %s' % pos)
self.notify.debug('walkToonsToBattlePosition: final pos = %s' % pos)
ival.append(Sequence(Func(toon.setPlayRate, 0.8, 'walk'), Func(toon.loop, 'walk'), toon.posInterval(3, pos), Func(toon.setPlayRate, 1, 'walk'), Func(toon.loop, 'neutral')))
return ival
def toonsToBattlePosition(self, toonIds, battleNode):
        self.notify.debug('DistributedLawbotBoss.toonsToBattlePosition----------------------------------------')
self.notify.debug('toonIds=%s battleNode=%s' % (toonIds, battleNode))
if len(toonIds) < 5:
points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1]
else:
points = list(BattleBase.BattleBase.toonPoints[3])
points.extend(BattleBase.BattleBase.toonPoints[len(toonIds) - 5])
self.notify.debug('toonsToBattlePosition: points = %s' % points[0][0])
for i in xrange(len(toonIds)):
toon = base.cr.doId2do.get(toonIds[i])
if toon:
toon.wrtReparentTo(render)
pos, h = points[i]
if i > 3:
pos.setY(pos.getY() + 2.0)
bnParent = battleNode.getParent()
battleNode.wrtReparentTo(render)
bnWorldPos = battleNode.getPos()
battleNode.wrtReparentTo(bnParent)
toon.setPosHpr(battleNode, pos[0], pos[1], pos[2], h, 0, 0)
self.notify.debug('new toon pos %s ' % toon.getPos())
def touchedGavel(self, gavel, entry):
self.notify.debug('touchedGavel')
attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode')
if attackCodeStr == '':
self.notify.warning('Node %s has no attackCode tag.' % repr(entry.getIntoNodePath()))
return
attackCode = int(attackCodeStr)
into = entry.getIntoNodePath()
self.zapLocalToon(attackCode, into)
def touchedGavelHandle(self, gavel, entry):
attackCodeStr = entry.getIntoNodePath().getNetTag('attackCode')
if attackCodeStr == '':
self.notify.warning('Node %s has no attackCode tag.' % repr(entry.getIntoNodePath()))
return
attackCode = int(attackCodeStr)
into = entry.getIntoNodePath()
self.zapLocalToon(attackCode, into)
def createBlock(self, x1, y1, z1, x2, y2, z2, r = 1.0, g = 1.0, b = 1.0, a = 1.0):
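        # Procedurally build a flat-colored axis-aligned box between the two
        # corner points and return it as a GeomNode; used by loadScaleOld for
        # the programmer/debug version of the injustice scale.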
gFormat = GeomVertexFormat.getV3n3cpt2()
myVertexData = GeomVertexData('holds my vertices', gFormat, Geom.UHDynamic)
vertexWriter = GeomVertexWriter(myVertexData, 'vertex')
normalWriter = GeomVertexWriter(myVertexData, 'normal')
colorWriter = GeomVertexWriter(myVertexData, 'color')
texWriter = GeomVertexWriter(myVertexData, 'texcoord')
vertexWriter.addData3f(x1, y1, z1)
vertexWriter.addData3f(x2, y1, z1)
vertexWriter.addData3f(x1, y2, z1)
vertexWriter.addData3f(x2, y2, z1)
vertexWriter.addData3f(x1, y1, z2)
vertexWriter.addData3f(x2, y1, z2)
vertexWriter.addData3f(x1, y2, z2)
vertexWriter.addData3f(x2, y2, z2)
for index in xrange(8):
normalWriter.addData3f(1.0, 1.0, 1.0)
colorWriter.addData4f(r, g, b, a)
texWriter.addData2f(1.0, 1.0)
tris = GeomTriangles(Geom.UHDynamic)
tris.addVertex(0)
tris.addVertex(1)
tris.addVertex(2)
tris.closePrimitive()
tris.addVertex(1)
tris.addVertex(3)
tris.addVertex(2)
tris.closePrimitive()
tris.addVertex(2)
tris.addVertex(3)
tris.addVertex(6)
tris.closePrimitive()
tris.addVertex(3)
tris.addVertex(7)
tris.addVertex(6)
tris.closePrimitive()
tris.addVertex(0)
tris.addVertex(2)
tris.addVertex(4)
tris.closePrimitive()
tris.addVertex(2)
tris.addVertex(6)
tris.addVertex(4)
tris.closePrimitive()
tris.addVertex(1)
tris.addVertex(5)
tris.addVertex(3)
tris.closePrimitive()
tris.addVertex(3)
tris.addVertex(5)
tris.addVertex(7)
tris.closePrimitive()
tris.addVertex(0)
tris.addVertex(4)
tris.addVertex(5)
tris.closePrimitive()
tris.addVertex(1)
tris.addVertex(0)
tris.addVertex(5)
tris.closePrimitive()
tris.addVertex(4)
tris.addVertex(6)
tris.addVertex(7)
tris.closePrimitive()
tris.addVertex(7)
tris.addVertex(5)
tris.addVertex(4)
tris.closePrimitive()
cubeGeom = Geom(myVertexData)
cubeGeom.addPrimitive(tris)
cubeGN = GeomNode('cube')
cubeGN.addGeom(cubeGeom)
return cubeGN
def __enterDefenseCol(self, entry):
self.notify.debug('__enterDefenseCol')
def __enterProsecutionCol(self, entry):
self.notify.debug('__enterProsecutionCol')
def makeVictoryMovie(self):
myFromPos = Point3(ToontownGlobals.LawbotBossBattleThreePosHpr[0], ToontownGlobals.LawbotBossBattleThreePosHpr[1], ToontownGlobals.LawbotBossBattleThreePosHpr[2])
myToPos = Point3(myFromPos[0], myFromPos[1] + 30, myFromPos[2])
rollThroughDoor = self.rollBossToPoint(fromPos=myFromPos, fromHpr=None, toPos=myToPos, toHpr=None, reverse=0)
rollTrack = Sequence(
Func(self.getGeomNode().setH, 180),
rollThroughDoor[0],
Func(self.getGeomNode().setH, 0))
rollTrackDuration = rollTrack.getDuration()
self.notify.debug('rollTrackDuration = %f' % rollTrackDuration)
doorStartPos = self.door3.getPos()
doorEndPos = Point3(doorStartPos[0], doorStartPos[1], doorStartPos[2] + 25)
bossTrack = Track(
(0.5, Sequence(
Func(self.clearChat),
Func(camera.reparentTo, render),
Func(camera.setPos, -3, 45, 25),
Func(camera.setHpr, 0, 10, 0))),
(1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins1, CFSpeech)),
(5.5, Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins2, CFSpeech)),
(9.5, Sequence(Func(camera.wrtReparentTo, render))),
(9.6, Parallel(
rollTrack,
Func(self.setChatAbsolute, TTLocalizer.LawbotBossDefenseWins3, CFSpeech),
self.door3.posInterval(2, doorEndPos, startPos=doorStartPos))),
(13.1, Sequence(self.door3.posInterval(1, doorStartPos))))
retTrack = Parallel(bossTrack, ActorInterval(self, 'Ff_speech', loop=1))
return bossTrack
def makeEpilogueMovie(self):
epSpeech = TTLocalizer.WitnessToonCongratulations
epSpeech = self.__talkAboutPromotion(epSpeech)
bossTrack = Sequence(Func(self.witnessToon.animFSM.request, 'neutral'), Func(self.witnessToon.setLocalPageChat, epSpeech, 0))
return bossTrack
def makeDefeatMovie(self):
bossTrack = Track((0.0, Sequence(Func(self.clearChat), Func(self.reverseHead), ActorInterval(self, 'Ff_speech'))), (1.0, Func(self.setChatAbsolute, TTLocalizer.LawbotBossProsecutionWins, CFSpeech)))
return bossTrack
def __makeWitnessToon(self):
dnaNetString = 't\x1b\x00\x01\x01\x00\x03\x00\x03\x01\x10\x13\x00\x13\x13'
npc = Toon.Toon()
npc.setDNAString(dnaNetString)
npc.setName(TTLocalizer.WitnessToonName)
npc.setPickable(0)
npc.setPlayerType(NametagGroup.CCNonPlayer)
npc.animFSM.request('Sit')
self.witnessToon = npc
self.witnessToon.setPosHpr(*ToontownGlobals.LawbotBossWitnessStandPosHpr)
def __cleanupWitnessToon(self):
self.__hideWitnessToon()
if self.witnessToon:
self.witnessToon.removeActive()
self.witnessToon.delete()
self.witnessToon = None
return
def __showWitnessToon(self):
if not self.witnessToonOnstage:
self.witnessToon.addActive()
self.witnessToon.reparentTo(self.geom)
seatCenter = self.realWitnessStand.find('**/witnessStandSeatEdge')
center = seatCenter.getPos()
self.notify.debug('center = %s' % center)
self.witnessToon.setPos(center)
self.witnessToon.setH(180)
self.witnessToon.setZ(self.witnessToon.getZ() - 1.5)
self.witnessToon.setY(self.witnessToon.getY() - 1.15)
self.witnessToonOnstage = 1
def __hideWitnessToon(self):
if self.witnessToonOnstage:
self.witnessToon.removeActive()
self.witnessToon.detachNode()
self.witnessToonOnstage = 0
def __hideToons(self):
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
toon.hide()
def __showToons(self):
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
toon.show()
def __arrangeToonsAroundWitnessToon(self):
radius = 7
numToons = len(self.involvedToons)
center = (numToons - 1) / 2.0
for i in xrange(numToons):
toon = self.cr.doId2do.get(self.involvedToons[i])
if toon:
angle = 90 - 15 * (i - center)
radians = angle * math.pi / 180.0
x = math.cos(radians) * radius
y = math.sin(radians) * radius
toon.setPos(self.witnessToon, x, y, 0)
toon.headsUp(self.witnessToon)
toon.loop('neutral')
toon.show()
def __talkAboutPromotion(self, speech):
if self.prevCogSuitLevel < ToontownGlobals.MaxCogSuitLevel:
newCogSuitLevel = localAvatar.getCogLevels()[CogDisguiseGlobals.dept2deptIndex(self.style.dept)]
if newCogSuitLevel == ToontownGlobals.MaxCogSuitLevel:
speech += TTLocalizer.WitnessToonLastPromotion % (ToontownGlobals.MaxCogSuitLevel + 1)
if newCogSuitLevel in ToontownGlobals.CogSuitHPLevels:
speech += TTLocalizer.WitnessToonHPBoost
else:
speech += TTLocalizer.WitnessToonMaxed % (ToontownGlobals.MaxCogSuitLevel + 1)
return speech
def __positionToonsInFrontOfCannons(self):
self.notify.debug('__positionToonsInFrontOfCannons')
index = 0
self.involvedToons.sort()
for toonId in self.involvedToons:
if index in self.cannons:
cannon = self.cannons[index]
toon = self.cr.doId2do.get(toonId)
self.notify.debug('cannonId = %d' % cannon.doId)
cannonPos = cannon.nodePath.getPos(render)
self.notify.debug('cannonPos = %s' % cannonPos)
if toon:
self.notify.debug('toon = %s' % toon.getName())
toon.reparentTo(cannon.nodePath)
toon.setPos(0, 8, 0)
toon.setH(180)
renderPos = toon.getPos(render)
self.notify.debug('renderPos =%s' % renderPos)
index += 1
self.notify.debug('done with positionToons')
def __makePrepareBattleTwoMovie(self):
chatString = TTLocalizer.WitnessToonPrepareBattleTwo % ToontownGlobals.LawbotBossJurorsForBalancedScale
movie = Sequence(Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8, 2), Func(camera.setHpr, 180, 10, 0), Func(self.witnessToon.setLocalPageChat, chatString, 0))
return movie
def __doWitnessPrepareBattleThreeChat(self):
self.notify.debug('__doWitnessPrepareBattleThreeChat: original self.numToonJurorsSeated = %d' % self.numToonJurorsSeated)
self.countToonJurors()
self.notify.debug('after calling self.countToonJurors, numToonJurorsSeated=%d' % self.numToonJurorsSeated)
if self.numToonJurorsSeated == 0:
juryResult = TTLocalizer.WitnessToonNoJuror
elif self.numToonJurorsSeated == 1:
juryResult = TTLocalizer.WitnessToonOneJuror
elif self.numToonJurorsSeated == 12:
juryResult = TTLocalizer.WitnessToonAllJurors
else:
juryResult = TTLocalizer.WitnessToonSomeJurors % self.numToonJurorsSeated
juryResult += '\x07'
trialSpeech = juryResult
trialSpeech += TTLocalizer.WitnessToonPrepareBattleThree
diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty]
if diffSettings[4]:
newWeight, self.bonusWeight, self.numJurorsLocalToonSeated = self.calculateWeightOfToon(base.localAvatar.doId)
if self.bonusWeight > 0:
if self.bonusWeight == 1:
juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusSingular.get(self.battleDifficulty)
else:
juryWeightBonus = TTLocalizer.WitnessToonJuryWeightBonusPlural.get(self.battleDifficulty)
if juryWeightBonus:
weightBonusText = juryWeightBonus % (self.numJurorsLocalToonSeated, self.bonusWeight)
trialSpeech += '\x07'
trialSpeech += weightBonusText
self.witnessToon.setLocalPageChat(trialSpeech, 0)
def __makePrepareBattleThreeMovie(self):
movie = Sequence(Func(camera.reparentTo, render), Func(camera.setPos, -15, 15, 20), Func(camera.setHpr, -90, 0, 0), Wait(3), Func(camera.reparentTo, self.witnessToon), Func(camera.setPos, 0, 8, 2), Func(camera.setHpr, 180, 10, 0), Func(self.__doWitnessPrepareBattleThreeChat))
return movie
def countToonJurors(self):
self.numToonJurorsSeated = 0
for key in self.chairs.keys():
chair = self.chairs[key]
if chair.state == 'ToonJuror' or chair.state == None and chair.newState == 'ToonJuror':
self.numToonJurorsSeated += 1
self.notify.debug('self.numToonJurorsSeated = %d' % self.numToonJurorsSeated)
return
def cleanupPanFlash(self):
if self.panFlashInterval:
self.panFlashInterval.finish()
self.panFlashInterval = None
return
def flashPanBlue(self):
self.cleanupPanFlash()
intervalName = 'FlashPanBlue'
self.defensePanNodePath.setColorScale(1, 1, 1, 1)
seq = Sequence(self.defensePanNodePath.colorScaleInterval(0.1, colorScale=VBase4(0, 0, 1, 1)), self.defensePanNodePath.colorScaleInterval(0.3, colorScale=VBase4(1, 1, 1, 1)), name=intervalName)
self.panFlashInterval = seq
seq.start()
self.storeInterval(seq, intervalName)
def saySomething(self, chatString):
intervalName = 'ChiefJusticeTaunt'
seq = Sequence(name=intervalName)
seq.append(Func(self.setChatAbsolute, chatString, CFSpeech))
seq.append(Wait(4.0))
seq.append(Func(self.clearChat))
oldSeq = self.activeIntervals.get(intervalName)
if oldSeq:
oldSeq.finish()
seq.start()
self.storeInterval(seq, intervalName)
def setTaunt(self, tauntIndex, extraInfo):
gotError = False
if not hasattr(self, 'state'):
self.notify.warning('returning from setTaunt, no attr state')
gotError = True
elif not self.state == 'BattleThree':
self.notify.warning('returning from setTaunt, not in battle three state, state=%s', self.state)
gotError = True
if not hasattr(self, 'nametag'):
self.notify.warning('returning from setTaunt, no attr nametag')
gotError = True
if gotError:
st = StackTrace()
print st
return
chatString = TTLocalizer.LawbotBossTaunts[1]
if tauntIndex == 0:
if extraInfo < len(self.involvedToons):
toonId = self.involvedToons[extraInfo]
toon = base.cr.doId2do.get(toonId)
if toon:
chatString = TTLocalizer.LawbotBossTaunts[tauntIndex] % toon.getName()
else:
chatString = TTLocalizer.LawbotBossTaunts[tauntIndex]
self.saySomething(chatString)
def toonGotHealed(self, toonId):
toon = base.cr.doId2do.get(toonId)
if toon:
base.playSfx(self.toonUpSfx, node=toon)
def hideBonusTimer(self):
if self.bonusTimer:
self.bonusTimer.hide()
def enteredBonusState(self):
self.witnessToon.clearChat()
text = TTLocalizer.WitnessToonBonus % (ToontownGlobals.LawbotBossBonusWeightMultiplier, ToontownGlobals.LawbotBossBonusDuration)
self.witnessToon.setChatAbsolute(text, CFSpeech | CFTimeout)
base.playSfx(self.toonUpSfx)
if not self.bonusTimer:
self.bonusTimer = ToontownTimer.ToontownTimer()
self.bonusTimer.posInTopRightCorner()
self.bonusTimer.show()
self.bonusTimer.countdown(ToontownGlobals.LawbotBossBonusDuration, self.hideBonusTimer)
def setAttackCode(self, attackCode, avId = 0):
DistributedBossCog.DistributedBossCog.setAttackCode(self, attackCode, avId)
if attackCode == ToontownGlobals.BossCogAreaAttack:
self.saySomething(TTLocalizer.LawbotBossAreaAttackTaunt)
base.playSfx(self.warningSfx)
def setBattleDifficulty(self, diff):
self.notify.debug('battleDifficulty = %d' % diff)
self.battleDifficulty = diff
def toonEnteredCannon(self, toonId, cannonIndex):
if base.localAvatar.doId == toonId:
self.cannonIndex = cannonIndex
def numJurorsSeatedByCannon(self, cannonIndex):
retVal = 0
for chair in self.chairs.values():
if chair.state == 'ToonJuror':
if chair.toonJurorIndex == cannonIndex:
retVal += 1
return retVal
def calculateWeightOfToon(self, toonId):
defaultWeight = 1
bonusWeight = 0
newWeight = 1
cannonIndex = self.cannonIndex
numJurors = 0
if not cannonIndex == None and cannonIndex >= 0:
diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty]
if diffSettings[4]:
numJurors = self.numJurorsSeatedByCannon(cannonIndex)
bonusWeight = numJurors - diffSettings[5]
if bonusWeight < 0:
bonusWeight = 0
newWeight = defaultWeight + bonusWeight
self.notify.debug('toon %d has weight of %d' % (toonId, newWeight))
return (newWeight, bonusWeight, numJurors)
| [] |
lightmatter-ai/tensorflow-onnx | tests/test_custom_rnncell.py | a08aa32e211b859e8a437c5d8a822ea55c46e7c6 | # SPDX-License-Identifier: Apache-2.0
"""Unit Tests for custom rnns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops
from backend_test_base import Tf2OnnxBackendTestBase
from common import * # pylint: disable=wildcard-import, unused-wildcard-import
from tf2onnx.tf_loader import is_tf2
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test
# pylint: disable=abstract-method,arguments-differ
if is_tf2():
BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell
LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell
GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell
RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell
MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn
else:
LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell
LSTMCell = tf.nn.rnn_cell.LSTMCell
GRUCell = tf.nn.rnn_cell.LSTMCell
RNNCell = tf.nn.rnn_cell.RNNCell
MultiRNNCell = tf.contrib.rnn.MultiRNNCell
dynamic_rnn = tf.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn
class CustomRnnCellTests(Tf2OnnxBackendTestBase):
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn(self):
size = 5 # size of each model layer.
batch_size = 1
cell = GatedGRUCell(size)
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=False)
return tf.identity(xs, name="output"), tf.identity(s, name="final_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn_time_major(self):
size = 5 # size of each model layer.
batch_size = 1
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
cell = GatedGRUCell(size)
xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=True)
return tf.identity(xs, name="output"), tf.identity(s, name="final_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn_with_seq_length(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
# no scope
cell = GatedGRUCell(units)
outputs, cell_state = dynamic_rnn(
cell,
x,
dtype=tf.float32,
sequence_length=[4, 3, 4, 5, 2, 1])
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn_with_non_const_seq_length(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
y_val = np.array([4, 3, 4, 5, 2, 1], dtype=np.int32)
def func(x, seq_length):
# no scope
cell = GatedGRUCell(units)
outputs, cell_state = dynamic_rnn(
cell,
x,
dtype=tf.float32,
sequence_length=tf.identity(seq_length))
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_const_encoder(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`.
decoder_time_step = 6
x_val = np.random.randn(decoder_time_step, input_size).astype('f')
x_val = np.stack([x_val] * batch_size)
attention_states = np.random.randn(batch_size, time_step, attn_size).astype('f')
def func(x):
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, x, dtype=tf.float32)
return tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0"]
output_names_with_port = ["output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_lstm_encoder(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')
decoder_x_val = np.stack([decoder_x_val] * batch_size)
def func(encoder_x, decoder_x):
encoder_cell = LSTMCell(size)
output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
output_0 = tf.identity(output, name="output_0")
attention_states = output
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)
return output_0, tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output_0:0", "output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_gru_encoder(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')
decoder_x_val = np.stack([decoder_x_val] * batch_size)
def func(encoder_x, decoder_x):
encoder_cell = GRUCell(size)
output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
_ = tf.identity(output, name="output_0")
attention_states = output
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = GRUCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)
return tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output_0:0", "output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_lstm_encoder_input_has_none_dim(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')
decoder_x_val = np.stack([decoder_x_val] * batch_size)
def func(encoder_x, decoder_x):
encoder_cell = LSTMCell(size)
output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
_ = tf.identity(output, name="output_0")
attention_states = output
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)
return tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output_0:0", "output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_multi_rnn_lstm(self, state_is_tuple=True):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
initializer = init_ops.constant_initializer(0.5)
cell_0 = LSTMCell(units,
initializer=initializer,
state_is_tuple=state_is_tuple)
cell_1 = LSTMCell(units,
initializer=initializer,
state_is_tuple=state_is_tuple)
cell_2 = LSTMCell(units,
initializer=initializer,
state_is_tuple=state_is_tuple)
cells = MultiRNNCell([cell_0, cell_1, cell_2], state_is_tuple=state_is_tuple)
outputs, cell_state = dynamic_rnn(cells, x, dtype=tf.float32)
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
input_names_with_port = ["input_1:0"]
feed_dict = {"input_1:0": x_val}
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_opset(9, "ReverseSequence")
@skip_tf2()
@allow_missing_shapes("Missing RNN shape")
def test_bidrectional_attention_wrapper_lstm_encoder(self):
size = 30
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, batch_size, input_size).astype('f')
def func(encoder_x, decoder_x, seq_length):
encoder_cell = LSTMCell(size)
attention_states, _ = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
# [9, 3, 30], [9, 30]
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
match_cell_bk = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
(match_output_fw, match_output_bk), (match_state_fw, match_state_bk) = \
bidirectional_dynamic_rnn(cell_fw=match_cell_fw,
cell_bw=match_cell_bk,
inputs=decoder_x,
sequence_length=tf.identity(seq_length),
dtype=tf.float32,
time_major=True)
matched_output = tf.concat([match_output_fw, match_output_bk], axis=-1)
matched_state = tf.concat([match_state_fw.cell_state, match_state_bk.cell_state], -1)
return tf.identity(matched_output, name="output_0"), tf.identity(matched_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val,
"input_3:0": np.array([6, 5, 4, 3, 2, 1, 2, 3, 6], dtype=np.int32)}
input_names_with_port = ["input_1:0", "input_2:0", "input_3:0"]
output_names_with_port = ["output_0:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
class GatedGRUCell(RNNCell):
def __init__(self, hidden_dim, reuse=None):
super().__init__(self, _reuse=reuse)
self._num_units = hidden_dim
self._activation = tf.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
# inputs shape: [batch size, time step, input size] = [1, 3, 2]
# num_units: 5
# W shape: [2, 3 * 5] = [2, 15]
# U shape: [5, 3 * 5] = [5, 15]
# b shape: [1, 3 * 5] = [1, 15]
# state shape: [batch size, state size] = [1, 5]
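        # The arithmetic below is a hand-rolled GRU-style update using fixed
        # numpy constants in place of trainable variables (see the commented-out
        # tf.get_variable calls). W, U and b are each split into three
        # per-gate chunks, giving
        #   r      = sigmoid(x*W_r + b_r + h*U_r)        (reset gate)
        #   z      = sigmoid(x*W_z + b_z + h*U_z)        (update gate)
        #   h_cand = tanh(x*W_c + b_c + r * (h*U_c))     (candidate state)
        #   h_next = (1 - z) * h_cand + z * h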
input_dim = inputs.get_shape()[-1]
assert input_dim is not None, "input dimension must be defined"
# W = tf.get_variable(name="W", shape=[input_dim, 3 * self._num_units], dtype=tf.float32)
W = np.arange(30.0, dtype=np.float32).reshape((2, 15))
# U = tf.get_variable(name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32)
U = np.arange(75.0, dtype=np.float32).reshape((5, 15))
# b = tf.get_variable(name='b', shape=[1, 3 * self._num_units], dtype=tf.float32)
b = np.arange(15.0, dtype=np.float32).reshape((1, 15))
xw = tf.split(tf.matmul(inputs, W) + b, 3, 1)
hu = tf.split(tf.matmul(state, U), 3, 1)
r = tf.sigmoid(xw[0] + hu[0])
z = tf.sigmoid(xw[1] + hu[1])
h1 = self._activation(xw[2] + r * hu[2])
next_h = h1 * (1 - z) + state * z
return next_h, next_h
if __name__ == '__main__':
unittest_main()
| [((22, 3, 22, 11), 'tf2onnx.tf_loader.is_tf2', 'is_tf2', ({}, {}), '()', False, 'from tf2onnx.tf_loader import is_tf2\n'), ((48, 16, 48, 74), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((49, 16, 49, 46), 'numpy.stack', 'np.stack', ({(49, 25, 49, 45): '[x_val] * batch_size'}, {}), '([x_val] * batch_size)', True, 'import numpy as np\n'), ((64, 16, 64, 74), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((65, 16, 65, 46), 'numpy.stack', 'np.stack', ({(65, 25, 65, 45): '[x_val] * batch_size'}, {}), '([x_val] * batch_size)', True, 'import numpy as np\n'), ((81, 16, 81, 94), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((82, 16, 82, 46), 'numpy.stack', 'np.stack', ({(82, 25, 82, 45): '[x_val] * batch_size'}, {}), '([x_val] * batch_size)', True, 'import numpy as np\n'), ((103, 16, 103, 94), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((104, 16, 104, 46), 'numpy.stack', 'np.stack', ({(104, 25, 104, 45): '[x_val] * batch_size'}, {}), '([x_val] * batch_size)', True, 'import numpy as np\n'), ((105, 16, 105, 60), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((135, 16, 135, 46), 'numpy.stack', 'np.stack', ({(135, 25, 135, 45): '[x_val] * batch_size'}, {}), '([x_val] * batch_size)', True, 'import numpy as np\n'), ((173, 24, 173, 62), 'numpy.stack', 'np.stack', ({(173, 33, 173, 61): '[encoder_x_val] * batch_size'}, {}), '([encoder_x_val] * batch_size)', True, 'import numpy as np\n'), ((176, 24, 176, 62), 'numpy.stack', 'np.stack', ({(176, 33, 176, 61): '[decoder_x_val] * batch_size'}, {}), '([decoder_x_val] * batch_size)', True, 'import numpy as np\n'), ((218, 24, 218, 62), 'numpy.stack', 'np.stack', ({(218, 33, 218, 61): '[encoder_x_val] * batch_size'}, {}), '([encoder_x_val] * batch_size)', True, 'import numpy as np\n'), ((221, 24, 221, 62), 'numpy.stack', 'np.stack', ({(221, 33, 221, 61): '[decoder_x_val] * batch_size'}, {}), '([decoder_x_val] * batch_size)', True, 'import numpy as np\n'), ((261, 24, 261, 62), 'numpy.stack', 'np.stack', ({(261, 33, 261, 61): '[encoder_x_val] * batch_size'}, {}), '([encoder_x_val] * batch_size)', True, 'import numpy as np\n'), ((264, 24, 264, 62), 'numpy.stack', 'np.stack', ({(264, 33, 264, 61): '[decoder_x_val] * batch_size'}, {}), '([decoder_x_val] * batch_size)', True, 'import numpy as np\n'), ((297, 16, 297, 84), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((298, 16, 298, 46), 'numpy.stack', 'np.stack', ({(298, 25, 298, 45): '[x_val] * batch_size'}, {}), '([x_val] * batch_size)', True, 'import numpy as np\n'), ((341, 24, 341, 62), 'numpy.stack', 'np.stack', ({(341, 33, 341, 61): '[encoder_x_val] * batch_size'}, {}), '([encoder_x_val] * batch_size)', True, 'import numpy as np\n'), ((416, 12, 416, 37), 'tensorflow.sigmoid', 'tf.sigmoid', ({(416, 23, 416, 36): 'xw[0] + hu[0]'}, {}), '(xw[0] + hu[0])', True, 'import tensorflow as tf\n'), ((417, 12, 417, 37), 'tensorflow.sigmoid', 'tf.sigmoid', ({(417, 23, 417, 36): 'xw[1] + hu[1]'}, {}), '(xw[1] + hu[1])', True, 'import tensorflow as tf\n'), ((139, 34, 139, 99), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', ({(139, 71, 139, 80): 'attn_size', (139, 82, 139, 98): 'attention_states'}, {}), '(attn_size, attention_states)', True, 'import tensorflow as tf\n'), ((143, 28, 147, 87), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (), '', True, 'import tensorflow as tf\n'), ((181, 23, 181, 59), 'tensorflow.identity', 
'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((183, 34, 184, 88), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', ({(183, 71, 183, 80): 'attn_size', (184, 71, 184, 87): 'attention_states'}, {}), '(attn_size, attention_states)', True, 'import tensorflow as tf\n'), ((188, 28, 192, 87), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (), '', True, 'import tensorflow as tf\n'), ((226, 16, 226, 52), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((228, 34, 229, 88), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', ({(228, 71, 228, 80): 'attn_size', (229, 71, 229, 87): 'attention_states'}, {}), '(attn_size, attention_states)', True, 'import tensorflow as tf\n'), ((233, 28, 237, 87), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (), '', True, 'import tensorflow as tf\n'), ((269, 16, 269, 52), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((271, 34, 272, 88), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', ({(271, 71, 271, 80): 'attn_size', (272, 71, 272, 87): 'attention_states'}, {}), '(attn_size, attention_states)', True, 'import tensorflow as tf\n'), ((276, 28, 280, 87), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (), '', True, 'import tensorflow as tf\n'), ((300, 26, 300, 60), 'tensorflow.python.ops.init_ops.constant_initializer', 'init_ops.constant_initializer', ({(300, 56, 300, 59): '0.5'}, {}), '(0.5)', False, 'from tensorflow.python.ops import init_ops\n'), ((349, 34, 350, 88), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', ({(349, 71, 349, 80): 'attn_size', (350, 71, 350, 87): 'attention_states'}, {}), '(attn_size, attention_states)', True, 'import tensorflow as tf\n'), ((354, 28, 358, 87), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (), '', True, 'import tensorflow as tf\n'), ((359, 28, 363, 87), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (), '', True, 'import tensorflow as tf\n'), ((372, 29, 372, 83), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((373, 28, 373, 97), 'tensorflow.concat', 'tf.concat', ({(373, 38, 373, 92): '[match_state_fw.cell_state, match_state_bk.cell_state]', (373, 94, 373, 96): '-1'}, {}), '([match_state_fw.cell_state, match_state_bk.cell_state], -1)', True, 'import tensorflow as tf\n'), ((377, 34, 377, 87), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((415, 22, 415, 41), 'tensorflow.matmul', 'tf.matmul', ({(415, 32, 415, 37): 'state', (415, 39, 415, 40): 'U'}, {}), '(state, U)', True, 'import tensorflow as tf\n'), ((52, 19, 52, 49), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((52, 51, 52, 85), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((69, 19, 69, 49), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((69, 51, 69, 85), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((91, 19, 91, 54), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((91, 56, 91, 98), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((114, 19, 114, 54), 'tensorflow.identity', 'tf.identity', (), 
'', True, 'import tensorflow as tf\n'), ((114, 56, 114, 98), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((134, 16, 134, 62), 'numpy.random.randn', 'np.random.randn', ({(134, 32, 134, 49): 'decoder_time_step', (134, 51, 134, 61): 'input_size'}, {}), '(decoder_time_step, input_size)', True, 'import numpy as np\n'), ((137, 27, 137, 76), 'numpy.random.randn', 'np.random.randn', ({(137, 43, 137, 53): 'batch_size', (137, 55, 137, 64): 'time_step', (137, 66, 137, 75): 'attn_size'}, {}), '(batch_size, time_step, attn_size)', True, 'import numpy as np\n'), ((141, 55, 141, 94), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((150, 19, 150, 53), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((150, 55, 150, 109), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((172, 24, 172, 70), 'numpy.random.randn', 'np.random.randn', ({(172, 40, 172, 57): 'encoder_time_step', (172, 59, 172, 69): 'input_size'}, {}), '(encoder_time_step, input_size)', True, 'import numpy as np\n'), ((175, 24, 175, 70), 'numpy.random.randn', 'np.random.randn', ({(175, 40, 175, 57): 'decoder_time_step', (175, 59, 175, 69): 'input_size'}, {}), '(decoder_time_step, input_size)', True, 'import numpy as np\n'), ((186, 55, 186, 94), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((196, 29, 196, 63), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((196, 65, 196, 119), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((217, 24, 217, 70), 'numpy.random.randn', 'np.random.randn', ({(217, 40, 217, 57): 'encoder_time_step', (217, 59, 217, 69): 'input_size'}, {}), '(encoder_time_step, input_size)', True, 'import numpy as np\n'), ((220, 24, 220, 70), 'numpy.random.randn', 'np.random.randn', ({(220, 40, 220, 57): 'decoder_time_step', (220, 59, 220, 69): 'input_size'}, {}), '(decoder_time_step, input_size)', True, 'import numpy as np\n'), ((231, 55, 231, 94), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((239, 19, 239, 53), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((239, 55, 239, 109), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((260, 24, 260, 70), 'numpy.random.randn', 'np.random.randn', ({(260, 40, 260, 57): 'encoder_time_step', (260, 59, 260, 69): 'input_size'}, {}), '(encoder_time_step, input_size)', True, 'import numpy as np\n'), ((263, 24, 263, 70), 'numpy.random.randn', 'np.random.randn', ({(263, 40, 263, 57): 'decoder_time_step', (263, 59, 263, 69): 'input_size'}, {}), '(decoder_time_step, input_size)', True, 'import numpy as np\n'), ((274, 55, 274, 94), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((284, 19, 284, 53), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((284, 55, 284, 109), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((316, 19, 316, 54), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((316, 56, 316, 98), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((340, 24, 340, 70), 'numpy.random.randn', 'np.random.randn', ({(340, 40, 340, 57): 'encoder_time_step', (340, 59, 340, 69): 'input_size'}, {}), '(encoder_time_step, input_size)', True, 'import numpy as np\n'), ((343, 24, 343, 82), 'numpy.random.randn', 
'np.random.randn', ({(343, 40, 343, 57): 'decoder_time_step', (343, 59, 343, 69): 'batch_size', (343, 71, 343, 81): 'input_size'}, {}), '(decoder_time_step, batch_size, input_size)', True, 'import numpy as np\n'), ((352, 55, 352, 94), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((374, 19, 374, 63), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((374, 65, 374, 111), 'tensorflow.identity', 'tf.identity', (), '', True, 'import tensorflow as tf\n'), ((408, 12, 408, 45), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((410, 12, 410, 45), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((412, 12, 412, 45), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((414, 22, 414, 42), 'tensorflow.matmul', 'tf.matmul', ({(414, 32, 414, 38): 'inputs', (414, 40, 414, 41): 'W'}, {}), '(inputs, W)', True, 'import tensorflow as tf\n'), ((113, 32, 113, 55), 'tensorflow.identity', 'tf.identity', ({(113, 44, 113, 54): 'seq_length'}, {}), '(seq_length)', True, 'import tensorflow as tf\n'), ((368, 58, 368, 81), 'tensorflow.identity', 'tf.identity', ({(368, 70, 368, 80): 'seq_length'}, {}), '(seq_length)', True, 'import tensorflow as tf\n')] |
noname34/CHARM_Project_Hazard_Perception_I | cookie-cutter/src/templates/template.py | 2d03d9e8911afad21818c6f837558503508a59bd | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: Kevin Bürgisser
# @Email: [email protected]
# @Date: 04.2020
# Context: CHARM PROJECT - Hazard perception
"""
Module documentation.
"""
# Imports
import sys
#import os
# Global variables
# Class declarations
# Function declarations
def main():
args = sys.argv[1:]
if not args:
print('usage: [--flags options] [inputs] ')
sys.exit(1)
# Main body
if __name__ == '__main__':
main() | [((32, 8, 32, 19), 'sys.exit', 'sys.exit', ({(32, 17, 32, 18): '(1)'}, {}), '(1)', False, 'import sys\n')] |
siwill22/magSA | utils/gridpeak.py | 9f3a12e6ed971d67444804cad57734dc0b4772ff | import numpy
def gridpeak(t, X=None):
    # GP = GRIDPEAK(...)
    # gp = gridpeak(t) returns grid peaks found with the Blakely and Simpson
    # method: each cell is scored by how many of the four principal directions
    # (the two grid axes and the two diagonals) it strictly exceeds both of
    # its neighbours in.
    # gp = gridpeak(t, X) optionally removes peaks scoring less than X,
    # where X can be between 1 and 4.
    print('shape ', t.shape)
m, n = t.shape
p = 1
gp = numpy.zeros((m, n))
for i in numpy.arange(p, m - p):
for j in numpy.arange(p, n - p):
data = numpy.zeros(4)
data[0] = t[i - p, j] < t[i, j] and t[i, j] > t[i + p, j]
data[1] = t[i, j - p] < t[i, j] and t[i, j] > t[i, j + p]
data[2] = t[i + p, j - p] < t[i, j] and t[i, j] > t[i - p, j + p]
data[3] = t[i - p, j - p] < t[i, j] and t[i, j] > t[i + p, j + p]
gp[i, j] = numpy.sum(data)
if X:
gp[gp < X] = numpy.nan
gp = gp / gp
return gp
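
# A minimal usage sketch, not part of the original module; the synthetic grid
# below is illustrative only. It builds a single smooth bump and keeps only
# peaks that pass at least 3 of the 4 directional tests.
if __name__ == '__main__':
    y, x = numpy.mgrid[0:50, 0:50]
    grid = numpy.exp(-((x - 25.0) ** 2 + (y - 25.0) ** 2) / 20.0)
    peaks = gridpeak(grid, X=3)  # NaN everywhere except qualifying peak cells
    print('number of peak cells:', numpy.nansum(peaks))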
| [] |