max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
tests/test_formatter.py | hbraux/kafkacli | 0 | 3700 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pytest
import json
from kafkacli.formatter import Formatter
sampleJson = json.loads('{"a":"s", "b":1}')
def test_print_default(capsys):
Formatter().print(sampleJson)
captured = capsys.readouterr()
assert captured.out == '{"a": "s", "b": 1}\n'
def test_print_idents(capsys):
Formatter(indents=True).print(sampleJson)
captured = capsys.readouterr()
assert captured.out == '{\n "a": "s",\n "b": 1\n}\n'
def test_print_colors(capsys):
Formatter(colors=True).print(sampleJson)
captured = capsys.readouterr()
assert captured.out == \
'{"a": \x1b[34m"s"\x1b[39m, "b": \x1b[31m1\x1b[39m}\n'
| 2.40625 | 2 |
src/jobs/forms.py | arc198/DJANGO-JOB-SITE | 20 | 3701 | from django import forms
from .models import Application
class ApplicationForm(forms.ModelForm):
class Meta:
model = Application
fields = ('resume', 'cover_letter',) | 1.820313 | 2 |
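
The ApplicationForm above is a standard Django ModelForm restricted to the resume and cover_letter fields. As a minimal sketch of how such a form is typically wired up (not part of the repository shown; the view name, template path, and redirect target are hypothetical):

```python
from django.shortcuts import redirect, render

from .forms import ApplicationForm


def apply(request):
    if request.method == "POST":
        # Bind both POST data and uploaded files, since the resume is a file field.
        form = ApplicationForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return redirect("job-list")  # hypothetical URL name
    else:
        form = ApplicationForm()
    return render(request, "jobs/apply.html", {"form": form})
```
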
src/test/tests/unit/protocol.py | ylee88/visit | 1 | 3702 | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: protocol.py
#
# Tests: visitprotocol unit test
#
# <NAME>, Tue Jan 11 10:19:23 PST 2011
# ----------------------------------------------------------------------------
tapp = visit_bin_path("visitprotocol")
res = sexe(tapp,ret_output=True)
if res["return_code"] == 0:
excode = 111
else:
excode = 113
Exit(excode)
| 2.09375 | 2 |
pyMazeBacktrack.py | Dozed12/pyMazeBacktrack | 2 | 3703 | import libtcodpy as libtcod
from random import randint
nSquares = 30
nTiles = nSquares * 2 + 1
SCREEN_WIDTH = nTiles
SCREEN_HEIGHT = nTiles
libtcod.console_set_custom_font("cp437_12x12.png", libtcod.FONT_LAYOUT_ASCII_INROW)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'pyMazeBacktrack', False, libtcod.RENDERER_OPENGL)
def CheckDir(x,y,size,direction,table):
if direction == 1:
if y - 2 <= 0:
return 0
if table[x][y-2] == white:
return 0
elif direction == 2:
if x + 2 >= size:
return 0
if table[x+2][y] == white:
return 0
elif direction == 3:
if y + 2 >= size:
return 0
if table[x][y+2] == white:
return 0
elif direction == 4:
if x - 2 <= 0:
return 0
if table[x-2][y] == white:
return 0
return 1
def Possible(x,y,table,size):
if x+2 < size:
if table[x+2][y] == black:
return 1
if x-2 > 0:
if table[x-2][y] == black:
return 1
if y+2 < size:
if table[x][y+2] == black:
return 1
if y-2 > 0:
if table[x][y-2] == black:
return 1
return 0
black = libtcod.black
white = libtcod.white
Table = [[0 for i in range(nTiles)]for i in range(nTiles)]
for x in range(nTiles):
for y in range(nTiles):
Table[x][y] = black
libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)
libtcod.console_flush()
Memory = []
CurrX = 1
CurrY = 1
Table[CurrX][CurrY] = white
end = 0
while end == 0:
while Possible(CurrX,CurrY,Table,nTiles):
Dir = randint(1,4)
while CheckDir(CurrX,CurrY,nTiles,Dir,Table) == 0:
Dir = randint(1,4)
if Dir == 1:
Table[CurrX][CurrY - 1] = white
CurrY -= 2
Table[CurrX][CurrY] = white
elif Dir == 2:
Table[CurrX + 1][CurrY] = white
CurrX += 2
Table[CurrX][CurrY] = white
elif Dir == 3:
Table[CurrX][CurrY + 1] = white
CurrY += 2
Table[CurrX][CurrY] = white
elif Dir == 4:
Table[CurrX - 1][CurrY] = white
CurrX -= 2
Table[CurrX][CurrY] = white
Memory.append(Dir)
# redraw the maze after carving a new passage
for x in range(nTiles):
for y in range(nTiles):
libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)
libtcod.console_flush()
while Possible(CurrX,CurrY,Table,nTiles) == 0:
MemorySize = len(Memory)
Dir = Memory[MemorySize-1]
if Dir == 1:
CurrY += 2
elif Dir == 2:
CurrX -= 2
elif Dir == 3:
CurrY -= 2
elif Dir == 4:
CurrX += 2
del Memory[MemorySize-1]
if CurrX == 1 and CurrY == 1:
end = 1
break
# redraw the maze after backtracking
for x in range(nTiles):
for y in range(nTiles):
libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)
libtcod.console_flush()
libtcod.console_wait_for_keypress(True)
| 2.5625 | 3 |
source/tests/test_resources.py | aws-solutions/maintaining-personalized-experiences-with-machine-learning | 6 | 3704 | <gh_stars>1-10
# ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# ######################################################################################################################
import pytest
from shared.resource import (
DatasetGroup,
Schema,
Dataset,
DatasetImportJob,
Solution,
SolutionVersion,
Campaign,
EventTracker,
BatchSegmentJob,
BatchInferenceJob,
)
@pytest.mark.parametrize(
"klass,camel,dash,snake",
[
(DatasetGroup, "datasetGroup", "dataset-group", "dataset_group"),
(Schema, "schema", "schema", "schema"),
(Dataset, "dataset", "dataset", "dataset"),
(
DatasetImportJob,
"datasetImportJob",
"dataset-import-job",
"dataset_import_job",
),
(Solution, "solution", "solution", "solution"),
(SolutionVersion, "solutionVersion", "solution-version", "solution_version"),
(Campaign, "campaign", "campaign", "campaign"),
(EventTracker, "eventTracker", "event-tracker", "event_tracker"),
(
BatchInferenceJob,
"batchInferenceJob",
"batch-inference-job",
"batch_inference_job",
),
(BatchSegmentJob, "batchSegmentJob", "batch-segment-job", "batch_segment_job"),
],
ids=[
"DatasetGroup",
"Schema",
"Dataset",
"DatasetImportJob",
"Solution",
"SolutionVersion",
"Campaign",
"EventTracker",
"BatchInferenceJob",
"BatchSegmentJob,",
],
)
def test_resource_naming(klass, camel, dash, snake):
assert klass().name.camel == camel
assert klass().name.dash == dash
assert klass().name.snake == snake
| 1.570313 | 2 |
gradefiles-send.py | lapets/bu-gsubmit-grading | 3 | 3705 | #####################################################################
##
## gradefiles-send.py
##
## Script to send grade files by email to enrolled students; the
## input grade file names should correspond to the user names of
## the students.
##
##
from email.mime.text import MIMEText # For creating a message string.
from subprocess import Popen, PIPE # For sending email on linux.
import sys # For command line arguments.
import os # For commands and file manipulation (walk, path, system).
#####################################################################
## Sending a simple email message.
##
def send(txt, courseNumber, task, sender, targets):
msg = MIMEText(txt)
msg["From"] = sender + "@bu.edu"
msg["To"] = ",".join([target + "@bu.edu" for target in targets])
msg["Cc"] = sender + "@bu.edu"
msg["Subject"] = "CS " + courseNumber + " " + task + " grade"
p = Popen(["/usr/sbin/sendmail", "-t"], stdin=PIPE)
p.communicate(bytes(msg.as_string(), 'UTF-8'))
#####################################################################
## Process the command line parameters.
##
if len(sys.argv) == 6\
and (int(sys.argv[1][0:3]) in range(100,1000))\
and sys.argv[2] in ['Fall', 'Spring']\
and int(sys.argv[3]) in range(2000,2100):
courseNumber = sys.argv[1] # Accepts course names like "591 X1."
season = sys.argv[2]
year = sys.argv[3]
task = sys.argv[4]
sender = sys.argv[5]
else:
print('\n Usage:\n\n % python gradefiles-send.py <###> <Fall|Spring> <YYYY> <task> <sender-username>\n')
exit()
#####################################################################
## Check for list of files.
##
if not os.path.exists('./data'):
print('No folder "data" containing grade files found. Exiting.')
exit()
#####################################################################
## Send the grade files.
##
for curdir, dirs, files in os.walk('./data/'):
for file in files:
txt = open('./data/'+file, 'r').read()
targets = file.split('.')[0].split("_")
send(txt, courseNumber, task, sender, targets)
print('Sent grade file to ' + str(targets) + '.')
#eof | 3.328125 | 3 |
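
The header comments above explain that the script mails each file under ./data to the user names encoded in its file name (underscore-separated, everything before the first dot). A hypothetical sketch of preparing that layout; the user names, grades, and course details below are made up:

```python
import os

os.makedirs("data", exist_ok=True)

# One text file per recipient group; "adoe_blee.txt" is mailed to both adoe and blee.
with open(os.path.join("data", "jsmith.txt"), "w") as f:
    f.write("Problem Set 1: 95/100\nGood work.\n")
with open(os.path.join("data", "adoe_blee.txt"), "w") as f:
    f.write("Problem Set 1 (pair submission): 88/100\n")

# Then run, for example:
#   python gradefiles-send.py 591 Fall 2015 ps1 grader
```
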
Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py | GalAster/16 | 3 | 3706 | <filename>Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py<gh_stars>1-10
import os
import pickle
import tensorflow as tf
import wolframclient.serializers as wxf
name = 'karras2018iclr-celebahq-1024x1024'
file = open(name + '.pkl', 'rb')
sess = tf.InteractiveSession()
G, D, Gs = pickle.load(file)
saver = tf.train.Saver()
save_path = "./target/" + name + "/"
model_name = 'model'
if not os.path.exists(save_path):
os.makedirs(save_path)
save_path_full = os.path.join(save_path, model_name)
saver.save(sess, save_path_full)
ckpt = tf.train.get_checkpoint_state(save_path)
reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path)
all_variables = list(reader.get_variable_to_shape_map().keys())
npy = dict(zip(all_variables, map(reader.get_tensor, all_variables)))
wxf.export(npy, name + '.wxf', target_format='wxf')
# Save as protobuf
with tf.Session() as sess:
tf.initialize_all_variables().run()
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=sess.graph_def,
# output_node_names=['G_paper_1/images_out']
output_node_names=['G_paper_1/ToRGB_lod0/add']
)
with tf.gfile.GFile("./target/" + name + ".pb", "wb") as file: # save the model as a protobuf
file.write(output_graph_def.SerializeToString()) # serialize the graph and write it out
| 2.046875 | 2 |
src/moveGoogle.py | Quanta-Robotics/Robot-Blueberry | 25 | 3707 | #!/usr/bin/env python
import os
import os.path
import yaml
import time
import random
import multiprocessing
import RPi.GPIO as GPIO
from talk import say
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
from adafruit_servokit import ServoKit
Motor1 = {'EN': 27, 'input1': 19, 'input2': 16}
Motor2 = {'EN': 22, 'input1': 26, 'input2': 20}
for x in Motor1:
GPIO.setup(Motor1[x], GPIO.OUT)
GPIO.setup(Motor2[x], GPIO.OUT)
EN1 = GPIO.PWM(Motor1['EN'], 100)
EN2 = GPIO.PWM(Motor2['EN'], 100)
EN1.start(0)
EN2.start(0)
hand = ServoKit(channels=16)
ROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..'))
def readYaml():
with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:
servo = yaml.load(conf, Loader=yaml.FullLoader)
return servo
def writeYaml(s=None):
with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as conf:
if s==None:
yaml.dump(servo,conf)
else:
yaml.dump(s,conf)
servo = readYaml()
if servo == None:
with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:
servoBackUp = yaml.load(conf, Loader=yaml.FullLoader)
writeYaml(servoBackUp)
servo = readYaml()
if servo == None:
print('close')
exit()
Initial = servo['Initial_Position']['I2C']
Current = servo['Current_Position']['I2C']
InitialGpio = servo['Initial_Position']['Gpio']
CurrentGpio = servo['Current_Position']['Gpio']
GpioPin = servo['Pin']['Gpio']
for i in range(0,6):
GPIO.setup(GpioPin[i], GPIO.OUT)
Servo = []
for i in range(0,6):
Servo.append(GPIO.PWM(GpioPin[i],50))
Servo[i].start(0)
def changeDegree(pin,newDegree,time1=0.05,update=5):
maxChange = 0
pinSize = len(pin)
for i in range(0,pinSize):
maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange)
for deg in range(0,maxChange,update):
for i in range(0,pinSize):
if Current[pin[i]]<newDegree[i]:
Current[pin[i]] += update
elif Current[pin[i]]>newDegree[i]:
Current[pin[i]] -= update
for i in range(0,pinSize):
hand.servo[pin[i]].angle = Current[pin[i]]
servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]]
writeYaml()
time.sleep(time1)
def takePosition():
changeDegree([7,8],[180,0])
changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0])
def changeDegreeGpio(pin,degree,update,duration):
pinSize = len(pin)
for i in range(0,pinSize):
p = pin[i]
if CurrentGpio[p]>degree[i]:
update = -update
for deg in range(CurrentGpio[p],degree[i],update):
duty = deg/18
duty+=2
Servo[p].ChangeDutyCycle(duty)
time.sleep(duration)
CurrentGpio[p]=degree[i]
writeYaml()
def Run(a, b, c, d, x):
GPIO.output(Motor1['input1'], GPIO.LOW)
GPIO.output(Motor1['input2'], GPIO.LOW)
GPIO.output(Motor2['input1'], GPIO.LOW)
GPIO.output(Motor2['input2'], GPIO.LOW)
if a==1:
GPIO.output(Motor1['input1'], GPIO.HIGH)
if b==1:
GPIO.output(Motor1['input2'], GPIO.HIGH)
if c==1:
GPIO.output(Motor2['input1'], GPIO.HIGH)
if d==1:
GPIO.output(Motor2['input2'], GPIO.HIGH)
EN2.ChangeDutyCycle(x)
EN1.ChangeDutyCycle(x)
def Stop():
Run(0,0,0,0,0)
def Start_Slow(a, b, c, d):
for i in range(0,100,20):
Run(a,b,c,d,i)
time.sleep(0.5)
def Stop_Slow(a,b,c,d):
for i in range(100,0,-20):
Run(a,b,c,d,i)
time.sleep(0.5)
def yes(times=3):
for i in range(0,times):
changeDegree([0],[30])
time.sleep(0.08)
changeDegree([0],[0])
time.sleep(0.08)
def no(times=3):
for i in range(0,times):
changeDegree([15],[70],5,0.05)
time.sleep(0.2)
changeDegree([15],[110],5,0.05)
time.sleep(0.2)
changeDegree([15],[90],5,0.05)
def move_head(times=3):
for i in range(0,times):
changeDegree([0],[20])
changeDegreeGpio([0],[80],5,0.05)
changeDegree([0],[0])
changeDegreeGpio([0],[100],5,0.05)
changeDegreeGpio([0],[90],10,0.01)
def random0():
r = random.randrange(1,10000000)%3
if(r==1):
changeDegree([0],[20])
changeDegree([0],[0])
elif(r==2):
changeDegreeGpio([0],[120],5,0.05)
changeDegreeGpio([0],[90],5,0.05)
else:
changeDegreeGpio([0],[60],5,0.05)
changeDegreeGpio([0],[90],5,0.05)
def random1():
r = random.randrange(1,3)
if(r==1):
changeDegree([0],[20])
changeDegree([0],[0])
changeDegree([3],[50])
changeDegree([9],[100])
changeDegree([9],[60])
changeDegree([3],[0])
elif(r==2):
changeDegree([0],[20])
changeDegree([0],[0])
changeDegree([4],[120])
changeDegree([10],[140])
changeDegree([10],[180])
changeDegree([4],[170])
else:
changeDegree([3,4],[50,120])
changeDegree([9,10],[100,140])
changeDegree([9,10],[60,180])
changeDegree([3,4],[0,180])
def random2():
changeDegree([3,4],[20,150])
pin = [7,8,9,10]
deg = [[160,0,60,100],[180,20,100,140]]
ok = [0,0,0,0]
select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1]
for i in range(0,15):
r = select[i%len(select)]%4
print (' move ',r)
changeDegree([pin[r]],[deg[ok[r]][r]])
ok[r]^=1
takePosition()
def random3():
changeDegree([3,4],[20,150])
pin = [7,8,9,10]
deg = [[160,0,60,100],[180,20,100,140]]
ok = [0,0,0,0]
for i in range(0,15):
r = random.randrange(1,1000000)%4
print (' move ',r)
changeDegree([pin[r]],[deg[ok[r]][r]])
takePosition()
def randomCall(t):
changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20])
pin = [5,6,7,8]
deg = [[80,50,100,70],[110,90,110,90]]
select = [89,93,472,347,2, 34, 134, 1937, 1983, 1739, 107, 894, 48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973]
ok = [0,0,0,0]
ln = len(select)
for i in range(0,t*3):
r = select[i%16]%4
changeDegree([pin[r]],[deg[ok[r]][r]])
ok[r]^=1
takePosition()
def expression(t):
print (' i got value of t is : ',t)
if(t==0):
random0()
elif(t==1):
random1()
elif(t==2):
random2()
elif(t==3):
random3()
else:
randomCall(t)
def speakOnline(t):
expression(t)
def speakOffline(speech):
t = int(len(speech)/15)
print ('Offline t value is : ',t)
p1 = multiprocessing.Process(target=expression,args=[t])
p1.start()
say(speech)
| 2.28125 | 2 |
src/onegov/translator_directory/models/language.py | politbuero-kampagnen/onegov-cloud | 0 | 3708 | from uuid import uuid4
from sqlalchemy import Index, Column, Text, Table, ForeignKey
from sqlalchemy.orm import object_session
from onegov.core.orm import Base
from onegov.core.orm.types import UUID
spoken_association_table = Table(
'spoken_lang_association',
Base.metadata,
Column(
'translator_id',
UUID,
ForeignKey('translators.id'),
nullable=False),
Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
written_association_table = Table(
'written_lang_association',
Base.metadata,
Column(
'translator_id',
UUID,
ForeignKey('translators.id'),
nullable=False),
Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
mother_tongue_association_table = Table(
'mother_tongue_association',
Base.metadata,
Column(
'translator_id',
UUID,
ForeignKey('translators.id'),
nullable=False),
Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
class Language(Base):
__tablename__ = 'languages'
__table_args__ = (
Index('unique_name', 'name', unique=True),
)
id = Column(UUID, primary_key=True, default=uuid4)
name = Column(Text, nullable=False)
@property
def speakers_count(self):
session = object_session(self)
return session.query(
spoken_association_table).filter_by(lang_id=self.id).count()
@property
def writers_count(self):
session = object_session(self)
return session.query(
written_association_table).filter_by(lang_id=self.id).count()
@property
def native_speakers_count(self):
"""Having it as mother tongue..."""
session = object_session(self)
return session.query(
mother_tongue_association_table).filter_by(lang_id=self.id).count()
@property
def deletable(self):
return (
self.speakers_count
+ self.writers_count
+ self.native_speakers_count
) == 0
| 2.515625 | 3 |
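
The Language model above exposes read-only counts over the three association tables plus a deletable flag. A minimal usage sketch, assuming an already configured SQLAlchemy session bound to this schema (session setup is not shown in the file):

```python
from onegov.translator_directory.models.language import Language


def removable_languages(session):
    # A language may be removed only when no translator speaks it, writes it,
    # or lists it as mother tongue (see Language.deletable above).
    return [lang.name for lang in session.query(Language) if lang.deletable]
```
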
tfjs-converter/python/tensorflowjs/converters/graph_rewrite_util.py | djemeljanovs/tfjs | 0 | 3709 | <gh_stars>0
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import re
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import tensor_util
# Custom op name for fused depthwise conv2d
FUSED_DEPTHWISE_CONV2D = 'FusedDepthwiseConv2dNative'
# The grappler op name for fused MatMul which starts with '_'
FUSED_MATMUL = '_FusedMatMul'
def node_from_map(node_map, name):
"""Pulls a node def from a dictionary for a given name.
Args:
node_map: Dictionary containing an entry indexed by name for every node.
name: Identifies the node we want to find.
Returns:
NodeDef of the node with the given name.
Raises:
ValueError: If the node isn't present in the dictionary.
"""
stripped_name = node_name_from_input(name)
if stripped_name not in node_map:
raise ValueError("No node named '%s' found in map." % name)
return node_map[stripped_name]
def values_from_const(node_def):
"""Extracts the values from a const NodeDef as a numpy ndarray.
Args:
node_def: Const NodeDef that has the values we want to access.
Returns:
Numpy ndarray containing the values.
Raises:
ValueError: If the node isn't a Const.
"""
if node_def.op != "Const":
raise ValueError(
"Node named '%s' should be a Const op for values_from_const." %
node_def.name)
input_tensor = node_def.attr["value"].tensor
tensor_value = tensor_util.MakeNdarray(input_tensor)
return tensor_value
# Whether to scale by gamma after normalization.
def scale_after_normalization(node):
if node.op == "BatchNormWithGlobalNormalization":
return node.attr["scale_after_normalization"].b
return True
def node_name_from_input(node_name):
"""Strips off ports and other decorations to get the underlying node name."""
if node_name.startswith("^"):
node_name = node_name[1:]
m = re.search(r"(.*):\d+$", node_name)
if m:
node_name = m.group(1)
return node_name
def cleanup_graph_def(input_graph_def, nodes_to_skip, inputs_to_remove):
"""Clean up the graph def by removing the skipped nodes and clean up the nodes
with inputs that have been removed.
Args:
input_graph_def: GraphDef object to be cleaned.
nodes_to_skip: Dict with node names to be skipped.
inputs_to_remove: List of nodes to be removed from inputs of all nodes.
Returns:
GraphDef that has been cleaned.
"""
result_graph_def = graph_pb2.GraphDef()
for node in input_graph_def.node:
if node.name in nodes_to_skip:
continue
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(node)
for value in inputs_to_remove:
for i, input_node in enumerate(new_node.input):
if input_node == value.name:
new_node.input[i] = value.input[0]
result_graph_def.node.extend([new_node])
result_graph_def.library.CopyFrom(input_graph_def.library)
result_graph_def.versions.CopyFrom(input_graph_def.versions)
return result_graph_def
| 2.125 | 2 |
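
The helpers above normalize TensorFlow node references before graph surgery; in particular, node_name_from_input strips control-dependency carets and output-port suffixes. An illustrative sketch, assuming the module is importable under the path shown for this row:

```python
from tensorflowjs.converters import graph_rewrite_util

# "^" marks a control dependency and ":0" an output port; both are removed.
print(graph_rewrite_util.node_name_from_input("^scope/conv:0"))  # scope/conv
print(graph_rewrite_util.node_name_from_input("scope/conv"))     # scope/conv
```
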
loss_fn/classification_loss_fns/binary_cross_entropy.py | apple/ml-cvnets | 209 | 3710 | <filename>loss_fn/classification_loss_fns/binary_cross_entropy.py
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
from torch.nn import functional as F
from torch import Tensor
import argparse
from . import register_classification_loss_fn
from .. import BaseCriteria
@register_classification_loss_fn(name="binary_cross_entropy")
class ClsBinaryCrossEntropy(BaseCriteria):
"""Binary CE for classification tasks"""
def __init__(self, opts, *args, **kwargs) -> None:
super().__init__()
def forward(
self, input_sample: Tensor, prediction: Tensor, target: Tensor, *args, **kwargs
) -> Tensor:
if target.dim() != prediction.dim():
target = F.one_hot(target, num_classes=prediction.shape[-1])
return F.binary_cross_entropy_with_logits(
input=prediction,
target=target.to(prediction.dtype),
weight=None,
reduction="sum",
)
def __repr__(self) -> str:
return "{}()".format(self.__class__.__name__)
| 2.3125 | 2 |
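
ClsBinaryCrossEntropy one-hot encodes integer targets and then applies binary cross-entropy with logits, summed over all elements. A standalone sketch of that core computation in plain PyTorch, outside the cvnets registry and BaseCriteria machinery; shapes and values are illustrative:

```python
import torch
from torch.nn import functional as F

prediction = torch.randn(4, 10)        # logits for 4 samples, 10 classes
target = torch.randint(0, 10, (4,))    # integer class labels

# Mirror forward() above: one-hot the targets, then sum BCE-with-logits.
if target.dim() != prediction.dim():
    target = F.one_hot(target, num_classes=prediction.shape[-1])
loss = F.binary_cross_entropy_with_logits(
    input=prediction, target=target.to(prediction.dtype), reduction="sum"
)
print(loss.item())
```
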
Sorting/insertion_sort.py | lakshyarawal/pythonPractice | 0 | 3711 | <filename>Sorting/insertion_sort.py
""" Insertion Sort Algorithm:"""
"""Implementation"""
def insertion_sort(arr) -> list:
n = len(arr)
for i in range(1, n):
swap_index = i
for j in range(i-1, -1, -1):
if arr[swap_index] < arr[j]:
arr[swap_index], arr[j] = arr[j], arr[swap_index]
swap_index -= 1
else:
break
return arr
def main():
arr_input = [10, 5, 30, 1, 2, 5, 10, 10]
a2 = insertion_sort(arr_input)
print(a2)
# Using the special variable
# __name__
if __name__ == "__main__":
main()
| 4.25 | 4 |
nipype/interfaces/spm/__init__.py | felixsc1/nipype | 8 | 3712 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Top-level namespace for spm."""
from .base import (Info, SPMCommand, logger, no_spm, scans_for_fname,
scans_for_fnames)
from .preprocess import (FieldMap, SliceTiming, Realign, RealignUnwarp,
Coregister, Normalize, Normalize12, Segment,
Smooth, NewSegment, DARTEL, DARTELNorm2MNI,
CreateWarped, VBMSegment)
from .model import (Level1Design, EstimateModel, EstimateContrast, Threshold,
OneSampleTTestDesign, TwoSampleTTestDesign,
PairedTTestDesign, MultipleRegressionDesign)
from .utils import (Analyze2nii, CalcCoregAffine, ApplyTransform, Reslice,
ApplyInverseDeformation, ResliceToReference, DicomImport)
| 1.40625 | 1 |
network.py | tobloef/neural-network | 3 | 3713 | <reponame>tobloef/neural-network<filename>network.py
import numpy as np
from mathUtils import *
class Network(object):
"""
Model for a feedforward Neural Network that use backpropagation with stochastic gradient decent.
"""
def __init__(self, layerSizes, biasVectors, weightMatrices):
"""
Initialise the network with a list of layer sizes and lists for biases and weights for the neurons in the network. The first layer is the input layer and the last layer is the output layer.
"""
self.layerSizes = layerSizes
self.biasVectors = biasVectors
self.weightMatrices = weightMatrices
@staticmethod
def generateRandomNetwork(layerSizes):
"""
Initialise a new network with random weights and biases. Input and output layers are included in the layerSizes list. The random weights and biases are generated using a Gaussian distribution, so the results are more probable to be around 0.
"""
biasVectors = []
"""Generate biases for each neuron in each layer, except the input layer."""
for size in layerSizes[1:]:
"""
np.random.randn generates arrays of arrays of random numbers, based on the paramters.
np.random.randn(3,2) will generate an array of 3 arrays with 2 random numbers.
"""
biasVectors.append(np.random.randn(size, 1))
"""Generate weights for connections between layers."""
weightMatrices = []
for size, prevSize in zip(layerSizes[:-1], layerSizes[1:]):
weightMatrices.append(np.random.randn(prevSize, size))
return Network(layerSizes, biasVectors, weightMatrices)
def getOutputs(self, inputs):
"""Return a vector of the network's outputs based on the given inputs, using feedforward."""
activations = inputs
for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices):
"""
For every layer, get the bias vector and the weight matrix. Then get dot product between the weight matrix and the output vector and add the bias vector. This is the activation vector for the current layer.
"""
zVector = np.dot(weightMatrix, activations) + biasVector
activations = sigmoid(zVector)
return activations
def train(self, data, epochs, batchSize, rate, testData=None):
"""
Train the neural network using stochastic gradient descent. Smaller batches of random samples from the training are used to reduce the training time. The training date is a list of tuples (inputs, expected outputs). The learning rate is how much to change the values each batch.
"""
print("Training network with shape {}, batch size {} and learning rate {} for {} epochs...".format(self.layerSizes, batchSize, rate, epochs))
for e in range(epochs):
np.random.shuffle(data)
batches = []
for i in range(0, len(data), batchSize):
batches.append(data[i:i+batchSize])
for batch in batches:
self._tuneNetwork(batch, rate)
if (testData):
result = self._evaluate(testData)
print("Epoch #{} completed with {:.2f}% correctness.".format(e+1, 100/len(testData)*result))
else:
print("Epoch #{} completed.".format(e))
def _tuneNetwork(self, batch, rate):
"""
Tune the weights and biases of the network by using backpropagation with gradient descend.
"""
"""
Setup matrix and vector based on the weight matrix and bias vector filled with zeroes. This is used for storing each change to make for each vector, for each set of training date.
"""
sumBiasVectors = []
for biasVector in self.biasVectors:
sumBiasVectors.append(np.zeros(biasVector.shape))
sumWeightMatrices = []
for weightMatrix in self.weightMatrices:
sumWeightMatrices.append(np.zeros(weightMatrix.shape))
for inputs, expected in batch:
"""
Get a matrix/vector with the required changes to the network, based on that set of training data, and add it to a set of matrix/vector totalling the changes needed from all the training data.
"""
deltaBiasVectors, deltaWeightMatrices = self._backpropagate(inputs, expected)
newSumBiasVectors = []
for totalBiasVector, deltaBiasVector in zip(sumBiasVectors, deltaBiasVectors):
newSumBiasVectors.append(totalBiasVector + deltaBiasVector)
sumBiasVectors = newSumBiasVectors
newSumWeightMatrices = []
for totalWeightMatrix, deltaWeightMatrix in zip(sumWeightMatrices, deltaWeightMatrices):
newSumWeightMatrices.append(totalWeightMatrix + deltaWeightMatrix)
sumWeightMatrices = newSumWeightMatrices
"""
Take each change for each set of training data, get the average of these and subtract them from the current weights and biases. Then use these as the new weights and biases.
"""
newBiasVectors = []
for biasVector, totalBiasVector in zip(self.biasVectors, sumBiasVectors):
newBiasVectors.append(biasVector - (rate/len(batch)) * totalBiasVector)
newWeightMatrices = []
for weightMatrix, totalWeightMatrix in zip(self.weightMatrices, sumWeightMatrices):
newWeightMatrices.append(weightMatrix - (rate/len(batch)) * totalWeightMatrix)
self.biasVectors = newBiasVectors
self.weightMatrices = newWeightMatrices
def _backpropagate(self, inputs, expected):
"""
Return a tuple with gradient of the cost function for each bias and weight, in the format (vector of bias changes, matrix of weight changes), for the specified set of training data.
"""
deltaBiasVectors = []
for biasVector in self.biasVectors:
deltaBiasVectors.append(np.zeros(biasVector.shape))
deltaWeightMatrices = []
for weightMatrix in self.weightMatrices:
deltaWeightMatrices.append(np.zeros(weightMatrix.shape))
"""Store all activations for the entire network, starting with the input layer."""
activationVector = inputs
activationVectors = [inputs]
"""Find the z-vector for layer in the network"""
zVectors = []
for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices):
zVector = np.dot(weightMatrix, activationVector) + biasVector
zVectors.append(zVector)
activationVector = sigmoid(zVector)
activationVectors.append(activationVector)
"""
* Start with output compared to expected, tune weights and biases based on the derivative of the cost function with respect to the weight/bias.
* Then move onto each hidden layer and the input layer.
"""
deltaBiasVector = (activationVectors[-1] - expected) * 2 * sigmoidDerivative(zVectors[-1])
deltaBiasVectors[-1] = deltaBiasVector
deltaWeightMatrices[-1] = np.dot(deltaBiasVector, activationVectors[-2].transpose())
for l in range(-2, -len(self.layerSizes), -1):
# Equivalent to https://i.imgur.com/8PQQ28r.png, because deltaBiasVector is * 1 instead
weightMatrix = self.weightMatrices[l+1].transpose()
sigmoidDeriv = sigmoidDerivative(zVectors[l])
deltaBiasVector = np.dot(weightMatrix, deltaBiasVector) * sigmoidDeriv
deltaBiasVectors[l] = deltaBiasVector
deltaWeightMatrices[l] = np.dot(deltaBiasVector, activationVectors[l-1].transpose())
return (deltaBiasVectors, deltaWeightMatrices)
def _evaluate(self, testData):
"""Test the network with the specified test data and return the number of correct guesses."""
correctGuesses = 0
for inputs, expected in testData:
"""Increment correct guesses if the most active output is the expected one."""
outputs = self.getOutputs(inputs)
guess = np.argmax(outputs)
if (guess == expected):
correctGuesses += 1
return correctGuesses | 3.8125 | 4 |
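
The Network class above feeds column vectors forward and trains with stochastic gradient descent plus backpropagation. A hypothetical usage sketch, assuming it runs alongside the class (for example under an `if __name__ == "__main__":` guard in network.py) and that mathUtils provides the sigmoid and sigmoidDerivative functions imported at the top; the XOR-style data is illustrative:

```python
import numpy as np

# Tiny 2-2-1 network trained on XOR-like column-vector samples.
net = Network.generateRandomNetwork([2, 2, 1])
data = [
    (np.array([[0.0], [0.0]]), np.array([[0.0]])),
    (np.array([[0.0], [1.0]]), np.array([[1.0]])),
    (np.array([[1.0], [0.0]]), np.array([[1.0]])),
    (np.array([[1.0], [1.0]]), np.array([[0.0]])),
]
net.train(data, epochs=100, batchSize=4, rate=3.0)
print(net.getOutputs(np.array([[1.0], [0.0]])))
```
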
examples/airflow/dags/etl_orders_7_days.py | phixMe/marquez | 0 | 3714 | <reponame>phixMe/marquez
from datetime import datetime
from marquez_airflow import DAG
from airflow.operators.postgres_operator import PostgresOperator
from airflow.utils.dates import days_ago
default_args = {
'owner': 'datascience',
'depends_on_past': False,
'start_date': days_ago(1),
'email_on_failure': False,
'email_on_retry': False,
'email': ['<EMAIL>']
}
dag = DAG(
'etl_orders_7_days',
schedule_interval='@hourly',
catchup=False,
default_args=default_args,
description='Loads orders placed over the last 7 days.'
)
t1 = PostgresOperator(
task_id='if_not_exists',
postgres_conn_id='food_delivery_db',
sql='''
CREATE TABLE IF NOT EXISTS orders_7_days (
order_id INTEGER REFERENCES orders(id),
placed_on TIMESTAMP NOT NULL,
discount_id INTEGER REFERENCES discounts(id),
menu_id INTEGER REFERENCES menus(id),
restaurant_id INTEGER REFERENCES restaurants(id),
menu_item_id INTEGER REFERENCES menu_items(id),
category_id INTEGER REFERENCES categories(id)
);''',
dag=dag
)
t2 = PostgresOperator(
task_id='truncate',
postgres_conn_id='food_delivery_db',
sql='TRUNCATE TABLE orders_7_days;',
dag=dag
)
t3 = PostgresOperator(
task_id='insert',
postgres_conn_id='food_delivery_db',
sql='''
INSERT INTO orders_7_days (order_id, placed_on, discount_id, menu_id, restaurant_id, menu_item_id, category_id)
SELECT o.id AS order_id, o.placed_on, o.discount_id, m.id AS menu_id, m.restaurant_id, mi.id AS menu_item_id, c.id AS category_id
FROM orders AS o
INNER JOIN menu_items AS mi
ON mi.id = o.menu_item_id
INNER JOIN categories AS c
ON c.id = mi.category_id
INNER JOIN menus AS m
ON m.id = c.menu_id
WHERE o.placed_on >= NOW() - interval '7 days'
''',
dag=dag
)
t1 >> t2 >> t3
| 2.25 | 2 |
sample/pizza.py | marianarmorgado/python-starter | 0 | 3715 | <filename>sample/pizza.py
# store information about a pizza being ordered
pizza = {
'crust': 'thick',
'toppings': ['mushrooms', 'extra vegan cheese']
}
# summarize the order
print("You ordered a " + pizza['crust'] + "-crust pizza" +
"with the following toppings:")
for topping in pizza['toppings']:
print("\t" + topping) | 4.03125 | 4 |
YouTube/CursoEmVideo/python/ex012.py | Fh-Shadow/Progamando | 0 | 3716 | <filename>YouTube/CursoEmVideo/python/ex012.py<gh_stars>0
a = float(input('What is the price of the product? R$'))
d = a - (a * 23 / 100)
print('The product that cost R${:.2f} will cost R${:.2f} with the 23% discount.'.format(a, d))
| 3.515625 | 4 |
dnnlib/submission/submit.py | gperdrizet/gansformer | 1,172 | 3717 | <reponame>gperdrizet/gansformer
# Submit a function to be run either locally or in a computing cluster.
# Compared to original StyleGAN implementation, we extend the support for automatic training resumption,
# and network recompilation.
import copy
import inspect
import os
import pathlib
import pickle
import platform
import pprint
import re
import shutil
import sys
import time
import traceback
from enum import Enum
from .. import util
from ..util import EasyDict
from . import internal
class SubmitTarget(Enum):
# The target where the function should be run
# LOCAL: Run it locally
LOCAL = 1
class PathType(Enum):
# Determines in which format should a path be formatted
# WINDOWS: Format with Windows style
# LINUX: Format with Linux/Posix style
# AUTO: Use current OS type to select either WINDOWS or LINUX
WINDOWS = 1
LINUX = 2
AUTO = 3
class PlatformExtras:
# A mixed bag of values used by dnnlib heuristics
# Attributes:
# data_reader_buffer_size: Used by DataReader to size internal shared memory buffers
# data_reader_process_count: Number of worker processes to spawn (zero for single
# thread operation)
def __init__(self):
self.data_reader_buffer_size = 1<<30 # 1 GB
self.data_reader_process_count = 0 # single threaded default
_user_name_override = None
class SubmitConfig(util.EasyDict):
# Strongly typed config dict needed to submit runs
# Attributes:
# run_dir_root: Path to the run dir root. Can be optionally templated with tags
# Needs to always be run through get_path_from_template
# run_desc: Description of the run. Will be used in the run dir and task name
# run_dir_ignore: List of file patterns used to ignore files when copying files to the run dir
# run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path root will
# be the src directory inside the run dir
# submit_target: Submit target enum value. Used to select where the run is actually launched
# num_gpus: Number of GPUs used/requested for the run
# print_info: Whether to print debug information when submitting
# local.do_not_copy_source_files: Do not copy source files from the working directory to the
# run dir.
# run_id: Automatically populated value during submit
# run_name: Automatically populated value during submit
# run_dir: Automatically populated value during submit
# run_func_name: Automatically populated value during submit
# run_func_kwargs: Automatically populated value during submit
# user_name: Automatically populated value during submit. Can be set by the user which will then
# override the automatic value
# task_name: Automatically populated value during submit
# host_name: Automatically populated value during submit
# platform_extras: Automatically populated values during submit. Used by various dnnlib libraries
# such as the DataReader class
def __init__(self):
super().__init__()
# run (set these)
self.run_dir_root = "" # should always be passed through get_path_from_template
self.run_desc = ""
self.run_dir_ignore = ["__pycache__", "*.pyproj", "*.sln", "*.suo", ".cache", ".idea", ".vs",
".vscode", "_cudacache"]
self.run_dir_extra_files = []
# submit (set these)
self.submit_target = SubmitTarget.LOCAL
self.num_gpus = 1
self.print_info = False
self.nvprof = False
self.local = internal.local.TargetOptions()
self.datasets = []
# (automatically populated)
self.run_id = None
self.run_name = None
self.run_dir = None
self.run_func_name = None
self.run_func_kwargs = None
self.user_name = None
self.task_name = None
self.host_name = "localhost"
self.platform_extras = PlatformExtras()
def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str:
# Replace tags in the given path template and return either Windows or Linux formatted path
# automatically select path type depending on running OS
if path_type == PathType.AUTO:
if platform.system() == "Windows":
path_type = PathType.WINDOWS
elif platform.system() == "Linux":
path_type = PathType.LINUX
else:
raise RuntimeError("Unknown platform")
path_template = path_template.replace("<USERNAME>", get_user_name())
# return correctly formatted path
if path_type == PathType.WINDOWS:
return str(pathlib.PureWindowsPath(path_template))
elif path_type == PathType.LINUX:
return str(pathlib.PurePosixPath(path_template))
else:
raise RuntimeError("Unknown platform")
def get_template_from_path(path: str) -> str:
# Convert a normal path back to its template representation
path = path.replace("\\", "/")
return path
def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str:
# Convert a normal path to template and the convert it back to a normal path with given path type
path_template = get_template_from_path(path)
path = get_path_from_template(path_template, path_type)
return path
def set_user_name_override(name: str) -> None:
# Set the global username override value
global _user_name_override
_user_name_override = name
def get_user_name():
# Get the current user name
if _user_name_override is not None:
return _user_name_override
elif platform.system() == "Windows":
return os.getlogin()
elif platform.system() == "Linux":
try:
import pwd
return pwd.getpwuid(os.geteuid()).pw_name
except:
return "unknown"
else:
raise RuntimeError("Unknown platform")
def make_run_dir_path(*paths):
# Make a path/filename that resides under the current submit run_dir
# Args:
# *paths: Path components to be passed to os.path.join
# Returns:
# A file/dirname rooted at submit_config.run_dir. If there's no
# submit_config or run_dir, the base directory is the current
# working directory.
# E.g., `os.path.join(dnnlib.submit_config.run_dir, "output.txt"))`
import dnnlib
if (dnnlib.submit_config is None) or (dnnlib.submit_config.run_dir is None):
return os.path.join(os.getcwd(), *paths)
return os.path.join(dnnlib.submit_config.run_dir, *paths)
def _create_run_dir_local(submit_config: SubmitConfig, resume: bool, create_new: bool) -> str:
# Create a new run dir with increasing ID number at the start
run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO)
if not os.path.exists(run_dir_root):
os.makedirs(run_dir_root)
run_dir = os.path.join(run_dir_root, submit_config.run_name)
if not resume:
if os.path.exists(run_dir) and create_new:
raise RuntimeError("The run dir already exists! ({0})".format(run_dir))
if not os.path.exists(run_dir):
os.makedirs(run_dir)
return run_dir
def _get_next_run_id_local(run_dir_root: str) -> int:
# Reads all directory names in a given directory (non-recursive) and returns the next (increasing) run id
# Assumes IDs are numbers at the start of the directory names
dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))]
r = re.compile("^\\d+") # match one or more digits at the start of the string
run_id = 0
for dir_name in dir_names:
m = r.match(dir_name)
if m is not None:
i = int(m.group())
run_id = max(run_id, i + 1)
return run_id
def _populate_run_dir(submit_config: SubmitConfig, run_dir: str) -> None:
# Copy all necessary files into the run dir. Assumes that the dir exists, is local, and is writable
pickle.dump(submit_config, open(os.path.join(run_dir, "submit_config.pkl"), "wb"))
with open(os.path.join(run_dir, "submit_config.txt"), "w") as f:
pprint.pprint(submit_config, stream = f, indent = 4, width = 200, compact = False)
if (submit_config.submit_target == SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files:
return
files = []
run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name)
assert "." in submit_config.run_func_name
for _idx in range(submit_config.run_func_name.count(".") - 1):
run_func_module_dir_path = os.path.dirname(run_func_module_dir_path)
files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = False)
dnnlib_module_dir_path = util.get_module_dir_by_obj_name("dnnlib")
files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = True)
files += submit_config.run_dir_extra_files
files = [(f[0], os.path.join(run_dir, "src", f[1])) for f in files]
files += [(os.path.join(dnnlib_module_dir_path, "submission", "internal", "run.py"), os.path.join(run_dir, "run.py"))]
util.copy_files_and_create_dirs(files)
def run_wrapper(submit_config: SubmitConfig) -> None:
# Wrap the actual run function call for handling logging, exceptions, typing, etc
is_local = submit_config.submit_target == SubmitTarget.LOCAL
# when running locally, redirect stderr to stdout, log stdout to a file, and force flushing
if is_local:
logger = util.Logger(file_name = os.path.join(submit_config.run_dir, "log.txt"), file_mode="a", should_flush = True)
else: # when running in a cluster, redirect stderr to stdout, and just force flushing (log writing is handled by run.sh)
logger = util.Logger(file_name = None, should_flush = True)
import dnnlib
dnnlib.submit_config = submit_config
exit_with_errcode = False
try:
print("dnnlib: Running {0}() on {1}...".format(submit_config.run_func_name, submit_config.host_name))
start_time = time.time()
run_func_obj = util.get_obj_by_name(submit_config.run_func_name)
assert callable(run_func_obj)
sig = inspect.signature(run_func_obj)
if "submit_config" in sig.parameters:
run_func_obj(submit_config = submit_config, **submit_config.run_func_kwargs)
else:
run_func_obj(**submit_config.run_func_kwargs)
print("dnnlib: Finished {0}() in {1}.".format(submit_config.run_func_name, util.format_time(time.time() - start_time)))
except:
if is_local:
raise
else:
traceback.print_exc()
log_src = os.path.join(submit_config.run_dir, "log.txt")
log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), "{0}-error.txt".format(submit_config.run_name))
shutil.copyfile(log_src, log_dst)
# Defer sys.exit(1) to happen after we close the logs and create a _finished.txt
exit_with_errcode = True
finally:
open(os.path.join(submit_config.run_dir, "_finished.txt"), "w").close()
dnnlib.RunContext.get().close()
dnnlib.submit_config = None
logger.close()
# If we hit an error, get out of the script now and signal the error
# to whatever process that started this script.
if exit_with_errcode:
sys.exit(1)
return submit_config
def open_file_or_url(file_or_url):
if util.is_url(file_or_url):
return util.open_url(file_or_url, cache_dir = ".stylegan2-cache")
return open(file_or_url, "rb")
def load_pkl(file_or_url):
with open_file_or_url(file_or_url) as file:
return pickle.load(file, encoding = "latin1")
def submit_run(submit_config: SubmitConfig, run_func_name: str, create_newdir: bool = False,
resume: bool = False, load_config: bool = False, **run_func_kwargs) -> None:
# Create a run dir, gather files related to the run, copy files to the run dir, and launch the run in appropriate place.
# create_newdir: enforces the creation of a new run directory
# resume: resumes a prior experiment using its existing run directory
# load_config: in case resume = True, load prior experiment config instead of using the current command-line parameters
submit_config = copy.deepcopy(submit_config)
submit_target = submit_config.submit_target
farm = None
if submit_target == SubmitTarget.LOCAL:
farm = internal.local.Target()
assert farm is not None # unknown target
# Disallow submitting jobs with zero num_gpus
if (submit_config.num_gpus is None) or (submit_config.num_gpus == 0):
raise RuntimeError("submit_config.num_gpus must be set to a non-zero value")
if submit_config.user_name is None:
submit_config.user_name = get_user_name()
submit_config.run_func_name = run_func_name
submit_config.run_func_kwargs = run_func_kwargs
#--------------------------------------------------------------------
# Prepare submission by populating the run dir
#--------------------------------------------------------------------
host_run_dir = _create_run_dir_local(submit_config, resume, create_new = create_newdir)
submit_config.task_name = "{}-{:05d}-{}".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc)
docker_valid_name_regex = "^[a-zA-Z0-9][a-zA-Z0-9_.-]+$"
if not re.match(docker_valid_name_regex, submit_config.task_name):
raise RuntimeError("Invalid task name. Probable reason: unacceptable characters in your submit_config.run_desc. Task name must be accepted by the following regex: " + docker_valid_name_regex + ", got " + submit_config.task_name)
# Farm specific preparations for a submit
farm.finalize_submit_config(submit_config, host_run_dir)
# In case of resumption, load_config = True to load the prior submit_config file from the directory
# (so to maintain the original configuration of the experiment rather than the newly provided
# command-line arguments.
if load_config:
config_file = os.path.join(host_run_dir, "submit_config.pkl")
if os.path.exists(config_file):
old_submit_config = submit_config
submit_config = load_pkl(config_file)
submit_config["run_id"] = old_submit_config["run_id"]
submit_config["run_name"] = old_submit_config["run_name"]
if "resume_pkl" in old_submit_config["run_func_kwargs"]:
submit_config["run_func_kwargs"]["resume_pkl"] = old_submit_config["run_func_kwargs"]["resume_pkl"]
submit_config["run_func_kwargs"]["resume_kimg"] = old_submit_config["run_func_kwargs"]["resume_kimg"]
_populate_run_dir(submit_config, host_run_dir)
return farm.submit(submit_config, host_run_dir)
| 2.125 | 2 |
pyecharts/custom/grid.py | zilong305/pycharts | 0 | 3718 | <gh_stars>0
#!/usr/bin/env python
# coding=utf-8
from pyecharts.option import grid
class Grid(object):
def __init__(self):
self._chart = None
self._js_dependencies = set()
def add(self, chart,
grid_width=None,
grid_height=None,
grid_top=None,
grid_bottom=None,
grid_left=None,
grid_right=None):
"""
:param chart:
chart instance
:param grid_width:
Width of grid component. Adaptive by default.
:param grid_height:
Height of grid component. Adaptive by default.
:param grid_top:
Distance between grid component and the top side of the container.
:param grid_bottom:
Distance between grid component and the bottom side of the container.
:param grid_left:
Distance between grid component and the left side of the container.
:param grid_right:
Distance between grid component and the right side of the container.
:return:
"""
if self._chart is None:
self._chart = chart
self._chart._option.update(grid=[])
self._js_dependencies = chart._js_dependencies
_grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right)
if _grid:
for _ in range(len(self._chart._option.get('series'))):
self._chart._option.get('grid').append(_grid)
else:
_series = (
chart._option.get('series'),
chart._option.get('xAxis', None),
chart._option.get('yAxis', None),
chart._option.get('legend')[0],
chart._option.get('title')[0]
)
_index, _index_once, _xaxis, _yaxis, _legend, _title = self.__custom(_series)
self._chart._option.get('legend').append(_legend)
self._chart._option.get('title').append(_title)
if _xaxis and _yaxis is not None:
try:
_xaxis[0].update(gridIndex=_index-1)
_yaxis[0].update(gridIndex=_index-1)
self._chart._option.get('xAxis').append(_xaxis[0])
self._chart._option.get('yAxis').append(_yaxis[0])
except:
pass
# indexflag is only identify for every series
_flag = self._chart._option.get('series')[0].get('indexflag')
_series_index = 0
for s in self._chart._option.get('series'):
if _flag == s.get('indexflag'):
s.update(xAxisIndex=_series_index, yAxisIndex=_series_index)
else:
_series_index += 1
s.update(xAxisIndex=_series_index, yAxisIndex=_series_index)
_flag = s.get('indexflag')
_grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right)
for _ in range(_index_once):
self._chart._option.get('grid').append(_grid)
self._js_dependencies.union(chart._js_dependencies)
def __custom(self, series):
"""
:param series:
series data
:return:
"""
_series, _xaxis, _yaxis, _legend, _title = series
for s in _series:
self._chart._option.get('series').append(s)
return len(self._chart._option.get('series')), len(_series), _xaxis, _yaxis, _legend, _title
def render(self, path="render.html"):
"""
:param path:
:return:
"""
self._chart.render(path)
def render_embed(self):
"""
:return:
"""
return self._chart.render_embed()
def show_config(self):
"""
:return:
"""
import pprint
return pprint.pprint(self._chart._option)
@property
def chart(self):
"""
:return:
"""
return self._chart
def _repr_html_(self):
"""
:return:
"""
return self._chart._repr_html_()
| 2.375 | 2 |
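
Grid merges several charts onto one canvas by appending their series together with per-chart grid placement options. A hypothetical sketch against the 0.x-era pyecharts API this module is written for; the chart titles and data are made up:

```python
from pyecharts import Bar, Line

bar = Bar("Monthly sales")
bar.add("sales", ["Jan", "Feb", "Mar"], [5, 20, 36])

line = Line("Trend")
line.add("trend", ["Jan", "Feb", "Mar"], [10, 12, 9])

grid = Grid()
grid.add(bar, grid_bottom="60%")  # keep a 60% bottom margin, so the bar sits in the upper area
grid.add(line, grid_top="60%")    # keep a 60% top margin, so the line sits in the lower area
grid.render("grid_demo.html")
```
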
smooch/conversations.py | devinmcgloin/smooch | 3 | 3719 | import logging
from .endpoint import ask
def send_message(user_id, message, sent_by_maker=True):
if not valid_args(user_id, message):
logging.warning("send message called with invalid args user_id={} message={}".format(user_id, message))
return
logging.debug("Sending message: user_id={0} message={1} sent_by_maker={2}".format(user_id, message, sent_by_maker))
role = "appMaker"
if not sent_by_maker:
role = "appUser"
data = {"text": message, "role": role}
return ask('appusers/{0}/conversation/messages'.format(user_id),
data,
'post')
def get_conversation(user_id):
if not user_id:
logging.warning("get conversation called with invalid arg user_id={}".format(user_id))
return
logging.debug("Get conversation: user_id={}".format(user_id))
return ask('appusers/{0}/conversation'.format(user_id), {}, 'get')
def request_payment(user_id, message, options):
"""Note that amount is a integer which specifies the amount of cents in the transaction
Smooch will default to the currency specified in your account settings."""
if not valid_args(user_id, message, options):
logging.warning("request payment called with invalid args user_id={} message={} options={}"
.format(user_id, message, options))
return
role = "appMaker"
buttons = []
for short_text, result in options:
buttons.append({
"type": "buy",
"text": short_text,
"amount": result})
data = {"text": message,
"role": role,
"actions": buttons}
return ask('appusers/{0}/conversation/messages'.format(user_id),
data,
'post')
def send_links(user_id, message, options):
"""Sends a series of links. The options field is a dictionary in which the keys are
descriptions and values uris"""
if not valid_args(user_id, message, options):
logging.warning("send links called with invalid args user_id={} message={} options={}"
.format(user_id, message, options))
return
role = "appMaker"
buttons = []
for short_text, result in options:
buttons.append({
"type": "link",
"text": short_text,
"uri": result})
data = {"text": message,
"role": role,
"actions": buttons}
return ask('appusers/{0}/conversation/messages'.format(user_id),
data,
'post')
def send_postbacks(user_id, message, options):
"""Sends a series of options that you can listen for on your webhook. The options field is a dictionary in which the keys are
descriptions and values the postback payload. You need to set up a webhook to listen for the postback."""
if not valid_args(user_id, message, options):
logging.warning("send postback called with invalid args user_id={} message={} options={}"
.format(user_id, message, options))
return
role = "appMaker"
buttons = []
for short_text, result in options:
buttons.append({
"type": "postback",
"text": short_text,
"payload": result
})
data = {"text": message,
"role": role,
"actions": buttons}
return ask('appusers/{0}/conversation/messages'.format(user_id),
data,
'post')
def send_buttons(user_id, message, options):
"""Options is a list of tuples in which the first element is the type of the button,
second the short text, and third the result for the specified type."""
if not valid_args(user_id, message, options):
logging.warning("send buttons called with invalid args user_id={} message={} options={}"
.format(user_id, message, options))
return
role = "appMaker"
buttons = []
for text, kind, result in options:
buttons.append({
"type": kind,
"text": text,
"payload": result
})
data = {"text": message,
"role": role,
"actions": buttons}
return ask('appusers/{0}/conversation/messages'.format(user_id),
data,
'post')
def valid_args(user_id, message, options=None):
if options is not None:
if user_id and message and options and type(options) is list:
return True
return False
else:
if user_id and message:
return True
return False
| 2.484375 | 2 |
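
Each helper above posts to the Smooch conversation endpoint with an appMaker role and optional action buttons; options are lists of (text, value) tuples, or (text, type, value) for send_buttons. A hypothetical sketch; the user id, texts, payloads, and URLs are made up, and valid Smooch credentials are assumed to be configured for endpoint.ask:

```python
send_message("user-123", "Thanks for reaching out!")
send_links("user-123", "Here are some useful pages:",
           [("Documentation", "https://docs.smooch.io")])
send_postbacks("user-123", "What would you like to do?",
               [("Talk to a human", "HANDOFF"), ("See pricing", "PRICING")])
send_buttons("user-123", "Mixed actions:",
             [("Open site", "link", "https://smooch.io"),
              ("Say hi", "postback", "HI")])
```
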
cifar/evalit.py | Sharkbyteprojects/IRIS-ML_and_Deep-Learning | 0 | 3720 | <reponame>Sharkbyteprojects/IRIS-ML_and_Deep-Learning<gh_stars>0
import keras
from keras.models import load_model
from PIL import Image
import matplotlib.pylab as plt
import numpy as np
import zipfile
print("Extract")
zip_ref = zipfile.ZipFile("./asset.zip", 'r')
zip_ref.extractall(".")
zip_ref.close()
print("Load Model")
model=load_model("cifar-model.h5")
CIFAR_10_CLASSES=["Plane","Car","bird","cat","deer","dog","frog","horse","ship","truck"]
def calc(imname):
test_image =Image.open("asset/"+imname)
test_image=test_image.resize((32,32),Image.ANTIALIAS)
test_image=np.array(test_image,dtype="float32")
test_image/=255
test_image=test_image.reshape(-1,32,32,3)
predictions=model.predict(test_image)
index_max_pred=np.argmax(predictions)
plt.title("Complete: {}".format(CIFAR_10_CLASSES[index_max_pred]))
plt.imshow(test_image[0].reshape(32,32,3))
print(predictions)
plt.show()
print("START TEST")
calc("lkw-image.jpg")
calc("cat.jpg")
calc("frog.jpg")
calc("fog.jpg")
calc("lfog.jpg")
calc("d.jpg")
calc("b.jpg")
calc("bs.jpg")
calc("plapper.jpg")
calc("ds.jpg")
print("Complete")
print("End")
quit(0)
| 2.828125 | 3 |
tt/urls.py | samiksha-patil/Knowledge-Sharing-Platform | 1 | 3721 | """
tt URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# Uncomment next two lines to enable admin:
from django.contrib import admin
from django.urls import path, include
from users import views as user_views
from django.contrib.auth import views as auth_views
from upload import views as upload_views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
# Uncomment the next line to enable the admin:
path('admin/', admin.site.urls),
path('', include('blog.urls')),
path('register/', user_views.register, name='register'),
path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'),
path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html') ,name='logout'),
path('profile/', user_views.profile, name='profile'),
path('book/',upload_views.book_list,name='book_list'),
path('book/upload',upload_views.upload_book,name='upload_book'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 2.59375 | 3 |
src/git/cmd.py | danihodovic/dht | 2 | 3722 | import os
import click
os.environ["GIT_PYTHON_REFRESH"] = "quiet"
@click.group()
def git():
pass
| 1.453125 | 1 |
TwitterImage2JPG.py | Tymec/Playground | 0 | 3723 | import glob
import os
def main():
os.chdir("F:/Downloads")
extensions = ["*.jpg_large", "*.png_large", "*.jpg_orig"]
file_list = list()
for extension in extensions:
file_list = file_list + glob.glob(extension)
for file in file_list:
for extension in extensions:
new_extension = extension.replace('*', '')
if file.endswith(new_extension):
new_name = file.replace(new_extension, '') + ".jpg"
os.rename(file, new_name)
print("Done!")
if __name__ == "__main__":
main()
| 3.5625 | 4 |
Data Analysis/classification.py | Riccardo95Facchini/DIL-2019 | 0 | 3724 | <filename>Data Analysis/classification.py
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
#THE DATASET IS RETRIEVED FROM GITHUB EVERY TIME THE SCRIPT RUNS
input_file = 'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv'
dataset = pd.read_csv(input_file, sep=';', header = 0)
dataset.head()
#DELETE NEXT CALLS DATA
dataset = dataset.drop("contact", axis=1)
dataset = dataset.drop("day", axis=1)
dataset = dataset.drop("month", axis=1)
dataset = dataset.drop("duration", axis=1)
dataset = dataset.drop("campaign", axis=1)
dataset = dataset.drop("pdays", axis=1)
dataset = dataset.drop("previous", axis=1)
dataset = dataset.drop("poutcome", axis=1)
dataset.head()
#FEATURE ENGINEERING
cleanup_nums = {"marital": {"married": 1, "single": 0, "divorced":-1},
"education": {"primary": 1, "secondary": 2, "tertiary": 3},
"default": {"yes": 1, "no": 0},
"housing": {"yes": 1, "no": 0},
"loan": {"yes": 1, "no": 0},
"y": {"yes": 1, "no": 0}}
dataset.replace(cleanup_nums, inplace=True)
dataset.head()
dataset.dtypes
dataset = dataset[dataset.job != 'unknown']
dataset = dataset[dataset.education != 'unknown']
dataset['education'] = dataset['education'].astype(int)
#CORRELATION MATRIX
plt.figure(figsize=(12,10))
cor = dataset.corr()
sns.heatmap(cor, annot=True, cmap=plt.cm.Reds)
plt.show()
#CLASSIFICATION
X = dataset.iloc[:, 0:7]
y = dataset.iloc[:, 7]
X = pd.get_dummies(X, columns=["job"], prefix=["job"])
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
#DECISION TREE
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
clf_dt = DecisionTreeClassifier()
clf_dt.fit(X_train, y_train)
esito = clf_dt.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, esito)
print(cm)
plt.hist(esito)
#RANDOM FOREST
from sklearn.ensemble import RandomForestClassifier
clf_dt = RandomForestClassifier()
clf_dt.fit(X_train, y_train)
esito = clf_dt.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, esito)
print(cm)
plt.hist(esito)
# K-NEAREST NEIGHBOURS
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# TRAINING - TEST
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# SCALING
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# FITTING
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)
classifier.fit(X_train, y_train)
# PREDICTION
y_pred = classifier.predict(X_test)
# CONFUSION MATRIX
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, y_pred,target_names=target_names))
print(cm)
plt.hist(y_pred)
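
# Illustrative note: new samples must be scaled with the already-fitted scaler
# (sc.transform, not fit_transform) before calling predict. For example,
# re-using one row of the scaled test set:
example_pred = classifier.predict(X_test[:1])
print("Example prediction for one scaled sample:", example_pred)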
#UNDERSAMPLING
from sklearn.utils import resample
dataset_sample = pd.get_dummies(dataset, columns=["job"], prefix=["job"])
#SPLIT FEATURE AND TARGET
y = dataset_sample.y
X = dataset_sample.drop('y', axis=1)
#TRAIN TEST
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
X = pd.concat([X_train, y_train], axis=1)
#SELECTING TARGET CLASSES
not_sub = X[X.y==0]
sub = X[X.y==1]
not_sub_downsampled = resample(not_sub,
replace = False,
n_samples = len(sub),
random_state = 27)
# COMBINE MINORITY AND DOWNSAMPLED MAJORITY
downsampled = pd.concat([not_sub_downsampled, sub])
#DECISION TREE
y_train = downsampled.y
X_train = downsampled.drop('y', axis=1)
clf_dt = DecisionTreeClassifier()
clf_dt.fit(X_train, y_train)
esito = clf_dt.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))
#RANDOM FOREST
y_train = downsampled.y
X_train = downsampled.drop('y', axis=1)
clf_dt = RandomForestClassifier()
clf_dt.fit(X_train, y_train)
esito = clf_dt.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))
#SMOTE - DECISION TREE
from imblearn.over_sampling import SMOTE
#SPLIT FEATURE TARGET
y = dataset_sample.y
X = dataset_sample.drop('y', axis=1)
#TRAIN TEST
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
#SMOTE
sm = SMOTE(random_state=27, ratio=1.0)
X_train, y_train = sm.fit_sample(X_train, y_train)
clf_dt = DecisionTreeClassifier()
#FIT
smote = clf_dt.fit(X_train,y_train)
#PREDICITON
smote_pred = smote.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, smote_pred,target_names=target_names))
#SMOTE - RANDOM FOREST
from imblearn.over_sampling import SMOTE
y = dataset_sample.y
X = dataset_sample.drop('y', axis=1)
# setting up testing and training sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
sm = SMOTE(random_state=27, ratio=1.0)
X_train, y_train = sm.fit_sample(X_train, y_train)
clf_dt = RandomForestClassifier()
smote = clf_dt.fit(X_train,y_train)
smote_pred = smote.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, smote_pred,target_names=target_names))
#RECAP on RECALL
x = np.arange(3)
plt.bar(x-0.2, [31,65,37], width=0.2, color='b', align='center', label='DT')
plt.bar(x, [18,61,32], width=0.2, color='r', align='center', label='RF')
plt.xticks(x-0.1, ['Normal','Under','Smote'])
plt.legend(loc='upper right')
#RECAP on F1
x = np.arange(3)
plt.bar(x-0.2, [31,26,32], width=0.2, color='b', align='center', label='DT')
plt.bar(x, [24,28,31], width=0.2, color='r', align='center', label='RF')
plt.xticks(x-0.1, ['Normal','Under','Smote'])
plt.legend(loc='lower right') | 3.40625 | 3 |
tools/c7n_azure/tests/test_route_table.py | anastasiia-zolochevska/cloud-custodian | 2 | 3725 | # Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from azure_common import BaseTest, arm_template
class RouteTableTest(BaseTest):
route_table_name = 'cctestroutetable'
vnet_name = 'ccroutetablevnet'
allowed_subnet_name = 'cctestsubnet1'
disallowed_subnet_name = 'cctestsubnet2'
@staticmethod
def _subnet_id_suffix(subnet):
return '{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet)
def test_route_table_schema_validate(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-route-table',
'resource': 'azure.routetable'
}, validate=True)
self.assertTrue(p)
@arm_template('route-table-and-vnet.json')
def test_find_route_table_by_name(self):
p = self.load_policy({
'name': 'test-find-route-table-by-name',
'resource': 'azure.routetable',
'filters': [
{
'type': 'value',
'key': 'name',
'op': 'eq',
'value': RouteTableTest.route_table_name
}
]
})
resources = p.run()
self._assert_only_route_table_in_resources(resources)
@arm_template('route-table-and-vnet.json')
def test_detect_route_table_is_routing_to_correct_subnet(self):
p = self.load_policy({
'name': 'test-detect-route-table-is-routing-to-correct-subnet',
'resource': 'azure.routetable',
'filters': [
{
'type': 'value',
'key': 'name',
'op': 'eq',
'value': RouteTableTest.route_table_name
},
{
'type': 'value',
'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format(
RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name)
),
'value': 'not-null'
}
]
})
resources = p.run()
self._assert_only_route_table_in_resources(resources)
@arm_template('route-table-and-vnet.json')
def test_detect_route_table_not_routing_to_incorrect_subnet(self):
p = self.load_policy({
'name': 'test-detect-route-table-not-routing-to-incorrect-subnet',
'resource': 'azure.routetable',
'filters': [
{
'type': 'value',
'key': 'name',
'op': 'eq',
'value': RouteTableTest.route_table_name
},
{
'type': 'value',
'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format(
RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name)
),
'value': 'not-null'
}
]
})
resources = p.run()
self.assertEqual(len(resources), 0, "A route table is routing to a disallowed subnet")
@arm_template('route-table-and-vnet.json')
def test_detect_route_only_routes_to_specific_subnets(self):
p = self.load_policy({
'name': 'test-detect-route-only-routes-to-specific-subnets',
'resource': 'azure.routetable',
'filters': [
{
'type': 'value',
'key': 'name',
'op': 'eq',
'value': RouteTableTest.route_table_name
},
{
'type': 'value',
'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format(
RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name)
),
'value': 'not-null'
},
{
'type': 'value',
'key': 'length(properties.subnets)',
'op': 'eq',
'value': 1
}
]
})
resources = p.run()
self._assert_only_route_table_in_resources(resources)
def _assert_only_route_table_in_resources(self, resources):
self.assertEqual(len(resources), 1, "Only one route table should be found")
route_table = resources[0]
self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'),
"The wrong route table was found")
properties = route_table.get('properties')
self.assertIsNotNone(properties, "Missing properties")
subnets = properties.get('subnets')
self.assertIsNotNone(subnets, "Missing subnets")
self.assertEqual(1, len(subnets), "There should only be one subnet")
subnet = subnets[0]
self.assertIn(RouteTableTest.allowed_subnet_name, subnet.get('id'), "Incorrect subnet")
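
    # Note (illustrative): the 'key' values used in the filters above are
    # JMESPath expressions evaluated against each resource, e.g.
    #   properties.subnets[?ends_with(id, '<vnet>/subnets/cctestsubnet1')] | [0]
    # picks the first subnet whose id ends with that suffix, so the filter
    # matches ('not-null') only when the route table is attached to it.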
| 1.78125 | 2 |
proto/tp_artifact_1.0/build/lib/sawtooth_artifact/processor/handler.py | pkthein/sparts_all_fam | 1 | 3726 | # Copyright 2016 Intel Corporation
# Copyright 2017 Wind River
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
################################################################################
# LIBRARIES & DEPENDENCIES #
################################################################################
import hashlib
import logging
import json
from collections import OrderedDict
from sawtooth_sdk.processor.exceptions import InvalidTransaction
from sawtooth_sdk.processor.exceptions import InternalError
from sawtooth_sdk.processor.handler import TransactionHandler
LOGGER = logging.getLogger(__name__)
################################################################################
# HANDLER OBJ #
################################################################################
class ArtifactTransactionHandler:
"""
Class for handling the Transaction Family : Artifact
Attributes:
namespace_prefix (str): The namespace prefix of the transaction family
"""
def __init__(self, namespace_prefix):
"""
Constructs the ArtifactTransactionHandler object.
Args:
namespace_prefix (str):
                The namespace prefix of the transaction family
"""
self._namespace_prefix = namespace_prefix
@property
def family_name(self):
"""
type: str
Returns the family name of the handler object.
"""
return "artifact"
@property
def family_versions(self):
"""
type: list of str
Returns the family version of the handler object.
"""
return ["1.0"]
@property
def encodings(self):
"""
type: list of str
Returns the encoding scheme used for the data for the handler object.
"""
return ["csv-utf8"]
@property
def namespaces(self):
"""
type: list of str
Returns the namespaces associating with the handler object.
"""
return [self._namespace_prefix]
################################################################################
# FUNCTIONS #
################################################################################
def apply(self, transaction, context):
"""
        Applies the payload from the transaction onto the state storage.
Args:
            transaction (Transaction): The transaction containing the payload
context (State): The current state of the ledger
Returns:
type: State
The new state of the ledger, which includes the data from the
transaction, is returned to be stored on the state storage.
Raises:
InvalidTransaction:
* If deserialization for payload from transaction failed
* If "create" was called on non-unique uuid
* If "amend" was called on non-existing uuid
* If "Add..." were called on non-existing uuid
* If invalid operation was called
InternalError:
* If deserialization of State.data failed
"""
# Parsing required fields from transaction payload
try:
payload = json.loads(transaction.payload.decode())
artifact_id = payload["uuid"]
artifact_alias = payload["alias"]
artifact_name = payload["name"]
artifact_type = payload["content_type"]
artifact_checksum = payload["checksum"]
artifact_label = payload["label"]
artifact_openchain = payload["openchain"]
action = payload["action"]
prev = payload["prev_block"]
cur = payload["cur_block"]
timestamp = payload["timestamp"]
artifact_list = payload["artifact_list"]
uri_list = payload["uri_list"]
except ValueError:
raise InvalidTransaction("Invalid payload serialization")
# Soft sanity check and loading required data
validate_transaction(artifact_id, action)
data_address = make_artifact_address(self._namespace_prefix,
artifact_id)
state_entries = context.get_state([data_address])
# Hard sanity check before creating final payload for the state storage
if len(state_entries) != 0:
try:
stored_artifact = json.loads(state_entries[0].data.decode())
stored_artifact_id = stored_artifact["uuid"]
except ValueError:
raise InternalError("Failed to deserialize data.")
else:
stored_artifact_id = stored_artifact = None
if action == "create" and stored_artifact_id is not None:
raise InvalidTransaction("Invalid Action-artifact already exists.")
elif action == "create":
artifact = create_artifact(artifact_id, artifact_alias,
artifact_name, artifact_type, artifact_checksum,
artifact_label, artifact_openchain,
prev, cur, timestamp)
elif action == "amend" and stored_artifact_id is not None:
artifact = create_artifact(artifact_id, artifact_alias,
artifact_name, artifact_type, artifact_checksum,
artifact_label, artifact_openchain,
prev, cur, timestamp, artifact_list, uri_list)
elif action == "AddArtifact" or action == "AddURI":
if stored_artifact_id is None:
raise InvalidTransaction(
"Invalid Action-requires an existing artifact."
)
artifact = create_artifact(artifact_id, artifact_alias,
artifact_name, artifact_type, artifact_checksum,
artifact_label, artifact_openchain,
prev, cur, timestamp,
artifact_list, uri_list)
# Adding the final payload to the state storage
data = json.dumps(artifact).encode()
addresses = context.set_state({data_address:data})
return addresses
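
    # Illustrative example (not part of the original module) of the JSON
    # payload this handler parses in apply(); all field values below are
    # hypothetical:
    #
    #   {"uuid": "...", "alias": "...", "name": "...", "content_type": "...",
    #    "checksum": "...", "label": "...", "openchain": "...",
    #    "action": "create", "prev_block": "0", "cur_block": "...",
    #    "timestamp": "...", "artifact_list": [], "uri_list": []}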
################################################################################
# HELPER FUNCTIONS #
################################################################################
def create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type,
artifact_checksum, artifact_label, artifact_openchain,
prev, cur, timestamp, artifact_list=[], uri_list=[]):
"""
Constructs the payload to be stored in the state storage.
Args:
        artifact_id (str): The uuid of the artifact
artifact_alias (str): The alias of the artifact
artifact_name (str): The name of the artifact
artifact_type (str): The type of the artifact
artifact_checksum (str): The checksum of the artifact
artifact_label (str): The label of the artifact
artifact_openchain (str): The openchain of the artifact
prev (str): The previous block id of the transaction (default "0")
cur (str): the current block id of the transaction
timestamp (str): The UTC time for when the transaction was submitted
artifact_list (list of dict):
The list of the artifact uuid associated with the artifact
(default [])
uri_list (list of dict):
The list of the uri associated with the artifact (default [])
Returns:
type: dict
        A dictionary containing all of the parameters above is created and
        returned to be stored in the state storage.
"""
return {
"uuid" : artifact_id,
"alias" : artifact_alias,
"name" : artifact_name,
"content_type" : artifact_type,
"checksum" : artifact_checksum,
"label" : artifact_label,
"openchain" : artifact_openchain,
"prev_block" : prev,
"cur_block" : cur,
"timestamp" : timestamp,
"artifact_list" : artifact_list,
"uri_list" : uri_list
}
def validate_transaction(artifact_id, action):
"""
Performs soft sanity check in order to improve runtime by eliminating the
obvious exception errors.
Args:
artifact_id (str): The uuid of the artifact
action (str): The command to be performed
Raises:
InvalidTransaction:
If the uuid or the action are not passed in or the
action is not a valid action.
"""
if not artifact_id:
raise InvalidTransaction("Artifact ID is required")
if not action:
raise InvalidTransaction("Action is required")
if action not in ("AddArtifact", "create", "AddURI", "amend"):
raise InvalidTransaction("Invalid action: {}".format(action))
def make_artifact_address(namespace_prefix, artifact_id):
"""
Creates an artifact address which will be used to recover the associated
UUID if the artifact already exists in the state storage; or, used as a key to
store the new data into the state storage.
Args:
namespace_prefix (str):
The prefix associating with the transaction family
artifact_id (str): The uuid of the artifact
Returns:
type: str
The address-to-be, which associates the uuid and the namespace prefix.
"""
return namespace_prefix + \
hashlib.sha512(artifact_id.encode("utf-8")).hexdigest()[:64]
def _display(msg):
"""
Logs the message to the debug logger.
Args:
msg (str): The message that is to be logged into the debug logger
"""
n = msg.count("\n")
if n > 0:
msg = msg.split("\n")
length = max(len(line) for line in msg)
else:
length = len(msg)
msg = [msg]
LOGGER.debug("+" + (length + 2) * "-" + "+")
for line in msg:
LOGGER.debug("+ " + line.center(length) + " +")
LOGGER.debug("+" + (length + 2) * "-" + "+")
################################################################################
# #
################################################################################
| 1.484375 | 1 |
ReviewsCollector.py | fsandx/moodybooks | 0 | 3727 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
STEP 2
Takes the list of urls in the json files and downloads the html files to local drive
Start with: scrapy runspider ReviewsCollector.py
"""
import scrapy
import json
class ReviewsCollector(scrapy.Spider):
def start_requests(self):
with open("data/books.json") as f:
self.data = json.load(f)
for item in self.data:
if (item['url'] is not None):
yield scrapy.Request(url=item['url'], headers={'Referer':'http://www.google.com/'}, callback=self.parse)
def parse(self, response):
filename = response.url.split("/")[-1] + '.html'
with open('data/reviews/' + filename, 'wb+') as f:
f.write(response.body) | 3.40625 | 3 |
firelight/interfaces/light.py | roshie548/firelight | 16 | 3728 | <gh_stars>10-100
from abc import ABC, abstractmethod
from .color import Color
class LightSystem(ABC):
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, 'set_transition_time')
and callable(subclass.set_transition_time)
and hasattr(subclass, 'discover_lights')
and callable(subclass.discover_lights)
and hasattr(subclass, 'set_color_all_lights')
and callable(subclass.set_color_all_lights))
@abstractmethod
def discover_lights(self):
"""Discover the lights and groups in this LightSystem."""
raise NotImplementedError
@abstractmethod
def set_transition_time(self, transition_time: int):
"""Set how long it takes in milliseconds for colors to transition."""
raise NotImplementedError
@abstractmethod
def set_color(self, color: Color):
"""Set the color of all the lights in the LightSystem."""
raise NotImplementedError
class LightGroup(ABC):
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, 'turn_on')
and callable(subclass.turn_on)
and hasattr(subclass, 'turn_off')
and callable(subclass.turn_off)
and hasattr(subclass, 'set_transition_time')
and callable(subclass.set_transition_time)
and hasattr(subclass, 'set_color')
and callable(subclass.set_color))
@abstractmethod
def turn_on(self):
"""Turn on the lights in this group."""
raise NotImplementedError
@abstractmethod
def turn_off(self):
"""Turn off the lights in this group."""
raise NotImplementedError
@abstractmethod
def set_transition_time(self, transition_time: int):
"""Set how long it takes in milliseconds for colors to transition."""
raise NotImplementedError
@abstractmethod
def set_color(self, color: Color):
"""Set the color of this light."""
raise NotImplementedError
class LightDevice(ABC):
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, 'turn_on')
and callable(subclass.turn_on)
and hasattr(subclass, 'turn_off')
and callable(subclass.turn_off)
and hasattr(subclass, 'set_transition_time')
and callable(subclass.set_transition_time)
and hasattr(subclass, 'set_color')
and callable(subclass.set_color))
@abstractmethod
def turn_on(self):
"""Turn on this light."""
raise NotImplementedError
@abstractmethod
def turn_off(self):
"""Turn off the light."""
raise NotImplementedError
@abstractmethod
def set_transition_time(self, transition_time: int):
"""Set how long it takes in milliseconds for colors to transition."""
raise NotImplementedError
@abstractmethod
def set_color(self, color: Color):
"""Set the color of this light."""
raise NotImplementedError
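

# Minimal illustrative sketch (not part of the original interfaces): a dummy
# device satisfying LightDevice by printing what it would do. The class and
# behaviour below are hypothetical.
class ConsoleLight(LightDevice):
    def __init__(self) -> None:
        self._transition_ms = 0

    def turn_on(self):
        print("light on")

    def turn_off(self):
        print("light off")

    def set_transition_time(self, transition_time: int):
        self._transition_ms = transition_time

    def set_color(self, color: Color):
        print("set color to", color)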
| 3 | 3 |
PolymorphismPYTHON/Polypy.py | cadeng23/oop-cjgustafson | 0 | 3729 | <reponame>cadeng23/oop-cjgustafson
import random
class Family:
    def __init__(self, first, last, hair):
        self.first = first
        self.last = last
        self.hair = hair
        # Eyes are marked as Grey because they are unknown for now;
        # hair color is passed in, brown being the dominant hair color.
        self.eyes = "Grey"

    def fullname(self):
        return '{} {}'.format(self.first, self.last)

    def eyefind(self):
        # Using the Punnett square from genetics we know that a donor
        # with blue eyes and one with brown makes it 50/50 odds
        # that the child's eyes will be brown or blue.
        temp = random.choice([1, 2])
        if temp == 1:
            self.EYES = "Brown"
        else:
            self.EYES = "Blue"
        return self.EYES

    def Apply_eyes(self):
        # Roll the eye color first so self.EYES always exists.
        self.eyes = self.eyefind()


Daughter = Family('Ashley', 'Smith', 'Brown')
Son = Family('Kevin', 'Smith', 'Brown')
print(Daughter.eyes)
print(Son.eyes)

# When the kids are born, the eye color they get is decided randomly
# through the behaviour inherited from the parent class.
class Kids(Family):
    pass

Daughter = Kids('Danielle', 'Smith', 'Brown')
Son = Kids('Kevin', 'Smith', 'Brown')
print(Daughter.eyes)
print(Son.eyes)

Daughter.Apply_eyes()
Son.Apply_eyes()
print(Daughter.eyes)
print(Son.eyes)
| 3.9375 | 4 |
homeassistant/components/device_tracker/owntracks.py | evancohen/home-assistant | 14 | 3730 | """
homeassistant.components.device_tracker.owntracks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
OwnTracks platform for the device tracker.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.owntracks/
"""
import json
import logging
import homeassistant.components.mqtt as mqtt
DEPENDENCIES = ['mqtt']
LOCATION_TOPIC = 'owntracks/+/+'
def setup_scanner(hass, config, see):
""" Set up a OwnTracksks tracker. """
def owntracks_location_update(topic, payload, qos):
""" MQTT message received. """
# Docs on available data:
# http://owntracks.org/booklet/tech/json/#_typelocation
try:
data = json.loads(payload)
except ValueError:
# If invalid JSON
logging.getLogger(__name__).error(
'Unable to parse payload as JSON: %s', payload)
return
if not isinstance(data, dict) or data.get('_type') != 'location':
return
parts = topic.split('/')
kwargs = {
'dev_id': '{}_{}'.format(parts[1], parts[2]),
'host_name': parts[1],
'gps': (data['lat'], data['lon']),
}
if 'acc' in data:
kwargs['gps_accuracy'] = data['acc']
if 'batt' in data:
kwargs['battery'] = data['batt']
see(**kwargs)
mqtt.subscribe(hass, LOCATION_TOPIC, owntracks_location_update, 1)
return True
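

# Illustrative example (not part of the original platform code) of the MQTT
# message shape handled above; topic and values are hypothetical:
#
#   topic:   owntracks/<user>/<device>
#   payload: {"_type": "location", "lat": 52.52, "lon": 13.40,
#             "acc": 10, "batt": 85}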
| 2.28125 | 2 |
src/models/end_to_end_event_coreference.py | luyaojie/E3C | 2 | 3731 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Created by Roger on 2019-09-10
# Mostly by AllenNLP
import logging
import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Pruner
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder
from allennlp.modules.similarity_functions import DotProductSimilarity
from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor
from allennlp.modules.token_embedders import Embedding
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import Average
from overrides import overrides
from torch.nn import BCEWithLogitsLoss
from src.metrics.event_coref_scores import EventCorefScores
from src.metrics.mention_f1 import TopSpanMentionTypeF1
from src.utils.cluster_decoding_utils import node_decode
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("end-to-end-event-coreference")
class End2EndEventCoreferenceResolver(Model):
"""
    This ``Model`` implements the coreference resolution model described in "End-to-end Neural
Coreference Resolution"
<https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83>
by Lee et al., 2017.
The basic outline of this model is to get an embedded representation of each span in the
document. These span representations are scored and used to prune away spans that are unlikely
to occur in a coreference cluster. For the remaining spans, the model decides which antecedent
span (if any) they are coreferent with. The resulting coreference links, after applying
transitivity, imply a clustering of the spans in the document.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``text`` ``TextField`` we get as input to the model.
context_layer : ``Seq2SeqEncoder``
This layer incorporates contextual information for each word in the document.
mention_feedforward : ``FeedForward``
This feedforward network is applied to the span representations which is then scored
by a linear layer.
antecedent_feedforward: ``FeedForward``
This feedforward network is applied to pairs of span representation, along with any
pairwise features, which is then scored by a linear layer.
feature_size: ``int``
The embedding size for all the embedded features, such as distances or span widths.
max_span_width: ``int``
The maximum width of candidate spans.
spans_per_word: float, required.
A multiplier between zero and one which controls what percentage of candidate mention
spans we retain with respect to the number of words in the document.
max_antecedents: int, required.
For each mention which survives the pruning stage, we consider this many antecedents.
lexical_dropout: ``int``
The probability of dropping out dimensions of the embedded text.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
mention_feedforward: FeedForward,
antecedent_feedforward: FeedForward,
feature_size: int,
context_layer: Seq2SeqEncoder = None,
max_span_width: int = 1,
spans_per_word: float = 0.1,
max_antecedents: int = 50,
lexical_dropout: float = 0.2,
pretrain_ed: bool = False,
pretrain_coref: bool = False,
coref_loss_weight: float = 1.0,
bce_loss_weight: float = 1.0,
bce_pos_weight: float = None,
local_window_size: int = 10,
attention_type: str = 'dot',
decoding: str = 'type-guided',
type_threshold: float = -1.,
type_refine: bool = True,
type_match_in_eval: bool = True,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(End2EndEventCoreferenceResolver, self).__init__(vocab, regularizer)
logger.info(vocab)
self._text_field_embedder = text_field_embedder
self._context_layer = context_layer
self._antecedent_feedforward = TimeDistributed(antecedent_feedforward)
self._event_scorer = torch.nn.Sequential(
TimeDistributed(mention_feedforward),
TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1))
)
self._pretrain_ed = pretrain_ed
self._pretrain_coref = pretrain_coref
self._mention_pruner = Pruner(self._event_scorer)
self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1))
self._local_window_size = local_window_size
self._attention_type = attention_type
self._decoding = decoding
self._type_threshold = type_threshold
logger.info(vocab.get_token_from_index(0, "labels"))
if context_layer is not None:
endpoint_span_extractor_dim = context_layer.get_output_dim()
attentive_span_extractor_dim = text_field_embedder.get_output_dim()
self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim,
combination="x,y",
num_width_embeddings=max_span_width,
span_width_embedding_dim=feature_size)
self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim)
span_embedding_size = self._endpoint_span_extractor.get_output_dim() + self._attentive_span_extractor.get_output_dim()
if self._local_window_size <= 0:
self._attention_layer = None
else:
if self._attention_type == 'dot':
similarity_function = DotProductSimilarity(scale_output=True)
num_head = 1
else:
raise NotImplementedError('Attention Type: %s' % self._attention_type)
self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim,
similarity_function=similarity_function,
combination='2',
num_attention_heads=num_head
)
else:
attentive_span_extractor_dim = text_field_embedder.get_output_dim()
if max_span_width > 1:
endpoint_span_extractor_dim = text_field_embedder.get_output_dim()
self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim,
combination="x,y",
num_width_embeddings=max_span_width,
span_width_embedding_dim=feature_size)
else:
self._endpoint_span_extractor = None
self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim)
if self._local_window_size <= 0:
self._attention_layer = None
else:
if self._attention_type == 'dot':
similarity_function = DotProductSimilarity(scale_output=True)
num_head = 1
else:
raise NotImplementedError('Attention Type: %s' % self._attention_type)
self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim,
similarity_function=similarity_function,
combination='2',
num_attention_heads=num_head
)
if self._endpoint_span_extractor is not None:
span_embedding_size = self._attentive_span_extractor.get_output_dim() + self._endpoint_span_extractor.get_output_dim()
else:
span_embedding_size = self._attentive_span_extractor.get_output_dim()
if type_refine:
self._type_refine_gate = torch.nn.Sequential(
TimeDistributed(torch.nn.Linear(span_embedding_size * 2, span_embedding_size)),
torch.nn.Sigmoid()
)
else:
self._type_refine_gate = None
# NIL for Unified Event
self._event_embedding = Embedding(num_embeddings=vocab.get_vocab_size('labels'),
embedding_dim=span_embedding_size)
self._event_embedding_map = torch.nn.Linear(self._event_embedding.get_output_dim() * 2,
self._event_embedding.get_output_dim())
self._positive_label_size = vocab.get_vocab_size('labels') - 1
# 10 possible distance buckets.
self._num_distance_buckets = 10
self._distance_embedding = Embedding(self._num_distance_buckets, feature_size)
self._coref_loss_weight = coref_loss_weight
self._bce_loss_weight = bce_loss_weight
self._bce_pos_weight = bce_pos_weight
self._max_span_width = max_span_width
self._spans_per_word = spans_per_word
self._max_antecedents = max_antecedents
self._mention_f1_score = TopSpanMentionTypeF1()
self._conll_coref_scores = EventCorefScores(mapping_type=type_match_in_eval)
self._type_loss_metric = Average()
self._realis_loss_metric = Average()
self._coref_loss_metric = Average()
self._coref_label_metric = Average()
self._type_label_metric = Average()
self._nil_label_metric = Average()
if self._bce_pos_weight:
self._bce_loss = BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight))
else:
self._bce_loss = BCEWithLogitsLoss(reduction='none')
if lexical_dropout > 0:
self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout)
else:
self._lexical_dropout = lambda x: x
initializer(self)
def _get_event_embedding(self, span_mask):
"""
:param span_mask:
(batch, top_span_size, 1)
:return:
(batch, top_span_size, positive_label_size)
"""
event_indices = util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask)) + 1
event_indices = torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0, 1)
event_indices = event_indices.expand([event_indices.size(0), event_indices.size(1)])
event_embeddings = self._event_embedding(event_indices)
event_embeddings = event_embeddings.reshape(event_embeddings.size(0),
event_embeddings.size(1) * event_embeddings.size(2))
event_embeddings = self._event_embedding_map.forward(event_embeddings)
event_embeddings = event_embeddings.unsqueeze(0).expand(span_mask.size(0),
event_embeddings.size(0),
event_embeddings.size(1),
)
return event_embeddings
def _get_type_antecedent_labels(self, top_event_type_labels):
"""
:param top_event_type_labels:
(batch, top_span_size, 1)
:return:
(batch, top_span_size, positive_label_size)
"""
event_indices = util.get_range_vector(self.vocab.get_vocab_size('labels'),
device=util.get_device_of(top_event_type_labels))
top_event_type_labels = top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0),
top_event_type_labels.size(1),
event_indices.size(0)])
type_antecedent_labels = (top_event_type_labels == event_indices).float()
return type_antecedent_labels
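
    # Toy illustration (assuming a label vocabulary of size 4 with index 0
    # reserved for NIL): a kept span whose event type index is 2 yields the
    # one-hot row [0., 0., 1., 0.], i.e. label 2 is its only valid
    # "type antecedent".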
def _type_refine_embedding(self, top_embeddings, event_embeddings):
# (batch, top_span_size, emb_size) bmm
event_prob = torch.bmm(top_embeddings, torch.transpose(event_embeddings, 1, 2))
shape = [event_prob.size(0), event_prob.size(1), 1]
dummy_scores = event_prob.new_zeros(*shape)
event_prob = torch.cat([dummy_scores, event_prob], -1)
event_prob = torch.softmax(event_prob, -1)
event_rep = torch.bmm(event_prob[:, :, 1:], event_embeddings) + event_prob[:, :, :1] * top_embeddings
refine_gate = self._type_refine_gate(torch.cat([event_rep, top_embeddings], -1))
top_embeddings = refine_gate * top_embeddings + (1 - refine_gate) * event_rep
return top_embeddings
def _local_attention(self, raw_contextualized_embeddings, text_mask):
device = util.get_device_of(raw_contextualized_embeddings)
if device < 0:
device = 'cpu'
attention_mask = torch.ones((text_mask.size(1), text_mask.size(1)), device=device)
# attention_mask = attention_mask - torch.eye(text_mask.size(1),
# device=util.get_device_of(contextualized_embeddings))
new_attention_mask = text_mask[:, :, None] * attention_mask
new_attention_mask = torch.triu(torch.tril(new_attention_mask, self._local_window_size),
-self._local_window_size)
new_contextualized_embeddings = self._attention_layer(raw_contextualized_embeddings,
new_attention_mask)
return new_contextualized_embeddings
@overrides
def forward(self, # type: ignore
text: Dict[str, torch.LongTensor],
spans: torch.IntTensor,
coref_labels: torch.IntTensor = None,
event_type_labels: torch.IntTensor = None,
realis_labels: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
text : ``Dict[str, torch.LongTensor]``, required.
The output of a ``TextField`` representing the text of
the document.
spans : ``torch.IntTensor``, required.
A tensor of shape (batch_size, num_spans, 2), representing the inclusive start and end
indices of candidate spans for mentions. Comes from a ``ListField[SpanField]`` of
indices into the text of the document.
coref_labels : ``torch.IntTensor``, optional (default = None).
A tensor of shape (batch_size, num_spans), representing the cluster ids
of each span, or -1 for those which do not appear in any clusters.
event_type_labels : ``torch.IntTensor``, optional (default = None).
A tensor of shape (batch_size, num_spans), representing the event label of the specific span.
realis_labels : ``torch.IntTensor``, optional (default = None).
A tensor of shape (batch_size, num_spans), representing the realis label of the specific span.
metadata : ``List[Dict[str, Any]]``, optional (default = None).
A metadata dictionary for each instance in the batch. We use the "original_text" and "clusters" keys
from this dictionary, which respectively have the original text and the annotated gold coreference
clusters for that instance.
Returns
-------
An output dictionary consisting of:
top_spans : ``torch.IntTensor``
A tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing
the start and end word indices of the top spans that survived the pruning stage.
antecedent_indices : ``torch.IntTensor``
A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for each top span
the index (with respect to top_spans) of the possible antecedents the model considered.
predicted_antecedents : ``torch.IntTensor``
A tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for each top span, the
index (with respect to antecedent_indices) of the most likely antecedent. -1 means there
was no predicted link.
loss : ``torch.FloatTensor``, optional
A scalar loss to be optimised.
"""
# Shape: (batch_size, document_length, embedding_size)
text_embeddings = self._lexical_dropout(self._text_field_embedder(text))
document_length = text_embeddings.size(1)
num_spans = spans.size(1)
# Shape: (batch_size, document_length)
text_mask = util.get_text_field_mask(text).float()
# Shape: (batch_size, num_spans)
span_mask = (spans[:, :, 0] >= 0).squeeze(-1).float()
# SpanFields return -1 when they are used as padding. As we do
# some comparisons based on span widths when we attend over the
# span representations that we generate from these indices, we
# need them to be <= 0. This is only relevant in edge cases where
# the number of spans we consider after the pruning stage is >= the
# total number of spans, because in this case, it is possible we might
# consider a masked span.
# Shape: (batch_size, num_spans, 2)
spans = F.relu(spans.float()).long()
if self._context_layer:
# Shape: (batch_size, document_length, encoding_dim)
raw_contextualized_embeddings = self._context_layer(text_embeddings, text_mask)
if self._attention_layer is not None:
new_contextualized_embeddings = self._local_attention(
raw_contextualized_embeddings=raw_contextualized_embeddings,
text_mask=text_mask
)
else:
new_contextualized_embeddings = raw_contextualized_embeddings
# Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size)
endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings, spans)
# Shape: (batch_size, num_spans, embedding_size)
attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans)
# Shape: (batch_size, num_spans, embedding_size + 2 * encoding_dim + feature_size)
# span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)
span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)
else:
raw_contextualized_embeddings = text_embeddings
if self._attention_layer is not None:
new_contextualized_embeddings = self._local_attention(
raw_contextualized_embeddings=raw_contextualized_embeddings,
text_mask=text_mask
)
else:
new_contextualized_embeddings = raw_contextualized_embeddings
span_embeddings_list = list()
attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings, spans)
span_embeddings_list += [attended_span_embeddings]
if self._endpoint_span_extractor is not None:
# Shape: (batch_size, num_spans, embedding_size)
endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings, spans)
span_embeddings_list += [endpoint_span_embeddings]
span_embeddings = torch.cat(span_embeddings_list, -1)
# event_scores = self._event_classifier.forward(span_embeddings)
# Shape: (batch_size, num_spans, num_event_realis_label)
# Shape: (batch_size, num_spans, num_event_realis_label)
# event_realis_scores = self._event_realis_classifier.forward(span_embeddings)
# Prune based on mention scores.
num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word * document_length))
(top_embeddings, top_mask, top_indices, top_scores) = self._mention_pruner(span_embeddings,
span_mask,
num_spans_to_keep_according_doc_len,
)
event_embeddings = self._get_event_embedding(span_mask)
top_mask = top_mask.unsqueeze(-1)
# Shape: (batch_size * num_spans_to_keep)
# torch.index_select only accepts 1D indices, but here
# we need to select spans for each element in the batch.
# This reformats the indices to take into account their
# index into the batch. We precompute this here to make
# the multiple calls to util.batched_index_select below more efficient.
flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices, num_spans)
# Compute final predictions for which spans to consider as mentions.
# Shape: (batch_size, num_spans_to_keep, 2)
top_spans = util.batched_index_select(spans,
top_indices,
flat_top_span_indices)
# Compute indices for antecedent spans to consider.
max_antecedents = min(self._max_antecedents, num_spans_to_keep_according_doc_len)
# top_span_embeddings = top_span_embeddings.detach()
# top_span_mention_scores = top_span_mention_scores.detach()
# Now that we have our variables in terms of num_spans_to_keep, we need to
# compare span pairs to decide each span's antecedent. Each span can only
# have prior spans as antecedents, and we only consider up to max_antecedents
# prior spans. So the first thing we do is construct a matrix mapping a span's
# index to the indices of its allowed antecedents. Note that this is independent
# of the batch dimension - it's just a function of the span's position in
# top_spans. The spans are in document order, so we can just use the relative
# index of the spans to know which other spans are allowed antecedents.
# Once we have this matrix, we reformat our variables again to get embeddings
# for all valid antecedents for each span. This gives us variables with shapes
# like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which
# we can use to make coreference decisions between valid span pairs.
# Shapes:
# (num_spans_to_keep, max_antecedents),
# (1, max_antecedents),
# (1, num_spans_to_keep, max_antecedents)
valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \
_generate_valid_antecedents(num_spans_to_keep_according_doc_len,
max_antecedents,
util.get_device_of(text_mask))
if self._type_refine_gate is not None:
top_embeddings = self._type_refine_embedding(top_embeddings, event_embeddings)
# Select tensors relating to the antecedent spans.
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings,
valid_antecedent_indices)
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
candidate_antecedent_mention_scores = util.flattened_index_select(top_scores,
valid_antecedent_indices).squeeze(-1)
# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size)
candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings(
event_embeddings,
candidate_antecedent_embeddings)
# Compute antecedent scores.
# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size)
span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings,
candidate_antecedent_embeddings,
valid_antecedent_offsets)
# (batch_size, event_type_size, 1)
event_type_prior_scores = self._event_scorer(event_embeddings)
# (batch_size, num_spans_to_keep, event_type_size)
event_type_prior_scores = event_type_prior_scores.transpose(1, 2).expand(
candidate_antecedent_mention_scores.size(0),
candidate_antecedent_mention_scores.size(1),
-1)
# (batch_size, num_spans_to_keep, event_type_size + max_antecedents)
candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores,
candidate_antecedent_mention_scores],
-1)
# Shape: (batch_size, num_spans_to_keep, 1 + event_type_size + max_antecedents)
coreference_scores = self._compute_coreference_scores(span_pair_embeddings,
top_scores,
candidate_antecedent_mention_scores,
valid_antecedent_log_mask)
# We now have, for each span which survived the pruning stage,
# a predicted antecedent. This implies a clustering if we group
# mentions which refer to each other in a chain.
# Shape: (batch_size, num_spans_to_keep)
_, predicted_antecedents = coreference_scores.max(2)
# Subtract one here because index 0 is the "no antecedent" class,
# so this makes the indices line up with actual spans if the prediction
# is greater than -1.
predicted_antecedents -= 1
output_dict = {"top_spans": top_spans,
"antecedent_indices": valid_antecedent_indices,
"predicted_antecedents": predicted_antecedents,
"coreference_scores": coreference_scores,
}
if coref_labels is not None and event_type_labels is not None:
pruned_event_type_labels = torch.gather(event_type_labels, 1, top_indices)
type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels)
# Find the gold labels for the spans which we kept.
pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1),
top_indices,
flat_top_span_indices)
antecedent_labels = util.flattened_index_select(pruned_gold_labels,
valid_antecedent_indices).squeeze(-1)
antecedent_labels += valid_antecedent_log_mask.long()
# Compute labels.
# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels,
type_antecedent_labels,
antecedent_labels)
bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1),
(event_type_labels > 0).float()) * span_mask
bce_loss = bce_loss.sum() * self._bce_loss_weight
# Now, compute the loss using the negative marginal log-likelihood.
# This is equal to the log of the sum of the probabilities of all antecedent predictions
# that would be consistent with the data, in the sense that we are minimising, for a
# given span, the negative marginal log likelihood of all antecedents which are in the
# same gold cluster as the span we are currently considering. Each span i predicts a
# single antecedent j, but there might be several prior mentions k in the same
# coreference cluster that would be valid antecedents. Our loss is the sum of the
# probability assigned to all valid antecedents. This is a valid objective for
# clustering as we don't mind which antecedent is predicted, so long as they are in
# the same coreference cluster.
if self._pretrain_ed:
# All antecedent mask is 0
top_mask = top_mask.expand_as(coreference_scores).clone()
top_mask[:, :, self._positive_label_size + 2:] = 0
coreference_log_probs = util.masked_log_softmax(coreference_scores, top_mask)
correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log()
negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum()
coref_loss = negative_marginal_log_likelihood * self._coref_loss_weight
output_dict["loss"] = coref_loss + bce_loss
decoded_result = self.decode(output_dict)
pred_label_spans_list = decoded_result['pred_label_spans']
gold_label_spans_list = [m['gold_label_spans'] for m in metadata]
self._mention_f1_score(pred_label_spans_list,
gold_label_spans_list,
)
self._conll_coref_scores(decoded_result['clusters'],
metadata,
pred_label_spans_list,
gold_label_spans_list)
self._type_loss_metric(bce_loss.item())
self._coref_loss_metric(negative_marginal_log_likelihood.item())
else:
self._coref_loss_metric(0.)
if metadata is not None:
output_dict["document"] = [x["original_text"] for x in metadata]
output_dict["offset"] = [x["token_offset"] for x in metadata]
output_dict['doc_id'] = [x.get("doc_id", None) for x in metadata]
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]):
"""
Converts the list of spans and predicted antecedent indices into clusters
of spans for each element in the batch.
Parameters
----------
output_dict : ``Dict[str, torch.Tensor]``, required.
The result of calling :func:`forward` on an instance or batch of instances.
Returns
-------
The same output dictionary, but with an additional ``clusters`` key:
clusters : ``List[List[List[Tuple[int, int]]]]``
A nested list, representing, for each instance in the batch, the list of clusters,
which are in turn comprised of a list of (start, end) inclusive spans into the
original document.
"""
return node_decode(output_dict,
self.vocab, decoding_algorithm=self._decoding,
positive_label_size=self._positive_label_size,
type_threshold=self._type_threshold)
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
mention_result = self._mention_f1_score.get_metric(reset)
coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset)
return {"c_p": coref_precision,
"c_r": coref_recall,
"c_f1": coref_f1,
"m_p": mention_result['precision'],
"m_r": mention_result['recall'],
"m_f1": mention_result['f1-score'],
"nil": self._nil_label_metric.get_metric(reset),
"type": self._type_label_metric.get_metric(reset),
"coref": self._coref_label_metric.get_metric(reset),
"t_l": self._type_loss_metric.get_metric(reset),
"c_l": self._coref_loss_metric.get_metric(reset),
"a_f1": (mention_result['f1-score'] + coref_f1) / 2.}
@staticmethod
def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor,
antecedent_embeddings: torch.FloatTensor):
"""
event_embeddings: ``torch.FloatTensor``, required.
Embedding representations of the event types. Has shape
(batch_size, event_type_size, embedding_size).
antecedent_embeddings : ``torch.FloatTensor``, required.
Embedding representations of the antecedent spans we are considering
for each top span. Has shape
(batch_size, num_spans_to_keep, max_antecedents, embedding_size).
return:
(batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)
"""
event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0),
antecedent_embeddings.size(1),
event_embeddings.size(1),
antecedent_embeddings.size(3),))
return torch.cat([event_embeddings, antecedent_embeddings], 2)
def _compute_span_pair_embeddings(self,
top_span_embeddings: torch.FloatTensor,
antecedent_embeddings: torch.FloatTensor,
antecedent_offsets: torch.FloatTensor):
"""
Computes an embedding representation of pairs of spans for the pairwise scoring function
to consider. This includes both the original span representations, the element-wise
similarity of the span representations, and an embedding representation of the distance
between the two spans.
Parameters
        ----------
top_span_embeddings : ``torch.FloatTensor``, required.
Embedding representations of the top spans. Has shape
(batch_size, num_spans_to_keep, embedding_size).
antecedent_embeddings : ``torch.FloatTensor``, required.
Embedding representations of the antecedent spans we are considering
for each top span. Has shape
(batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size).
antecedent_offsets : ``torch.IntTensor``, required.
The offsets between each top span and its antecedent spans in terms
of spans we are considering. Has shape (1, max_antecedents).
Returns
-------
span_pair_embeddings : ``torch.FloatTensor``
Embedding representation of the pair of spans to consider. Has shape
(batch_size, num_spans_to_keep, max_antecedents, embedding_size)
"""
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings)
# Shape: (1, max_antecedents)
bucket_values = util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets)
# (1, event_type)
label_bucket_values = bucket_values.new_zeros((1, self._positive_label_size))
# Shape: (1, max_antecedents + event_type_size, embedding_size)
antecedent_distance_embeddings = self._distance_embedding(
torch.cat([bucket_values, label_bucket_values], 1)
)
# Shape: (1, 1, max_antecedents + event_type_size, embedding_size)
antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0)
expanded_distance_embeddings_shape = (antecedent_embeddings.size(0),
antecedent_embeddings.size(1),
antecedent_embeddings.size(2),
antecedent_distance_embeddings.size(-1))
# Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)
antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape)
# Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)
span_pair_embeddings = torch.cat([target_embeddings,
antecedent_embeddings,
antecedent_embeddings * target_embeddings,
antecedent_distance_embeddings], -1)
return span_pair_embeddings
def _compute_antecedent_gold_labels(self,
top_span_labels: torch.IntTensor,
type_antecedent_labels: torch.IntTensor,
antecedent_labels: torch.IntTensor):
"""
Generates a binary indicator for every pair of spans. This label is one if and
only if the pair of spans belong to the same cluster. The labels are augmented
with a dummy antecedent at the zeroth position, which represents the prediction
that a span does not have any antecedent.
Parameters
----------
top_span_labels : ``torch.IntTensor``, required.
The cluster id label for every span. The id is arbitrary,
as we just care about the clustering. Has shape (batch_size, num_spans_to_keep).
antecedent_labels : ``torch.IntTensor``, required.
The cluster id label for every antecedent span. The id is arbitrary,
as we just care about the clustering. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
Returns
-------
pairwise_labels_with_dummy_label : ``torch.FloatTensor``
A binary tensor representing whether a given pair of spans belong to
the same cluster in the gold clustering.
Has shape (batch_size, num_spans_to_keep, max_antecedents + 1).
"""
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
# print(top_span_labels)
# print(antecedent_labels)
target_labels = top_span_labels.expand_as(antecedent_labels)
same_cluster_indicator = (target_labels == antecedent_labels).float()
non_dummy_indicator = (target_labels >= 0).float()
pairwise_labels = same_cluster_indicator * non_dummy_indicator
if self._pretrain_ed:
pairwise_labels = pairwise_labels * 0
else:
# for pairwise_labels without type_antecedent_labels
pairwise_labels_indicator = (pairwise_labels.sum(-1, keepdim=True) > 0).float()
type_antecedent_labels = type_antecedent_labels * (1 - pairwise_labels_indicator)
self._coref_label_metric(torch.sum(pairwise_labels).item())
self._nil_label_metric(torch.sum(type_antecedent_labels[:, :, 0]).item())
self._type_label_metric(torch.sum(type_antecedent_labels[:, :, 1: self._positive_label_size + 1]).item())
# print(pairwise_labels)
#
# # Shape: (batch_size, num_spans_to_keep, 1)
# dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True)
# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents + 1)
pairwise_labels_with_dummy_label = torch.cat([type_antecedent_labels, pairwise_labels], -1)
return pairwise_labels_with_dummy_label
def _compute_coreference_scores(self,
pairwise_embeddings: torch.FloatTensor,
top_span_mention_scores: torch.FloatTensor,
antecedent_mention_scores: torch.FloatTensor,
antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor:
"""
Computes scores for every pair of spans. Additionally, a dummy label is included,
representing the decision that the span is not coreferent with anything. For the dummy
label, the score is always zero. For the true antecedent spans, the score consists of
the pairwise antecedent score and the unary mention scores for the span and its
antecedent. The factoring allows the model to blame many of the absent links on bad
spans, enabling the pruning strategy used in the forward pass.
Parameters
----------
pairwise_embeddings: ``torch.FloatTensor``, required.
Embedding representations of pairs of spans. Has shape
            (batch_size, num_spans_to_keep, max_antecedents + event_type_size, encoding_dim)
top_span_mention_scores: ``torch.FloatTensor``, required.
Mention scores for every span. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
antecedent_mention_scores: ``torch.FloatTensor``, required.
Mention scores for every antecedent. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
antecedent_log_mask: ``torch.FloatTensor``, required.
The log of the mask for valid antecedents.
Returns
-------
coreference_scores: ``torch.FloatTensor``
            A tensor of shape (batch_size, num_spans_to_keep, event_type_size + max_antecedents + 1),
            representing the unnormalised score for each (span, antecedent) pair
we considered.
"""
antecedent_log_mask = torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0),
antecedent_log_mask.size(1),
self._positive_label_size)),
antecedent_log_mask],
-1)
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
antecedent_scores = self._antecedent_scorer(
self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1)
antecedent_scores += top_span_mention_scores + antecedent_mention_scores
antecedent_scores += antecedent_log_mask
# Shape: (batch_size, num_spans_to_keep, 1)
shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1]
dummy_scores = antecedent_scores.new_zeros(*shape)
# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1)
return coreference_scores
    @staticmethod
    def _generate_valid_antecedents(num_spans_to_keep: int,
max_antecedents: int,
device: int) -> Tuple[torch.IntTensor,
torch.IntTensor,
torch.FloatTensor]:
"""
This method generates possible antecedents per span which survived the pruning
stage. This procedure is `generic across the batch`. The reason this is the case is
that each span in a batch can be coreferent with any previous span, but here we
are computing the possible `indices` of these spans. So, regardless of the batch,
the 1st span _cannot_ have any antecedents, because there are none to select from.
Similarly, each element can only predict previous spans, so this returns a matrix
of shape (num_spans_to_keep, max_antecedents), where the (i,j)-th index is equal to
(i - 1) - j if j <= i, or zero otherwise.
Parameters
----------
num_spans_to_keep : ``int``, required.
The number of spans that were kept while pruning.
max_antecedents : ``int``, required.
The maximum number of antecedent spans to consider for every span.
device: ``int``, required.
The CUDA device to use.
Returns
-------
valid_antecedent_indices : ``torch.IntTensor``
The indices of every antecedent to consider with respect to the top k spans.
Has shape ``(num_spans_to_keep, max_antecedents)``.
valid_antecedent_offsets : ``torch.IntTensor``
The distance between the span and each of its antecedents in terms of the number
of considered spans (i.e not the word distance between the spans).
Has shape ``(1, max_antecedents)``.
valid_antecedent_log_mask : ``torch.FloatTensor``
The logged mask representing whether each antecedent span is valid. Required since
different spans have different numbers of valid antecedents. For example, the first
span in the document should have no valid antecedents.
Has shape ``(1, num_spans_to_keep, max_antecedents)``.
"""
# Shape: (num_spans_to_keep, 1)
target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1)
# Shape: (1, max_antecedents)
valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0)
# This is a broadcasted subtraction.
# Shape: (num_spans_to_keep, max_antecedents)
raw_antecedent_indices = target_indices - valid_antecedent_offsets
# In our matrix of indices, the upper triangular part will be negative
# because the offsets will be > the target indices. We want to mask these,
# because these are exactly the indices which we don't want to predict, per span.
# We're generating a logspace mask here because we will eventually create a
# distribution over these indices, so we need the 0 elements of the mask to be -inf
# in order to not mess up the normalisation of the distribution.
# Shape: (1, num_spans_to_keep, max_antecedents)
valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log()
# Shape: (num_spans_to_keep, max_antecedents)
valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long()
return valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask
| 2 | 2 |
week2/7litersProblem.py | vietanhtran2710/ArtificialIntelligenceHomework | 3 | 3732 | <filename>week2/7litersProblem.py
"""
Given 3 bottles with capacities of 3, 5, and 9 liters,
count the number of all possible move sequences that leave exactly 7 liters in one bottle
"""
current_path = [[0, 0, 0]]
CAPACITIES = (3, 5, 9)
solutions_count = 0
def move_to_new_state(current_state):
global solutions_count, current_path
if 7 in current_state:
solutions_count += 1
else:
# Empty bottle
for i in range(3):
if current_state[i] != 0:
new_state = list(current_state)
new_state[i] = 0
if new_state not in current_path:
current_path.append(new_state)
move_to_new_state(new_state)
current_path.pop()
# Fill bottle
for i in range(3):
if current_state[i] != CAPACITIES[i]:
new_state = list(current_state)
new_state[i] = CAPACITIES[i]
if new_state not in current_path:
current_path.append(new_state)
move_to_new_state(new_state)
current_path.pop()
# Pour from one bottle to another
for i in range(3):
for j in range(3):
if i != j and current_state[i] != 0 and current_state[j] != CAPACITIES[j]:
new_state = list(current_state)
liters_change = min(CAPACITIES[j] - current_state[j], current_state[i])
new_state[j] += liters_change
new_state[i] -= liters_change
if new_state not in current_path:
current_path.append(new_state)
move_to_new_state(new_state)
current_path.pop()
if __name__ == "__main__":
try:
current_state = [0, 0, 0]
move_to_new_state(current_state)
print(solutions_count)
except KeyboardInterrupt:
print(solutions_count)
    # Result: at least 44900799 solutions
| 3.546875 | 4 |
st2common/st2common/bootstrap/rulesregistrar.py | avezraj/st2 | 0 | 3733 | <gh_stars>0
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
from st2common import log as logging
from st2common.constants.meta import ALLOWED_EXTS
from st2common.constants.pack import DEFAULT_PACK_NAME
from st2common.bootstrap.base import ResourceRegistrar
from st2common.models.api.rule import RuleAPI
from st2common.models.system.common import ResourceReference
from st2common.persistence.rule import Rule
from st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count
from st2common.exceptions.db import coditationDBObjectNotFoundError
import st2common.content.utils as content_utils
__all__ = [
'RulesRegistrar',
'register_rules'
]
LOG = logging.getLogger(__name__)
class RulesRegistrar(ResourceRegistrar):
ALLOWED_EXTENSIONS = ALLOWED_EXTS
def register_from_packs(self, base_dirs):
"""
:return: Number of rules registered.
:rtype: ``int``
"""
# Register packs first
self.register_packs(base_dirs=base_dirs)
registered_count = 0
content = self._pack_loader.get_content(base_dirs=base_dirs,
content_type='rules')
for pack, rules_dir in six.iteritems(content):
if not rules_dir:
LOG.debug('Pack %s does not contain rules.', pack)
continue
try:
LOG.debug('Registering rules from pack: %s', pack)
rules = self._get_rules_from_pack(rules_dir)
count = self._register_rules_from_pack(pack, rules)
registered_count += count
except Exception as e:
if self._fail_on_failure:
raise e
LOG.exception('Failed registering all rules from pack: %s', rules_dir)
return registered_count
def register_from_pack(self, pack_dir):
"""
Register all the rules from the provided pack.
:return: Number of rules registered.
:rtype: ``int``
"""
pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir
_, pack = os.path.split(pack_dir)
rules_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir,
content_type='rules')
# Register pack first
self.register_pack(pack_name=pack, pack_dir=pack_dir)
registered_count = 0
if not rules_dir:
return registered_count
LOG.debug('Registering rules from pack %s:, dir: %s', pack, rules_dir)
try:
rules = self._get_rules_from_pack(rules_dir=rules_dir)
registered_count = self._register_rules_from_pack(pack=pack, rules=rules)
except Exception as e:
if self._fail_on_failure:
raise e
LOG.exception('Failed registering all rules from pack: %s', rules_dir)
return registered_count
def _get_rules_from_pack(self, rules_dir):
return self.get_resources_from_pack(resources_dir=rules_dir)
def _register_rules_from_pack(self, pack, rules):
registered_count = 0
# TODO: Refactor this monstrosity
for rule in rules:
LOG.debug('Loading rule from %s.', rule)
try:
content = self._meta_loader.load(rule)
pack_field = content.get('pack', None)
if not pack_field:
content['pack'] = pack
pack_field = pack
if pack_field != pack:
raise Exception('Model is in pack "%s" but field "pack" is different: %s' %
(pack, pack_field))
metadata_file = content_utils.get_relative_path_to_pack_file(pack_ref=pack,
file_path=rule,
use_pack_cache=True)
content['metadata_file'] = metadata_file
rule_api = RuleAPI(**content)
rule_api.validate()
rule_db = RuleAPI.to_model(rule_api)
# Migration from rule without pack to rule with pack.
# There might be a rule with same name but in pack `default`
# generated in migration script. In this case, we want to
# delete so we don't have duplicates.
if pack_field != DEFAULT_PACK_NAME:
try:
rule_ref = ResourceReference.to_string_reference(name=content['name'],
pack=DEFAULT_PACK_NAME)
LOG.debug('Looking for rule %s in pack %s', content['name'],
DEFAULT_PACK_NAME)
existing = Rule.get_by_ref(rule_ref)
LOG.debug('Existing = %s', existing)
if existing:
LOG.debug('Found rule in pack default: %s; Deleting.', rule_ref)
Rule.delete(existing)
except:
LOG.exception('Exception deleting rule from %s pack.', DEFAULT_PACK_NAME)
try:
rule_ref = ResourceReference.to_string_reference(name=content['name'],
pack=content['pack'])
existing = Rule.get_by_ref(rule_ref)
if existing:
rule_db.id = existing.id
LOG.debug('Found existing rule: %s with id: %s', rule_ref, existing.id)
except coditationDBObjectNotFoundError:
LOG.debug('Rule %s not found. Creating new one.', rule)
try:
rule_db = Rule.add_or_update(rule_db)
increment_trigger_ref_count(rule_api=rule_api)
extra = {'rule_db': rule_db}
LOG.audit('Rule updated. Rule %s from %s.', rule_db, rule, extra=extra)
except Exception:
LOG.exception('Failed to create rule %s.', rule_api.name)
# If there was an existing rule then the ref count was updated in
# to_model so it needs to be adjusted down here. Also, update could
# lead to removal of a Trigger so now is a good time for book-keeping.
if existing:
cleanup_trigger_db_for_rule(existing)
except Exception as e:
if self._fail_on_failure:
msg = ('Failed to register rule "%s" from pack "%s": %s' % (rule, pack,
six.text_type(e)))
raise ValueError(msg)
LOG.exception('Failed registering rule from %s.', rule)
else:
registered_count += 1
return registered_count
def register_rules(packs_base_paths=None, pack_dir=None, use_pack_cache=True,
fail_on_failure=False):
if packs_base_paths:
assert isinstance(packs_base_paths, list)
if not packs_base_paths:
packs_base_paths = content_utils.get_packs_base_paths()
registrar = RulesRegistrar(use_pack_cache=use_pack_cache,
fail_on_failure=fail_on_failure)
if pack_dir:
result = registrar.register_from_pack(pack_dir=pack_dir)
else:
result = registrar.register_from_packs(base_dirs=packs_base_paths)
return result
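# Example usage (an illustrative sketch; the pack directory below is a hypothetical path):
#
#     from st2common.bootstrap.rulesregistrar import register_rules
#     count = register_rules(pack_dir='/opt/stackstorm/packs/examples',
#                            fail_on_failure=True)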
| 1.765625 | 2 |
sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/models/_models_py3.py | aiven/azure-sdk-for-python | 1 | 3734 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._cost_management_client_enums import *
class Resource(msrest.serialization.Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'tags': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.tags = None
class Alert(Resource):
"""An individual alert.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:param definition: defines the type of alert.
:type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition
:param description: Alert description.
:type description: str
:param source: Source of alert. Possible values include: "Preset", "User".
:type source: str or ~azure.mgmt.costmanagement.models.AlertSource
:param details: Alert details.
:type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails
:param cost_entity_id: related budget.
:type cost_entity_id: str
:param status: alert status. Possible values include: "None", "Active", "Overridden",
"Resolved", "Dismissed".
:type status: str or ~azure.mgmt.costmanagement.models.AlertStatus
:param creation_time: dateTime in which alert was created.
:type creation_time: str
:param close_time: dateTime in which alert was closed.
:type close_time: str
:param modification_time: dateTime in which alert was last modified.
:type modification_time: str
:param status_modification_user_name:
:type status_modification_user_name: str
:param status_modification_time: dateTime in which the alert status was last modified.
:type status_modification_time: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'tags': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'},
'description': {'key': 'properties.description', 'type': 'str'},
'source': {'key': 'properties.source', 'type': 'str'},
'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'},
'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'str'},
'close_time': {'key': 'properties.closeTime', 'type': 'str'},
'modification_time': {'key': 'properties.modificationTime', 'type': 'str'},
'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'},
'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'},
}
def __init__(
self,
*,
definition: Optional["AlertPropertiesDefinition"] = None,
description: Optional[str] = None,
source: Optional[Union[str, "AlertSource"]] = None,
details: Optional["AlertPropertiesDetails"] = None,
cost_entity_id: Optional[str] = None,
status: Optional[Union[str, "AlertStatus"]] = None,
creation_time: Optional[str] = None,
close_time: Optional[str] = None,
modification_time: Optional[str] = None,
status_modification_user_name: Optional[str] = None,
status_modification_time: Optional[str] = None,
**kwargs
):
super(Alert, self).__init__(**kwargs)
self.definition = definition
self.description = description
self.source = source
self.details = details
self.cost_entity_id = cost_entity_id
self.status = status
self.creation_time = creation_time
self.close_time = close_time
self.modification_time = modification_time
self.status_modification_user_name = status_modification_user_name
self.status_modification_time = status_modification_time
class AlertPropertiesDefinition(msrest.serialization.Model):
"""defines the type of alert.
:param type: type of alert. Possible values include: "Budget", "Invoice", "Credit", "Quota",
"General", "xCloud", "BudgetForecast".
:type type: str or ~azure.mgmt.costmanagement.models.AlertType
:param category: Alert category. Possible values include: "Cost", "Usage", "Billing", "System".
:type category: str or ~azure.mgmt.costmanagement.models.AlertCategory
:param criteria: Criteria that triggered alert. Possible values include:
"CostThresholdExceeded", "UsageThresholdExceeded", "CreditThresholdApproaching",
"CreditThresholdReached", "QuotaThresholdApproaching", "QuotaThresholdReached",
"MultiCurrency", "ForecastCostThresholdExceeded", "ForecastUsageThresholdExceeded",
"InvoiceDueDateApproaching", "InvoiceDueDateReached", "CrossCloudNewDataAvailable",
"CrossCloudCollectionError", "GeneralThresholdError".
:type criteria: str or ~azure.mgmt.costmanagement.models.AlertCriteria
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'criteria': {'key': 'criteria', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "AlertType"]] = None,
category: Optional[Union[str, "AlertCategory"]] = None,
criteria: Optional[Union[str, "AlertCriteria"]] = None,
**kwargs
):
super(AlertPropertiesDefinition, self).__init__(**kwargs)
self.type = type
self.category = category
self.criteria = criteria
class AlertPropertiesDetails(msrest.serialization.Model):
"""Alert details.
:param time_grain_type: Type of timegrain cadence. Possible values include: "None", "Monthly",
"Quarterly", "Annually", "BillingMonth", "BillingQuarter", "BillingAnnual".
:type time_grain_type: str or ~azure.mgmt.costmanagement.models.AlertTimeGrainType
:param period_start_date: datetime of periodStartDate.
:type period_start_date: str
:param triggered_by: notificationId that triggered this alert.
:type triggered_by: str
:param resource_group_filter: array of resourceGroups to filter by.
:type resource_group_filter: list[object]
:param resource_filter: array of resources to filter by.
:type resource_filter: list[object]
:param meter_filter: array of meters to filter by.
:type meter_filter: list[object]
:param tag_filter: tags to filter by.
:type tag_filter: object
:param threshold: notification threshold percentage as a decimal which activated this alert.
:type threshold: float
:param operator: operator used to compare currentSpend with amount. Possible values include:
"None", "EqualTo", "GreaterThan", "GreaterThanOrEqualTo", "LessThan", "LessThanOrEqualTo".
:type operator: str or ~azure.mgmt.costmanagement.models.AlertOperator
:param amount: budget threshold amount.
:type amount: float
:param unit: unit of currency being used.
:type unit: str
:param current_spend: current spend.
:type current_spend: float
:param contact_emails: list of emails to contact.
:type contact_emails: list[str]
:param contact_groups: list of action groups to broadcast to.
:type contact_groups: list[str]
:param contact_roles: list of contact roles.
:type contact_roles: list[str]
:param overriding_alert: overriding alert.
:type overriding_alert: str
"""
_attribute_map = {
'time_grain_type': {'key': 'timeGrainType', 'type': 'str'},
'period_start_date': {'key': 'periodStartDate', 'type': 'str'},
'triggered_by': {'key': 'triggeredBy', 'type': 'str'},
'resource_group_filter': {'key': 'resourceGroupFilter', 'type': '[object]'},
'resource_filter': {'key': 'resourceFilter', 'type': '[object]'},
'meter_filter': {'key': 'meterFilter', 'type': '[object]'},
'tag_filter': {'key': 'tagFilter', 'type': 'object'},
'threshold': {'key': 'threshold', 'type': 'float'},
'operator': {'key': 'operator', 'type': 'str'},
'amount': {'key': 'amount', 'type': 'float'},
'unit': {'key': 'unit', 'type': 'str'},
'current_spend': {'key': 'currentSpend', 'type': 'float'},
'contact_emails': {'key': 'contactEmails', 'type': '[str]'},
'contact_groups': {'key': 'contactGroups', 'type': '[str]'},
'contact_roles': {'key': 'contactRoles', 'type': '[str]'},
'overriding_alert': {'key': 'overridingAlert', 'type': 'str'},
}
def __init__(
self,
*,
time_grain_type: Optional[Union[str, "AlertTimeGrainType"]] = None,
period_start_date: Optional[str] = None,
triggered_by: Optional[str] = None,
resource_group_filter: Optional[List[object]] = None,
resource_filter: Optional[List[object]] = None,
meter_filter: Optional[List[object]] = None,
tag_filter: Optional[object] = None,
threshold: Optional[float] = None,
operator: Optional[Union[str, "AlertOperator"]] = None,
amount: Optional[float] = None,
unit: Optional[str] = None,
current_spend: Optional[float] = None,
contact_emails: Optional[List[str]] = None,
contact_groups: Optional[List[str]] = None,
contact_roles: Optional[List[str]] = None,
overriding_alert: Optional[str] = None,
**kwargs
):
super(AlertPropertiesDetails, self).__init__(**kwargs)
self.time_grain_type = time_grain_type
self.period_start_date = period_start_date
self.triggered_by = triggered_by
self.resource_group_filter = resource_group_filter
self.resource_filter = resource_filter
self.meter_filter = meter_filter
self.tag_filter = tag_filter
self.threshold = threshold
self.operator = operator
self.amount = amount
self.unit = unit
self.current_spend = current_spend
self.contact_emails = contact_emails
self.contact_groups = contact_groups
self.contact_roles = contact_roles
self.overriding_alert = overriding_alert
class AlertsResult(msrest.serialization.Model):
"""Result of alerts.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of alerts.
:vartype value: list[~azure.mgmt.costmanagement.models.Alert]
:ivar next_link: URL to get the next set of alerts results if there are any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Alert]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AlertsResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class CommonExportProperties(msrest.serialization.Model):
"""The common properties of the export.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param format: The format of the export being delivered. Currently only 'Csv' is supported.
Possible values include: "Csv".
:type format: str or ~azure.mgmt.costmanagement.models.FormatType
:param delivery_info: Required. Has delivery information for the export.
:type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo
:param definition: Required. Has the definition for the export.
:type definition: ~azure.mgmt.costmanagement.models.ExportDefinition
:param run_history: If requested, has the most recent execution history for the export.
:type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult
:ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the
next execution time.
:vartype next_run_time_estimate: ~datetime.datetime
"""
_validation = {
'delivery_info': {'required': True},
'definition': {'required': True},
'next_run_time_estimate': {'readonly': True},
}
_attribute_map = {
'format': {'key': 'format', 'type': 'str'},
'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'},
'definition': {'key': 'definition', 'type': 'ExportDefinition'},
'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'},
'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'},
}
def __init__(
self,
*,
delivery_info: "ExportDeliveryInfo",
definition: "ExportDefinition",
format: Optional[Union[str, "FormatType"]] = None,
run_history: Optional["ExportExecutionListResult"] = None,
**kwargs
):
super(CommonExportProperties, self).__init__(**kwargs)
self.format = format
self.delivery_info = delivery_info
self.definition = definition
self.run_history = run_history
self.next_run_time_estimate = None
class Dimension(Resource):
"""Dimension.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:ivar description: Dimension description.
:vartype description: str
:ivar filter_enabled: Filter enabled.
:vartype filter_enabled: bool
:ivar grouping_enabled: Grouping enabled.
:vartype grouping_enabled: bool
:param data:
:type data: list[str]
:ivar total: Total number of data for the dimension.
:vartype total: int
:ivar category: Dimension category.
:vartype category: str
:ivar usage_start: Usage start.
:vartype usage_start: ~datetime.datetime
:ivar usage_end: Usage end.
:vartype usage_end: ~datetime.datetime
:ivar next_link: The link (url) to the next page of results.
:vartype next_link: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'tags': {'readonly': True},
'description': {'readonly': True},
'filter_enabled': {'readonly': True},
'grouping_enabled': {'readonly': True},
'total': {'readonly': True},
'category': {'readonly': True},
'usage_start': {'readonly': True},
'usage_end': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'description': {'key': 'properties.description', 'type': 'str'},
'filter_enabled': {'key': 'properties.filterEnabled', 'type': 'bool'},
'grouping_enabled': {'key': 'properties.groupingEnabled', 'type': 'bool'},
'data': {'key': 'properties.data', 'type': '[str]'},
'total': {'key': 'properties.total', 'type': 'int'},
'category': {'key': 'properties.category', 'type': 'str'},
'usage_start': {'key': 'properties.usageStart', 'type': 'iso-8601'},
'usage_end': {'key': 'properties.usageEnd', 'type': 'iso-8601'},
'next_link': {'key': 'properties.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
data: Optional[List[str]] = None,
**kwargs
):
super(Dimension, self).__init__(**kwargs)
self.description = None
self.filter_enabled = None
self.grouping_enabled = None
self.data = data
self.total = None
self.category = None
self.usage_start = None
self.usage_end = None
self.next_link = None
class DimensionsListResult(msrest.serialization.Model):
"""Result of listing dimensions. It contains a list of available dimensions.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of dimensions.
:vartype value: list[~azure.mgmt.costmanagement.models.Dimension]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Dimension]'},
}
def __init__(
self,
**kwargs
):
super(DimensionsListResult, self).__init__(**kwargs)
self.value = None
class DismissAlertPayload(msrest.serialization.Model):
"""The request payload to update an alert.
:param definition: defines the type of alert.
:type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition
:param description: Alert description.
:type description: str
:param source: Source of alert. Possible values include: "Preset", "User".
:type source: str or ~azure.mgmt.costmanagement.models.AlertSource
:param details: Alert details.
:type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails
:param cost_entity_id: related budget.
:type cost_entity_id: str
:param status: alert status. Possible values include: "None", "Active", "Overridden",
"Resolved", "Dismissed".
:type status: str or ~azure.mgmt.costmanagement.models.AlertStatus
:param creation_time: dateTime in which alert was created.
:type creation_time: str
:param close_time: dateTime in which alert was closed.
:type close_time: str
:param modification_time: dateTime in which alert was last modified.
:type modification_time: str
:param status_modification_user_name:
:type status_modification_user_name: str
:param status_modification_time: dateTime in which the alert status was last modified.
:type status_modification_time: str
"""
_attribute_map = {
'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'},
'description': {'key': 'properties.description', 'type': 'str'},
'source': {'key': 'properties.source', 'type': 'str'},
'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'},
'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'str'},
'close_time': {'key': 'properties.closeTime', 'type': 'str'},
'modification_time': {'key': 'properties.modificationTime', 'type': 'str'},
'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'},
'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'},
}
def __init__(
self,
*,
definition: Optional["AlertPropertiesDefinition"] = None,
description: Optional[str] = None,
source: Optional[Union[str, "AlertSource"]] = None,
details: Optional["AlertPropertiesDetails"] = None,
cost_entity_id: Optional[str] = None,
status: Optional[Union[str, "AlertStatus"]] = None,
creation_time: Optional[str] = None,
close_time: Optional[str] = None,
modification_time: Optional[str] = None,
status_modification_user_name: Optional[str] = None,
status_modification_time: Optional[str] = None,
**kwargs
):
super(DismissAlertPayload, self).__init__(**kwargs)
self.definition = definition
self.description = description
self.source = source
self.details = details
self.cost_entity_id = cost_entity_id
self.status = status
self.creation_time = creation_time
self.close_time = close_time
self.modification_time = modification_time
self.status_modification_user_name = status_modification_user_name
self.status_modification_time = status_modification_time
class ErrorDetails(msrest.serialization.Model):
"""The details of the error.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Error code.
:vartype code: str
:ivar message: Error message indicating why the operation failed.
:vartype message: str
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetails, self).__init__(**kwargs)
self.code = None
self.message = None
class ErrorResponse(msrest.serialization.Model):
"""Error response indicates that the service is not able to process the incoming request. The reason is provided in the error message.
Some Error responses:
    * 429 TooManyRequests - Request is throttled. Retry after waiting for the time specified in the "x-ms-ratelimit-microsoft.consumption-retry-after" header.
    * 503 ServiceUnavailable - Service is temporarily unavailable. Retry after waiting for the time specified in the "Retry-After" header.
:param error: The details of the error.
:type error: ~azure.mgmt.costmanagement.models.ErrorDetails
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetails'},
}
def __init__(
self,
*,
error: Optional["ErrorDetails"] = None,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class ProxyResource(msrest.serialization.Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be
used to determine whether the user is updating the latest version or not.
:type e_tag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
}
def __init__(
self,
*,
e_tag: Optional[str] = None,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.e_tag = e_tag
class Export(ProxyResource):
"""An export resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be
used to determine whether the user is updating the latest version or not.
:type e_tag: str
:param format: The format of the export being delivered. Currently only 'Csv' is supported.
Possible values include: "Csv".
:type format: str or ~azure.mgmt.costmanagement.models.FormatType
:param delivery_info: Has delivery information for the export.
:type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo
:param definition: Has the definition for the export.
:type definition: ~azure.mgmt.costmanagement.models.ExportDefinition
:param run_history: If requested, has the most recent execution history for the export.
:type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult
:ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the
next execution time.
:vartype next_run_time_estimate: ~datetime.datetime
:param schedule: Has schedule information for the export.
:type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'next_run_time_estimate': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'format': {'key': 'properties.format', 'type': 'str'},
'delivery_info': {'key': 'properties.deliveryInfo', 'type': 'ExportDeliveryInfo'},
'definition': {'key': 'properties.definition', 'type': 'ExportDefinition'},
'run_history': {'key': 'properties.runHistory', 'type': 'ExportExecutionListResult'},
'next_run_time_estimate': {'key': 'properties.nextRunTimeEstimate', 'type': 'iso-8601'},
'schedule': {'key': 'properties.schedule', 'type': 'ExportSchedule'},
}
def __init__(
self,
*,
e_tag: Optional[str] = None,
format: Optional[Union[str, "FormatType"]] = None,
delivery_info: Optional["ExportDeliveryInfo"] = None,
definition: Optional["ExportDefinition"] = None,
run_history: Optional["ExportExecutionListResult"] = None,
schedule: Optional["ExportSchedule"] = None,
**kwargs
):
super(Export, self).__init__(e_tag=e_tag, **kwargs)
self.format = format
self.delivery_info = delivery_info
self.definition = definition
self.run_history = run_history
self.next_run_time_estimate = None
self.schedule = schedule
class ExportDataset(msrest.serialization.Model):
"""The definition for data in the export.
:param granularity: The granularity of rows in the export. Currently only 'Daily' is supported.
Possible values include: "Daily".
:type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType
:param configuration: The export dataset configuration.
:type configuration: ~azure.mgmt.costmanagement.models.ExportDatasetConfiguration
"""
_attribute_map = {
'granularity': {'key': 'granularity', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ExportDatasetConfiguration'},
}
def __init__(
self,
*,
granularity: Optional[Union[str, "GranularityType"]] = None,
configuration: Optional["ExportDatasetConfiguration"] = None,
**kwargs
):
super(ExportDataset, self).__init__(**kwargs)
self.granularity = granularity
self.configuration = configuration
class ExportDatasetConfiguration(msrest.serialization.Model):
"""The export dataset configuration. Allows columns to be selected for the export. If not provided then the export will include all available columns.
:param columns: Array of column names to be included in the export. If not provided then the
export will include all available columns. The available columns can vary by customer channel
(see examples).
:type columns: list[str]
"""
_attribute_map = {
'columns': {'key': 'columns', 'type': '[str]'},
}
def __init__(
self,
*,
columns: Optional[List[str]] = None,
**kwargs
):
super(ExportDatasetConfiguration, self).__init__(**kwargs)
self.columns = columns
class ExportDefinition(msrest.serialization.Model):
"""The definition of an export.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of the export. Note that 'Usage' is equivalent to 'ActualCost'
and is applicable to exports that do not yet provide data for charges or amortization for
service reservations. Possible values include: "Usage", "ActualCost", "AmortizedCost".
:type type: str or ~azure.mgmt.costmanagement.models.ExportType
:param timeframe: Required. The time frame for pulling data for the export. If custom, then a
specific time period must be provided. Possible values include: "MonthToDate",
"BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom".
:type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType
:param time_period: Has time period for pulling data for the export.
:type time_period: ~azure.mgmt.costmanagement.models.ExportTimePeriod
:param data_set: The definition for data in the export.
:type data_set: ~azure.mgmt.costmanagement.models.ExportDataset
"""
_validation = {
'type': {'required': True},
'timeframe': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'timeframe': {'key': 'timeframe', 'type': 'str'},
'time_period': {'key': 'timePeriod', 'type': 'ExportTimePeriod'},
'data_set': {'key': 'dataSet', 'type': 'ExportDataset'},
}
def __init__(
self,
*,
type: Union[str, "ExportType"],
timeframe: Union[str, "TimeframeType"],
time_period: Optional["ExportTimePeriod"] = None,
data_set: Optional["ExportDataset"] = None,
**kwargs
):
super(ExportDefinition, self).__init__(**kwargs)
self.type = type
self.timeframe = timeframe
self.time_period = time_period
self.data_set = data_set
class ExportDeliveryDestination(msrest.serialization.Model):
"""The destination information for the delivery of the export. To allow access to a storage account, you must register the account's subscription with the Microsoft.CostManagementExports resource provider. This is required once per subscription. When creating an export in the Azure portal, it is done automatically, however API users need to register the subscription. For more information see https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services .
All required parameters must be populated in order to send to Azure.
:param resource_id: Required. The resource id of the storage account where exports will be
delivered.
:type resource_id: str
:param container: Required. The name of the container where exports will be uploaded.
:type container: str
:param root_folder_path: The name of the directory where exports will be uploaded.
:type root_folder_path: str
"""
_validation = {
'resource_id': {'required': True},
'container': {'required': True},
}
_attribute_map = {
'resource_id': {'key': 'resourceId', 'type': 'str'},
'container': {'key': 'container', 'type': 'str'},
'root_folder_path': {'key': 'rootFolderPath', 'type': 'str'},
}
def __init__(
self,
*,
resource_id: str,
container: str,
root_folder_path: Optional[str] = None,
**kwargs
):
super(ExportDeliveryDestination, self).__init__(**kwargs)
self.resource_id = resource_id
self.container = container
self.root_folder_path = root_folder_path
class ExportDeliveryInfo(msrest.serialization.Model):
"""The delivery information associated with a export.
All required parameters must be populated in order to send to Azure.
:param destination: Required. Has destination for the export being delivered.
:type destination: ~azure.mgmt.costmanagement.models.ExportDeliveryDestination
"""
_validation = {
'destination': {'required': True},
}
_attribute_map = {
'destination': {'key': 'destination', 'type': 'ExportDeliveryDestination'},
}
def __init__(
self,
*,
destination: "ExportDeliveryDestination",
**kwargs
):
super(ExportDeliveryInfo, self).__init__(**kwargs)
self.destination = destination
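# Illustrative sketch of wiring the export models together (the resource id, container and
# folder below are placeholders, not real resources):
#
#     destination = ExportDeliveryDestination(
#         resource_id='/subscriptions/<sub-id>/resourceGroups/<rg>/providers/'
#                     'Microsoft.Storage/storageAccounts/<account>',
#         container='exports',
#         root_folder_path='cost')
#     delivery_info = ExportDeliveryInfo(destination=destination)
#     definition = ExportDefinition(type='ActualCost', timeframe='MonthToDate')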
class ExportExecution(Resource):
"""An export execution.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:param execution_type: The type of the export execution. Possible values include: "OnDemand",
"Scheduled".
:type execution_type: str or ~azure.mgmt.costmanagement.models.ExecutionType
:param status: The last known status of the export execution. Possible values include:
"Queued", "InProgress", "Completed", "Failed", "Timeout", "NewDataNotAvailable",
"DataNotAvailable".
:type status: str or ~azure.mgmt.costmanagement.models.ExecutionStatus
:param submitted_by: The identifier for the entity that executed the export. For OnDemand
executions it is the user email. For scheduled executions it is 'System'.
:type submitted_by: str
:param submitted_time: The time when export was queued to be executed.
:type submitted_time: ~datetime.datetime
:param processing_start_time: The time when export was picked up to be executed.
:type processing_start_time: ~datetime.datetime
:param processing_end_time: The time when the export execution finished.
:type processing_end_time: ~datetime.datetime
:param file_name: The name of the exported file.
:type file_name: str
:param run_settings: The export settings that were in effect for this execution.
:type run_settings: ~azure.mgmt.costmanagement.models.CommonExportProperties
:param error: The details of any error.
:type error: ~azure.mgmt.costmanagement.models.ErrorDetails
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'tags': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'execution_type': {'key': 'properties.executionType', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'submitted_by': {'key': 'properties.submittedBy', 'type': 'str'},
'submitted_time': {'key': 'properties.submittedTime', 'type': 'iso-8601'},
'processing_start_time': {'key': 'properties.processingStartTime', 'type': 'iso-8601'},
'processing_end_time': {'key': 'properties.processingEndTime', 'type': 'iso-8601'},
'file_name': {'key': 'properties.fileName', 'type': 'str'},
'run_settings': {'key': 'properties.runSettings', 'type': 'CommonExportProperties'},
'error': {'key': 'properties.error', 'type': 'ErrorDetails'},
}
def __init__(
self,
*,
execution_type: Optional[Union[str, "ExecutionType"]] = None,
status: Optional[Union[str, "ExecutionStatus"]] = None,
submitted_by: Optional[str] = None,
submitted_time: Optional[datetime.datetime] = None,
processing_start_time: Optional[datetime.datetime] = None,
processing_end_time: Optional[datetime.datetime] = None,
file_name: Optional[str] = None,
run_settings: Optional["CommonExportProperties"] = None,
error: Optional["ErrorDetails"] = None,
**kwargs
):
super(ExportExecution, self).__init__(**kwargs)
self.execution_type = execution_type
self.status = status
self.submitted_by = submitted_by
self.submitted_time = submitted_time
self.processing_start_time = processing_start_time
self.processing_end_time = processing_end_time
self.file_name = file_name
self.run_settings = run_settings
self.error = error
class ExportExecutionListResult(msrest.serialization.Model):
"""Result of listing the execution history of an export.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: A list of export executions.
:vartype value: list[~azure.mgmt.costmanagement.models.ExportExecution]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ExportExecution]'},
}
def __init__(
self,
**kwargs
):
super(ExportExecutionListResult, self).__init__(**kwargs)
self.value = None
class ExportListResult(msrest.serialization.Model):
"""Result of listing exports. It contains a list of available exports in the scope provided.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of exports.
:vartype value: list[~azure.mgmt.costmanagement.models.Export]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Export]'},
}
def __init__(
self,
**kwargs
):
super(ExportListResult, self).__init__(**kwargs)
self.value = None
class ExportProperties(CommonExportProperties):
"""The properties of the export.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param format: The format of the export being delivered. Currently only 'Csv' is supported.
Possible values include: "Csv".
:type format: str or ~azure.mgmt.costmanagement.models.FormatType
:param delivery_info: Required. Has delivery information for the export.
:type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo
:param definition: Required. Has the definition for the export.
:type definition: ~azure.mgmt.costmanagement.models.ExportDefinition
:param run_history: If requested, has the most recent execution history for the export.
:type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult
:ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the
next execution time.
:vartype next_run_time_estimate: ~datetime.datetime
:param schedule: Has schedule information for the export.
:type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule
"""
_validation = {
'delivery_info': {'required': True},
'definition': {'required': True},
'next_run_time_estimate': {'readonly': True},
}
_attribute_map = {
'format': {'key': 'format', 'type': 'str'},
'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'},
'definition': {'key': 'definition', 'type': 'ExportDefinition'},
'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'},
'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'},
'schedule': {'key': 'schedule', 'type': 'ExportSchedule'},
}
def __init__(
self,
*,
delivery_info: "ExportDeliveryInfo",
definition: "ExportDefinition",
format: Optional[Union[str, "FormatType"]] = None,
run_history: Optional["ExportExecutionListResult"] = None,
schedule: Optional["ExportSchedule"] = None,
**kwargs
):
super(ExportProperties, self).__init__(format=format, delivery_info=delivery_info, definition=definition, run_history=run_history, **kwargs)
self.schedule = schedule
class ExportRecurrencePeriod(msrest.serialization.Model):
"""The start and end date for recurrence schedule.
All required parameters must be populated in order to send to Azure.
:param from_property: Required. The start date of recurrence.
:type from_property: ~datetime.datetime
:param to: The end date of recurrence.
:type to: ~datetime.datetime
"""
_validation = {
'from_property': {'required': True},
}
_attribute_map = {
'from_property': {'key': 'from', 'type': 'iso-8601'},
'to': {'key': 'to', 'type': 'iso-8601'},
}
def __init__(
self,
*,
from_property: datetime.datetime,
to: Optional[datetime.datetime] = None,
**kwargs
):
super(ExportRecurrencePeriod, self).__init__(**kwargs)
self.from_property = from_property
self.to = to
class ExportSchedule(msrest.serialization.Model):
"""The schedule associated with the export.
All required parameters must be populated in order to send to Azure.
:param status: The status of the export's schedule. If 'Inactive', the export's schedule is
paused. Possible values include: "Active", "Inactive".
:type status: str or ~azure.mgmt.costmanagement.models.StatusType
:param recurrence: Required. The schedule recurrence. Possible values include: "Daily",
"Weekly", "Monthly", "Annually".
:type recurrence: str or ~azure.mgmt.costmanagement.models.RecurrenceType
:param recurrence_period: Has start and end date of the recurrence. The start date must be in
future. If present, the end date must be greater than start date.
:type recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod
"""
_validation = {
'recurrence': {'required': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'recurrence': {'key': 'recurrence', 'type': 'str'},
'recurrence_period': {'key': 'recurrencePeriod', 'type': 'ExportRecurrencePeriod'},
}
def __init__(
self,
*,
recurrence: Union[str, "RecurrenceType"],
status: Optional[Union[str, "StatusType"]] = None,
recurrence_period: Optional["ExportRecurrencePeriod"] = None,
**kwargs
):
super(ExportSchedule, self).__init__(**kwargs)
self.status = status
self.recurrence = recurrence
self.recurrence_period = recurrence_period
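# Illustrative sketch (dates are placeholders): a weekly schedule built from the two models
# above.
#
#     import datetime
#     period = ExportRecurrencePeriod(from_property=datetime.datetime(2030, 1, 1),
#                                     to=datetime.datetime(2030, 12, 31))
#     schedule = ExportSchedule(recurrence='Weekly', status='Active',
#                               recurrence_period=period)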
class ExportTimePeriod(msrest.serialization.Model):
"""The date range for data in the export. This should only be specified with timeFrame set to 'Custom'. The maximum date range is 3 months.
All required parameters must be populated in order to send to Azure.
:param from_property: Required. The start date for export data.
:type from_property: ~datetime.datetime
:param to: Required. The end date for export data.
:type to: ~datetime.datetime
"""
_validation = {
'from_property': {'required': True},
'to': {'required': True},
}
_attribute_map = {
'from_property': {'key': 'from', 'type': 'iso-8601'},
'to': {'key': 'to', 'type': 'iso-8601'},
}
def __init__(
self,
*,
from_property: datetime.datetime,
to: datetime.datetime,
**kwargs
):
super(ExportTimePeriod, self).__init__(**kwargs)
self.from_property = from_property
self.to = to
class ForecastDataset(msrest.serialization.Model):
"""The definition of data present in the forecast.
:param granularity: The granularity of rows in the forecast. Possible values include: "Daily".
:type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType
:param configuration: Has configuration information for the data in the export. The
configuration will be ignored if aggregation and grouping are provided.
:type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration
:param aggregation: Dictionary of aggregation expression to use in the forecast. The key of
each item in the dictionary is the alias for the aggregated column. forecast can have up to 2
aggregation clauses.
:type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation]
:param filter: Has filter expression to use in the forecast.
:type filter: ~azure.mgmt.costmanagement.models.QueryFilter
"""
_attribute_map = {
'granularity': {'key': 'granularity', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'},
'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'},
'filter': {'key': 'filter', 'type': 'QueryFilter'},
}
def __init__(
self,
*,
granularity: Optional[Union[str, "GranularityType"]] = None,
configuration: Optional["QueryDatasetConfiguration"] = None,
aggregation: Optional[Dict[str, "QueryAggregation"]] = None,
filter: Optional["QueryFilter"] = None,
**kwargs
):
super(ForecastDataset, self).__init__(**kwargs)
self.granularity = granularity
self.configuration = configuration
self.aggregation = aggregation
self.filter = filter
class ForecastDefinition(msrest.serialization.Model):
"""The definition of a forecast.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of the forecast. Possible values include: "Usage",
"ActualCost", "AmortizedCost".
:type type: str or ~azure.mgmt.costmanagement.models.ForecastType
:param timeframe: Required. The time frame for pulling data for the forecast. If custom, then a
specific time period must be provided. Possible values include: "MonthToDate",
"BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom".
:type timeframe: str or ~azure.mgmt.costmanagement.models.ForecastTimeframeType
:param time_period: Has time period for pulling data for the forecast.
:type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod
:param dataset: Has definition for data in this forecast.
:type dataset: ~azure.mgmt.costmanagement.models.ForecastDataset
:param include_actual_cost: a boolean determining if actualCost will be included.
:type include_actual_cost: bool
:param include_fresh_partial_cost: a boolean determining if FreshPartialCost will be included.
:type include_fresh_partial_cost: bool
"""
_validation = {
'type': {'required': True},
'timeframe': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'timeframe': {'key': 'timeframe', 'type': 'str'},
'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'},
'dataset': {'key': 'dataset', 'type': 'ForecastDataset'},
'include_actual_cost': {'key': 'includeActualCost', 'type': 'bool'},
'include_fresh_partial_cost': {'key': 'includeFreshPartialCost', 'type': 'bool'},
}
def __init__(
self,
*,
type: Union[str, "ForecastType"],
timeframe: Union[str, "ForecastTimeframeType"],
time_period: Optional["QueryTimePeriod"] = None,
dataset: Optional["ForecastDataset"] = None,
include_actual_cost: Optional[bool] = None,
include_fresh_partial_cost: Optional[bool] = None,
**kwargs
):
super(ForecastDefinition, self).__init__(**kwargs)
self.type = type
self.timeframe = timeframe
self.time_period = time_period
self.dataset = dataset
self.include_actual_cost = include_actual_cost
self.include_fresh_partial_cost = include_fresh_partial_cost
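# Illustrative sketch of a forecast request body (the aggregation column name 'Cost' is an
# assumption about the dataset, not taken from this file):
#
#     dataset = ForecastDataset(
#         granularity='Daily',
#         aggregation={'totalCost': QueryAggregation(name='Cost', function='Sum')})
#     forecast = ForecastDefinition(type='ActualCost', timeframe='MonthToDate',
#                                   dataset=dataset, include_actual_cost=True)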
class KpiProperties(msrest.serialization.Model):
"""Each KPI must contain a 'type' and 'enabled' key.
:param type: KPI type (Forecast, Budget). Possible values include: "Forecast", "Budget".
:type type: str or ~azure.mgmt.costmanagement.models.KpiType
:param id: ID of resource related to metric (budget).
:type id: str
:param enabled: show the KPI in the UI?.
:type enabled: bool
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'enabled': {'key': 'enabled', 'type': 'bool'},
}
def __init__(
self,
*,
type: Optional[Union[str, "KpiType"]] = None,
id: Optional[str] = None,
enabled: Optional[bool] = None,
**kwargs
):
super(KpiProperties, self).__init__(**kwargs)
self.type = type
self.id = id
self.enabled = enabled
class Operation(msrest.serialization.Model):
"""A Cost management REST API operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Operation name: {provider}/{resource}/{operation}.
:vartype name: str
:param display: The object that represents the operation.
:type display: ~azure.mgmt.costmanagement.models.OperationDisplay
"""
_validation = {
'name': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
*,
display: Optional["OperationDisplay"] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = None
self.display = display
class OperationDisplay(msrest.serialization.Model):
"""The object that represents the operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: Service provider: Microsoft.CostManagement.
:vartype provider: str
:ivar resource: Resource on which the operation is performed: Dimensions, Query.
:vartype resource: str
:ivar operation: Operation type: Read, write, delete, etc.
:vartype operation: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
class OperationListResult(msrest.serialization.Model):
"""Result of listing cost management operations. It contains a list of operations and a URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of cost management operations supported by the Microsoft.CostManagement
resource provider.
:vartype value: list[~azure.mgmt.costmanagement.models.Operation]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class PivotProperties(msrest.serialization.Model):
"""Each pivot must contain a 'type' and 'name'.
:param type: Data type to show in view. Possible values include: "Dimension", "TagKey".
:type type: str or ~azure.mgmt.costmanagement.models.PivotType
:param name: Data field to show in view.
:type name: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "PivotType"]] = None,
name: Optional[str] = None,
**kwargs
):
super(PivotProperties, self).__init__(**kwargs)
self.type = type
self.name = name
class QueryAggregation(msrest.serialization.Model):
"""The aggregation expression to be used in the query.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the column to aggregate.
:type name: str
:param function: Required. The name of the aggregation function to use. Possible values
include: "Sum".
:type function: str or ~azure.mgmt.costmanagement.models.FunctionType
"""
_validation = {
'name': {'required': True},
'function': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'function': {'key': 'function', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
function: Union[str, "FunctionType"],
**kwargs
):
super(QueryAggregation, self).__init__(**kwargs)
self.name = name
self.function = function
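# Illustrative usage (editor's sketch): aggregations are passed to a dataset as a dict
# keyed by an alias of your choosing. "totalCost" and the column "Cost" are
# hypothetical names; "Sum" is the only function documented above.
#
#     aggregation = {"totalCost": QueryAggregation(name="Cost", function="Sum")}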
class QueryColumn(msrest.serialization.Model):
"""QueryColumn.
:param name: The name of column.
:type name: str
:param type: The type of column.
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
type: Optional[str] = None,
**kwargs
):
super(QueryColumn, self).__init__(**kwargs)
self.name = name
self.type = type
class QueryComparisonExpression(msrest.serialization.Model):
"""The comparison expression to be used in the query.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the column to use in comparison.
:type name: str
:param operator: Required. The operator to use for comparison. Possible values include: "In",
"Contains".
:type operator: str or ~azure.mgmt.costmanagement.models.OperatorType
:param values: Required. Array of values to use for comparison.
:type values: list[str]
"""
_validation = {
'name': {'required': True},
'operator': {'required': True},
'values': {'required': True, 'min_items': 1},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'operator': {'key': 'operator', 'type': 'str'},
'values': {'key': 'values', 'type': '[str]'},
}
def __init__(
self,
*,
name: str,
operator: Union[str, "OperatorType"],
values: List[str],
**kwargs
):
super(QueryComparisonExpression, self).__init__(**kwargs)
self.name = name
self.operator = operator
self.values = values
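# Illustrative usage (editor's sketch): restrict results to two resource groups. The
# dimension name "ResourceGroupName" and the group names are hypothetical values.
#
#     expr = QueryComparisonExpression(
#         name="ResourceGroupName",
#         operator="In",
#         values=["rg-prod", "rg-staging"],
#     )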
class QueryDataset(msrest.serialization.Model):
"""The definition of data present in the query.
:param granularity: The granularity of rows in the query. Possible values include: "Daily".
:type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType
:param configuration: Has configuration information for the data in the export. The
configuration will be ignored if aggregation and grouping are provided.
:type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration
    :param aggregation: Dictionary of aggregation expressions to use in the query. The key of each
item in the dictionary is the alias for the aggregated column. Query can have up to 2
aggregation clauses.
:type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation]
    :param grouping: Array of group by expressions to use in the query. Query can have up to 2 group
by clauses.
:type grouping: list[~azure.mgmt.costmanagement.models.QueryGrouping]
:param filter: Has filter expression to use in the query.
:type filter: ~azure.mgmt.costmanagement.models.QueryFilter
"""
_validation = {
'grouping': {'max_items': 2, 'min_items': 0},
}
_attribute_map = {
'granularity': {'key': 'granularity', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'},
'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'},
'grouping': {'key': 'grouping', 'type': '[QueryGrouping]'},
'filter': {'key': 'filter', 'type': 'QueryFilter'},
}
def __init__(
self,
*,
granularity: Optional[Union[str, "GranularityType"]] = None,
configuration: Optional["QueryDatasetConfiguration"] = None,
aggregation: Optional[Dict[str, "QueryAggregation"]] = None,
grouping: Optional[List["QueryGrouping"]] = None,
filter: Optional["QueryFilter"] = None,
**kwargs
):
super(QueryDataset, self).__init__(**kwargs)
self.granularity = granularity
self.configuration = configuration
self.aggregation = aggregation
self.grouping = grouping
self.filter = filter
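# Illustrative usage (editor's sketch): a daily dataset with one aggregation and one
# group-by, within the 2-item limits noted above. Column and dimension names are
# hypothetical.
#
#     dataset = QueryDataset(
#         granularity="Daily",
#         aggregation={"totalCost": QueryAggregation(name="Cost", function="Sum")},
#         grouping=[QueryGrouping(type="Dimension", name="ServiceName")],
#     )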
class QueryDatasetConfiguration(msrest.serialization.Model):
"""The configuration of dataset in the query.
:param columns: Array of column names to be included in the query. Any valid query column name
     is allowed. If not provided, then the query includes all columns.
:type columns: list[str]
"""
_attribute_map = {
'columns': {'key': 'columns', 'type': '[str]'},
}
def __init__(
self,
*,
columns: Optional[List[str]] = None,
**kwargs
):
super(QueryDatasetConfiguration, self).__init__(**kwargs)
self.columns = columns
class QueryDefinition(msrest.serialization.Model):
"""The definition of a query.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of the query. Possible values include: "Usage", "ActualCost",
"AmortizedCost".
:type type: str or ~azure.mgmt.costmanagement.models.ExportType
:param timeframe: Required. The time frame for pulling data for the query. If custom, then a
specific time period must be provided. Possible values include: "MonthToDate",
"BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom".
:type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType
:param time_period: Has time period for pulling data for the query.
:type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod
:param dataset: Has definition for data in this query.
:type dataset: ~azure.mgmt.costmanagement.models.QueryDataset
"""
_validation = {
'type': {'required': True},
'timeframe': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'timeframe': {'key': 'timeframe', 'type': 'str'},
'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'},
'dataset': {'key': 'dataset', 'type': 'QueryDataset'},
}
def __init__(
self,
*,
type: Union[str, "ExportType"],
timeframe: Union[str, "TimeframeType"],
time_period: Optional["QueryTimePeriod"] = None,
dataset: Optional["QueryDataset"] = None,
**kwargs
):
super(QueryDefinition, self).__init__(**kwargs)
self.type = type
self.timeframe = timeframe
self.time_period = time_period
self.dataset = dataset
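# Illustrative usage (editor's sketch): a complete query with a custom timeframe. The
# dates are arbitrary, and ``datetime`` is assumed to be imported at the top of this
# module, as the annotations elsewhere in the file require.
#
#     query = QueryDefinition(
#         type="ActualCost",
#         timeframe="Custom",
#         time_period=QueryTimePeriod(
#             from_property=datetime.datetime(2021, 1, 1),
#             to=datetime.datetime(2021, 1, 31),
#         ),
#         dataset=QueryDataset(
#             granularity="Daily",
#             aggregation={"totalCost": QueryAggregation(name="Cost", function="Sum")},
#         ),
#     )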
class QueryFilter(msrest.serialization.Model):
"""The filter expression to be used in the export.
:param and_property: The logical "AND" expression. Must have at least 2 items.
:type and_property: list[~azure.mgmt.costmanagement.models.QueryFilter]
:param or_property: The logical "OR" expression. Must have at least 2 items.
:type or_property: list[~azure.mgmt.costmanagement.models.QueryFilter]
:param not_property: The logical "NOT" expression.
:type not_property: ~azure.mgmt.costmanagement.models.QueryFilter
:param dimension: Has comparison expression for a dimension.
:type dimension: ~azure.mgmt.costmanagement.models.QueryComparisonExpression
:param tag: Has comparison expression for a tag.
:type tag: ~azure.mgmt.costmanagement.models.QueryComparisonExpression
"""
_validation = {
'and_property': {'min_items': 2},
'or_property': {'min_items': 2},
}
_attribute_map = {
'and_property': {'key': 'and', 'type': '[QueryFilter]'},
'or_property': {'key': 'or', 'type': '[QueryFilter]'},
'not_property': {'key': 'not', 'type': 'QueryFilter'},
'dimension': {'key': 'dimension', 'type': 'QueryComparisonExpression'},
'tag': {'key': 'tag', 'type': 'QueryComparisonExpression'},
}
def __init__(
self,
*,
and_property: Optional[List["QueryFilter"]] = None,
or_property: Optional[List["QueryFilter"]] = None,
not_property: Optional["QueryFilter"] = None,
dimension: Optional["QueryComparisonExpression"] = None,
tag: Optional["QueryComparisonExpression"] = None,
**kwargs
):
super(QueryFilter, self).__init__(**kwargs)
self.and_property = and_property
self.or_property = or_property
self.not_property = not_property
self.dimension = dimension
self.tag = tag
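# Illustrative usage (editor's sketch): combine a dimension filter and a tag filter
# with a logical AND, which requires at least two items per the validation above.
# Dimension/tag names and values are hypothetical.
#
#     flt = QueryFilter(
#         and_property=[
#             QueryFilter(dimension=QueryComparisonExpression(
#                 name="ResourceLocation", operator="In", values=["westus"])),
#             QueryFilter(tag=QueryComparisonExpression(
#                 name="Environment", operator="In", values=["prod"])),
#         ]
#     )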
class QueryGrouping(msrest.serialization.Model):
"""The group by expression to be used in the query.
All required parameters must be populated in order to send to Azure.
:param type: Required. Has type of the column to group. Possible values include: "Tag",
"Dimension".
:type type: str or ~azure.mgmt.costmanagement.models.QueryColumnType
:param name: Required. The name of the column to group.
:type name: str
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
type: Union[str, "QueryColumnType"],
name: str,
**kwargs
):
super(QueryGrouping, self).__init__(**kwargs)
self.type = type
self.name = name
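# Illustrative usage (editor's sketch): group query results by a dimension ("Tag" is
# the other documented column type). The dimension name is hypothetical.
#
#     grouping = [QueryGrouping(type="Dimension", name="ResourceGroupName")]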
class QueryResult(Resource):
"""Result of query. It contains all columns listed under groupings and aggregation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:param next_link: The link (url) to the next page of results.
:type next_link: str
:param columns: Array of columns.
:type columns: list[~azure.mgmt.costmanagement.models.QueryColumn]
:param rows: Array of rows.
:type rows: list[list[object]]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'tags': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'next_link': {'key': 'properties.nextLink', 'type': 'str'},
'columns': {'key': 'properties.columns', 'type': '[QueryColumn]'},
'rows': {'key': 'properties.rows', 'type': '[[object]]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
columns: Optional[List["QueryColumn"]] = None,
rows: Optional[List[List[object]]] = None,
**kwargs
):
super(QueryResult, self).__init__(**kwargs)
self.next_link = next_link
self.columns = columns
self.rows = rows
class QueryTimePeriod(msrest.serialization.Model):
"""The start and end date for pulling data for the query.
All required parameters must be populated in order to send to Azure.
:param from_property: Required. The start date to pull data from.
:type from_property: ~datetime.datetime
:param to: Required. The end date to pull data to.
:type to: ~datetime.datetime
"""
_validation = {
'from_property': {'required': True},
'to': {'required': True},
}
_attribute_map = {
'from_property': {'key': 'from', 'type': 'iso-8601'},
'to': {'key': 'to', 'type': 'iso-8601'},
}
def __init__(
self,
*,
from_property: datetime.datetime,
to: datetime.datetime,
**kwargs
):
super(QueryTimePeriod, self).__init__(**kwargs)
self.from_property = from_property
self.to = to
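# Illustrative usage (editor's sketch): a one-month window for a "Custom" timeframe.
# Assumes ``datetime`` is imported at the top of this module, as the annotations above
# require; the exact dates are arbitrary and serialize as ISO-8601 per the attribute map.
#
#     period = QueryTimePeriod(
#         from_property=datetime.datetime(2021, 3, 1),
#         to=datetime.datetime(2021, 3, 31, 23, 59, 59),
#     )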
class ReportConfigAggregation(msrest.serialization.Model):
"""The aggregation expression to be used in the report.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the column to aggregate.
:type name: str
:param function: Required. The name of the aggregation function to use. Possible values
include: "Sum".
:type function: str or ~azure.mgmt.costmanagement.models.FunctionType
"""
_validation = {
'name': {'required': True},
'function': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'function': {'key': 'function', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
function: Union[str, "FunctionType"],
**kwargs
):
super(ReportConfigAggregation, self).__init__(**kwargs)
self.name = name
self.function = function
class ReportConfigComparisonExpression(msrest.serialization.Model):
"""The comparison expression to be used in the report.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the column to use in comparison.
:type name: str
:param operator: Required. The operator to use for comparison. Possible values include: "In",
"Contains".
:type operator: str or ~azure.mgmt.costmanagement.models.OperatorType
:param values: Required. Array of values to use for comparison.
:type values: list[str]
"""
_validation = {
'name': {'required': True},
'operator': {'required': True},
'values': {'required': True, 'min_items': 1},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'operator': {'key': 'operator', 'type': 'str'},
'values': {'key': 'values', 'type': '[str]'},
}
def __init__(
self,
*,
name: str,
operator: Union[str, "OperatorType"],
values: List[str],
**kwargs
):
super(ReportConfigComparisonExpression, self).__init__(**kwargs)
self.name = name
self.operator = operator
self.values = values
class ReportConfigDataset(msrest.serialization.Model):
"""The definition of data present in the report.
:param granularity: The granularity of rows in the report. Possible values include: "Daily",
"Monthly".
:type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType
:param configuration: Has configuration information for the data in the report. The
configuration will be ignored if aggregation and grouping are provided.
:type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration
    :param aggregation: Dictionary of aggregation expressions to use in the report. The key of each
item in the dictionary is the alias for the aggregated column. Report can have up to 2
aggregation clauses.
:type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation]
    :param grouping: Array of group by expressions to use in the report. Report can have up to 2
group by clauses.
:type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping]
    :param sorting: Array of order by expressions to use in the report.
:type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting]
:param filter: Has filter expression to use in the report.
:type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilter
"""
_validation = {
'grouping': {'max_items': 2, 'min_items': 0},
}
_attribute_map = {
'granularity': {'key': 'granularity', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'},
'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'},
'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'},
'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'},
'filter': {'key': 'filter', 'type': 'ReportConfigFilter'},
}
def __init__(
self,
*,
granularity: Optional[Union[str, "ReportGranularityType"]] = None,
configuration: Optional["ReportConfigDatasetConfiguration"] = None,
aggregation: Optional[Dict[str, "ReportConfigAggregation"]] = None,
grouping: Optional[List["ReportConfigGrouping"]] = None,
sorting: Optional[List["ReportConfigSorting"]] = None,
filter: Optional["ReportConfigFilter"] = None,
**kwargs
):
super(ReportConfigDataset, self).__init__(**kwargs)
self.granularity = granularity
self.configuration = configuration
self.aggregation = aggregation
self.grouping = grouping
self.sorting = sorting
self.filter = filter
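# Illustrative usage (editor's sketch): a report dataset with one aggregation, one
# group-by and a descending sort. The alias and column/dimension names are
# hypothetical; the enum strings come from the docstrings above.
#
#     report_dataset = ReportConfigDataset(
#         granularity="Monthly",
#         aggregation={"totalCost": ReportConfigAggregation(name="Cost", function="Sum")},
#         grouping=[ReportConfigGrouping(type="Dimension", name="ServiceName")],
#         sorting=[ReportConfigSorting(name="Cost", direction="Descending")],
#     )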
class ReportConfigDatasetAutoGenerated(msrest.serialization.Model):
"""The definition of data present in the report.
:param granularity: The granularity of rows in the report. Possible values include: "Daily",
"Monthly".
:type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType
:param configuration: Has configuration information for the data in the report. The
configuration will be ignored if aggregation and grouping are provided.
:type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration
    :param aggregation: Dictionary of aggregation expressions to use in the report. The key of each
item in the dictionary is the alias for the aggregated column. Report can have up to 2
aggregation clauses.
:type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation]
    :param grouping: Array of group by expressions to use in the report. Report can have up to 2
group by clauses.
:type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping]
    :param sorting: Array of order by expressions to use in the report.
:type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting]
:param filter: Has filter expression to use in the report.
:type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated
"""
_validation = {
'grouping': {'max_items': 2, 'min_items': 0},
}
_attribute_map = {
'granularity': {'key': 'granularity', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'},
'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'},
'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'},
'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'},
'filter': {'key': 'filter', 'type': 'ReportConfigFilterAutoGenerated'},
}
def __init__(
self,
*,
granularity: Optional[Union[str, "ReportGranularityType"]] = None,
configuration: Optional["ReportConfigDatasetConfiguration"] = None,
aggregation: Optional[Dict[str, "ReportConfigAggregation"]] = None,
grouping: Optional[List["ReportConfigGrouping"]] = None,
sorting: Optional[List["ReportConfigSorting"]] = None,
filter: Optional["ReportConfigFilterAutoGenerated"] = None,
**kwargs
):
super(ReportConfigDatasetAutoGenerated, self).__init__(**kwargs)
self.granularity = granularity
self.configuration = configuration
self.aggregation = aggregation
self.grouping = grouping
self.sorting = sorting
self.filter = filter
class ReportConfigDatasetConfiguration(msrest.serialization.Model):
"""The configuration of dataset in the report.
:param columns: Array of column names to be included in the report. Any valid report column
     name is allowed. If not provided, then the report includes all columns.
:type columns: list[str]
"""
_attribute_map = {
'columns': {'key': 'columns', 'type': '[str]'},
}
def __init__(
self,
*,
columns: Optional[List[str]] = None,
**kwargs
):
super(ReportConfigDatasetConfiguration, self).__init__(**kwargs)
self.columns = columns
class ReportConfigDefinition(msrest.serialization.Model):
"""The definition of a report config.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of the report. Usage represents actual usage, forecast
represents forecasted data and UsageAndForecast represents both usage and forecasted data.
Actual usage and forecasted data can be differentiated based on dates. Possible values include:
"Usage".
:type type: str or ~azure.mgmt.costmanagement.models.ReportType
:param timeframe: Required. The time frame for pulling data for the report. If custom, then a
specific time period must be provided. Possible values include: "WeekToDate", "MonthToDate",
"YearToDate", "Custom".
:type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType
:param time_period: Has time period for pulling data for the report.
:type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod
:param dataset: Has definition for data in this report config.
:type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated
"""
_validation = {
'type': {'required': True},
'timeframe': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'timeframe': {'key': 'timeframe', 'type': 'str'},
'time_period': {'key': 'timePeriod', 'type': 'ReportConfigTimePeriod'},
'dataset': {'key': 'dataset', 'type': 'ReportConfigDatasetAutoGenerated'},
}
def __init__(
self,
*,
type: Union[str, "ReportType"],
timeframe: Union[str, "ReportTimeframeType"],
time_period: Optional["ReportConfigTimePeriod"] = None,
dataset: Optional["ReportConfigDatasetAutoGenerated"] = None,
**kwargs
):
super(ReportConfigDefinition, self).__init__(**kwargs)
self.type = type
self.timeframe = timeframe
self.time_period = time_period
self.dataset = dataset
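# Illustrative usage (editor's sketch): a month-to-date usage report definition. Note
# that this class takes the "AutoGenerated" dataset variant, whose filter type differs
# from ReportConfigDataset.
#
#     report = ReportConfigDefinition(
#         type="Usage",
#         timeframe="MonthToDate",
#         dataset=ReportConfigDatasetAutoGenerated(granularity="Monthly"),
#     )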
class ReportConfigFilter(msrest.serialization.Model):
"""The filter expression to be used in the report.
:param and_property: The logical "AND" expression. Must have at least 2 items.
:type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter]
:param or_property: The logical "OR" expression. Must have at least 2 items.
:type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter]
:param not_property: The logical "NOT" expression.
:type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilter
:param dimension: Has comparison expression for a dimension.
:type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression
:param tag: Has comparison expression for a tag.
:type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression
"""
_validation = {
'and_property': {'min_items': 2},
'or_property': {'min_items': 2},
}
_attribute_map = {
'and_property': {'key': 'and', 'type': '[ReportConfigFilter]'},
'or_property': {'key': 'or', 'type': '[ReportConfigFilter]'},
'not_property': {'key': 'not', 'type': 'ReportConfigFilter'},
'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'},
'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'},
}
def __init__(
self,
*,
and_property: Optional[List["ReportConfigFilter"]] = None,
or_property: Optional[List["ReportConfigFilter"]] = None,
not_property: Optional["ReportConfigFilter"] = None,
dimension: Optional["ReportConfigComparisonExpression"] = None,
tag: Optional["ReportConfigComparisonExpression"] = None,
**kwargs
):
super(ReportConfigFilter, self).__init__(**kwargs)
self.and_property = and_property
self.or_property = or_property
self.not_property = not_property
self.dimension = dimension
self.tag = tag
class ReportConfigFilterAutoGenerated(msrest.serialization.Model):
"""The filter expression to be used in the report.
:param and_property: The logical "AND" expression. Must have at least 2 items.
:type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated]
:param or_property: The logical "OR" expression. Must have at least 2 items.
:type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated]
:param not_property: The logical "NOT" expression.
:type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated
:param dimension: Has comparison expression for a dimension.
:type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression
:param tag: Has comparison expression for a tag.
:type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression
"""
_validation = {
'and_property': {'min_items': 2},
'or_property': {'min_items': 2},
}
_attribute_map = {
'and_property': {'key': 'and', 'type': '[ReportConfigFilterAutoGenerated]'},
'or_property': {'key': 'or', 'type': '[ReportConfigFilterAutoGenerated]'},
'not_property': {'key': 'not', 'type': 'ReportConfigFilterAutoGenerated'},
'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'},
'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'},
}
def __init__(
self,
*,
and_property: Optional[List["ReportConfigFilterAutoGenerated"]] = None,
or_property: Optional[List["ReportConfigFilterAutoGenerated"]] = None,
not_property: Optional["ReportConfigFilterAutoGenerated"] = None,
dimension: Optional["ReportConfigComparisonExpression"] = None,
tag: Optional["ReportConfigComparisonExpression"] = None,
**kwargs
):
super(ReportConfigFilterAutoGenerated, self).__init__(**kwargs)
self.and_property = and_property
self.or_property = or_property
self.not_property = not_property
self.dimension = dimension
self.tag = tag
class ReportConfigGrouping(msrest.serialization.Model):
"""The group by expression to be used in the report.
All required parameters must be populated in order to send to Azure.
:param type: Required. Has type of the column to group. Possible values include: "Tag",
"Dimension".
:type type: str or ~azure.mgmt.costmanagement.models.ReportConfigColumnType
    :param name: Required. The name of the column to group. This version supports subscription as
     the lowest possible grain.
:type name: str
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
type: Union[str, "ReportConfigColumnType"],
name: str,
**kwargs
):
super(ReportConfigGrouping, self).__init__(**kwargs)
self.type = type
self.name = name
class ReportConfigSorting(msrest.serialization.Model):
"""The order by expression to be used in the report.
All required parameters must be populated in order to send to Azure.
:param direction: Direction of sort. Possible values include: "Ascending", "Descending".
:type direction: str or ~azure.mgmt.costmanagement.models.ReportConfigSortingDirection
:param name: Required. The name of the column to sort.
:type name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'direction': {'key': 'direction', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
direction: Optional[Union[str, "ReportConfigSortingDirection"]] = None,
**kwargs
):
super(ReportConfigSorting, self).__init__(**kwargs)
self.direction = direction
self.name = name
class ReportConfigTimePeriod(msrest.serialization.Model):
"""The start and end date for pulling data for the report.
All required parameters must be populated in order to send to Azure.
:param from_property: Required. The start date to pull data from.
:type from_property: ~datetime.datetime
:param to: Required. The end date to pull data to.
:type to: ~datetime.datetime
"""
_validation = {
'from_property': {'required': True},
'to': {'required': True},
}
_attribute_map = {
'from_property': {'key': 'from', 'type': 'iso-8601'},
'to': {'key': 'to', 'type': 'iso-8601'},
}
def __init__(
self,
*,
from_property: datetime.datetime,
to: datetime.datetime,
**kwargs
):
super(ReportConfigTimePeriod, self).__init__(**kwargs)
self.from_property = from_property
self.to = to
class View(ProxyResource):
"""States and configurations of Cost Analysis.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
    :param e_tag: eTag of the resource. To handle concurrent update scenarios, this field will be
used to determine whether the user is updating the latest version or not.
:type e_tag: str
:param display_name: User input name of the view. Required.
:type display_name: str
:param scope: Cost Management scope to save the view on. This includes
'subscriptions/{subscriptionId}' for subscription scope,
'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for
Department scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}'
for EnrollmentAccount scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}'
for BillingProfile scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}'
for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}'
for Management Group scope,
'/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for
ExternalBillingAccount scope, and
'/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for
ExternalSubscription scope.
:type scope: str
:ivar created_on: Date the user created this view.
:vartype created_on: ~datetime.datetime
:ivar modified_on: Date when the user last modified this view.
:vartype modified_on: ~datetime.datetime
:param chart: Chart type of the main view in Cost Analysis. Required. Possible values include:
"Area", "Line", "StackedColumn", "GroupedColumn", "Table".
:type chart: str or ~azure.mgmt.costmanagement.models.ChartType
:param accumulated: Show costs accumulated over time. Possible values include: "true", "false".
:type accumulated: str or ~azure.mgmt.costmanagement.models.AccumulatedType
:param metric: Metric to use when displaying costs. Possible values include: "ActualCost",
"AmortizedCost", "AHUB".
:type metric: str or ~azure.mgmt.costmanagement.models.MetricType
:param kpis: List of KPIs to show in Cost Analysis UI.
:type kpis: list[~azure.mgmt.costmanagement.models.KpiProperties]
:param pivots: Configuration of 3 sub-views in the Cost Analysis UI.
:type pivots: list[~azure.mgmt.costmanagement.models.PivotProperties]
:param type_properties_query_type: The type of the report. Usage represents actual usage,
forecast represents forecasted data and UsageAndForecast represents both usage and forecasted
data. Actual usage and forecasted data can be differentiated based on dates. Possible values
include: "Usage".
:type type_properties_query_type: str or ~azure.mgmt.costmanagement.models.ReportType
:param timeframe: The time frame for pulling data for the report. If custom, then a specific
time period must be provided. Possible values include: "WeekToDate", "MonthToDate",
"YearToDate", "Custom".
:type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType
:param time_period: Has time period for pulling data for the report.
:type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod
:param dataset: Has definition for data in this report config.
:type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'str'},
'created_on': {'key': 'properties.createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'properties.modifiedOn', 'type': 'iso-8601'},
'chart': {'key': 'properties.chart', 'type': 'str'},
'accumulated': {'key': 'properties.accumulated', 'type': 'str'},
'metric': {'key': 'properties.metric', 'type': 'str'},
'kpis': {'key': 'properties.kpis', 'type': '[KpiProperties]'},
'pivots': {'key': 'properties.pivots', 'type': '[PivotProperties]'},
'type_properties_query_type': {'key': 'properties.query.type', 'type': 'str'},
'timeframe': {'key': 'properties.query.timeframe', 'type': 'str'},
'time_period': {'key': 'properties.query.timePeriod', 'type': 'ReportConfigTimePeriod'},
'dataset': {'key': 'properties.query.dataset', 'type': 'ReportConfigDataset'},
}
def __init__(
self,
*,
e_tag: Optional[str] = None,
display_name: Optional[str] = None,
scope: Optional[str] = None,
chart: Optional[Union[str, "ChartType"]] = None,
accumulated: Optional[Union[str, "AccumulatedType"]] = None,
metric: Optional[Union[str, "MetricType"]] = None,
kpis: Optional[List["KpiProperties"]] = None,
pivots: Optional[List["PivotProperties"]] = None,
type_properties_query_type: Optional[Union[str, "ReportType"]] = None,
timeframe: Optional[Union[str, "ReportTimeframeType"]] = None,
time_period: Optional["ReportConfigTimePeriod"] = None,
dataset: Optional["ReportConfigDataset"] = None,
**kwargs
):
super(View, self).__init__(e_tag=e_tag, **kwargs)
self.display_name = display_name
self.scope = scope
self.created_on = None
self.modified_on = None
self.chart = chart
self.accumulated = accumulated
self.metric = metric
self.kpis = kpis
self.pivots = pivots
self.type_properties_query_type = type_properties_query_type
self.timeframe = timeframe
self.time_period = time_period
self.dataset = dataset
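# Illustrative usage (editor's sketch): a saved Cost Analysis view scoped to a
# subscription. The subscription id, display name and dimension name are hypothetical;
# the chart/metric/accumulated/timeframe strings come from the docstrings above.
#
#     view = View(
#         display_name="Monthly service costs",
#         scope="subscriptions/00000000-0000-0000-0000-000000000000",
#         chart="StackedColumn",
#         accumulated="false",
#         metric="ActualCost",
#         type_properties_query_type="Usage",
#         timeframe="MonthToDate",
#         dataset=ReportConfigDataset(
#             granularity="Monthly",
#             grouping=[ReportConfigGrouping(type="Dimension", name="ServiceName")],
#         ),
#     )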
class ViewListResult(msrest.serialization.Model):
"""Result of listing views. It contains a list of available views.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of views.
:vartype value: list[~azure.mgmt.costmanagement.models.View]
:ivar next_link: The link (url) to the next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[View]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ViewListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
| 1.90625 | 2 |
test/test_simple_compression.py | jayvdb/brotlipy | 0 | 3735 | <reponame>jayvdb/brotlipy
# -*- coding: utf-8 -*-
"""
test_simple_compression
~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for compression of single chunks.
"""
import brotli
import pytest
from hypothesis import given
from hypothesis.strategies import binary, integers, sampled_from, one_of
def test_roundtrip_compression_with_files(simple_compressed_file):
"""
Roundtripping data through the compressor works correctly.
"""
with open(simple_compressed_file[0], 'rb') as f:
uncompressed_data = f.read()
assert brotli.decompress(
brotli.compress(uncompressed_data)
) == uncompressed_data
@given(
chunk_size=integers(min_value=1, max_value=2**12),
mode=sampled_from(list(brotli.BrotliEncoderMode)),
quality=integers(min_value=0, max_value=11),
lgwin=integers(min_value=10, max_value=24),
lgblock=one_of(
integers(min_value=0, max_value=0),
integers(min_value=16, max_value=24)
),
)
def test_streaming_compression(one_compressed_file,
chunk_size,
mode,
quality,
lgwin,
lgblock):
"""
Confirm that the streaming compressor works as expected.
"""
compressed_chunks = []
c = brotli.Compressor(
mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock
)
with open(one_compressed_file, 'rb') as f:
while True:
next_data = f.read(chunk_size)
if not next_data:
break
compressed_chunks.append(c.compress(next_data))
compressed_chunks.append(c.finish())
decompressed = brotli.decompress(b''.join(compressed_chunks))
with open(one_compressed_file, 'rb') as f:
assert decompressed == f.read()
@given(
chunk_size=integers(min_value=1, max_value=2**12),
mode=sampled_from(list(brotli.BrotliEncoderMode)),
quality=integers(min_value=0, max_value=11),
lgwin=integers(min_value=10, max_value=24),
lgblock=one_of(
integers(min_value=0, max_value=0),
integers(min_value=16, max_value=24)
),
)
def test_streaming_compression_flush(one_compressed_file,
chunk_size,
mode,
quality,
lgwin,
lgblock):
"""
Confirm that the streaming compressor works as expected, including flushes
after each chunk.
"""
compressed_chunks = []
c = brotli.Compressor(
mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock
)
with open(one_compressed_file, 'rb') as f:
while True:
next_data = f.read(chunk_size)
if not next_data:
break
compressed_chunks.append(c.compress(next_data))
compressed_chunks.append(c.flush())
compressed_chunks.append(c.finish())
decompressed = brotli.decompress(b''.join(compressed_chunks))
with open(one_compressed_file, 'rb') as f:
assert decompressed == f.read()
@given(binary())
def test_compressed_data_roundtrips(s):
assert brotli.decompress(brotli.compress(s)) == s
@given(binary(), binary())
def test_compressed_data_with_dictionaries(s, dictionary):
d = brotli.Decompressor(dictionary)
compressed = brotli.compress(s, dictionary=dictionary)
uncompressed = d.decompress(compressed)
assert uncompressed == s
@pytest.mark.parametrize(
"params",
[
{"mode": 52},
{"quality": 52},
{"lgwin": 52},
{"lgblock": 52},
]
)
@pytest.mark.parametrize("exception_cls", [brotli.Error, brotli.error])
def test_bad_compressor_parameters(params, exception_cls):
with pytest.raises(exception_cls):
brotli.Compressor(**params)
| 2.234375 | 2 |
wexapi/models/ticker.py | madmis/wexapi | 3 | 3736 | <filename>wexapi/models/ticker.py
from decimal import Decimal
class Ticker(object):
def __init__(
self,
high: float,
low: float,
avg: float,
vol: float,
vol_cur: int,
last: float,
buy: float,
sell: float,
updated: int,
):
self.high = high
self.low = low
self.avg = avg
self.vol = vol
self.vol_cur = vol_cur
self.last = last
self.buy = buy
self.sell = sell
self.updated = updated
@property
def high(self) -> Decimal:
return self._high
@high.setter
def high(self, value: float):
self._high = Decimal(value)
@property
def low(self) -> Decimal:
return self._low
@low.setter
def low(self, value: float):
self._low = Decimal(value)
@property
def avg(self) -> Decimal:
return self._avg
@avg.setter
def avg(self, value: float):
self._avg = Decimal(value)
@property
def vol(self) -> Decimal:
return self._vol
@vol.setter
def vol(self, value: float):
self._vol = Decimal(value)
@property
def vol_cur(self) -> Decimal:
return self._vol_cur
@vol_cur.setter
def vol_cur(self, value: float):
self._vol_cur = Decimal(value)
@property
def last(self) -> Decimal:
return self._last
@last.setter
def last(self, value: float):
self._last = Decimal(value)
@property
def buy(self) -> Decimal:
return self._buy
@buy.setter
def buy(self, value: float):
self._buy = Decimal(value)
@property
def sell(self) -> Decimal:
return self._sell
@sell.setter
def sell(self, value: float):
self._sell = Decimal(value)
@property
def updated(self) -> int:
return self._updated
@updated.setter
def updated(self, value: int):
self._updated = int(value)
| 2.890625 | 3 |
hard-gists/98bb452dc14e8c40e403/snippet.py | jjhenkel/dockerizeme | 21 | 3737 | <filename>hard-gists/98bb452dc14e8c40e403/snippet.py
from scryptos import *
p1 = 32581479300404876772405716877547
p2 = 27038194053540661979045656526063
p3 = 26440615366395242196516853423447
n = p1*p2*p3
e = 3
c = int(open("flag.enc", "rb").read().encode("hex"), 16)
# from User's Guide to PARI/GP, nth_root function
sqrtnall = 'sqrtnall(x,n)={my(V,r,z,r2);r=sqrtn(x,n,&z);if(!z,error("Impossible case in sqrtn"));if(type(x)=="t_INTMOD"||type(x)=="t_PADIC",r2 = r*z;n=1;while(r2!=r,r2*=z;n++));V=vector(n);V[1]=r;for(i=2,n,V[i]=V[i-1]*z);V}'
c1 = eval(parigp([sqrtnall, "Vec(liftall(sqrtnall(Mod(%d, %d), 3)))" % (c, p1)]))
c2 = eval(parigp([sqrtnall, "Vec(liftall(sqrtnall(Mod(%d, %d), 3)))" % (c, p2)]))
c3 = eval(parigp([sqrtnall, "Vec(liftall(sqrtnall(Mod(%d, %d), 3)))" % (c, p3)]))
"""
c1 = [6149264605288583791069539134541, 13404203109409336045283549715377, 13028011585706956936052628027629]
c2 = [19616973567618515464515107624812]
c3 = [13374868592866626517389128266735, 7379361747422713811654086477766, 5686385026105901867473638678946]
"""
for x in c1:
for y in c2:
for z in c3:
crt = chinese_remainder_theorem([(x, p1), (y, p2), (z, p3)])
d = hex(crt, 2)[2:].decode("hex")
if "0ctf" in d:
print d[d.find("0ctf"):].strip()
| 1.984375 | 2 |
musa/migrations/0001_initial.py | ccsreenidhin/Music-Web-Django | 0 | 3738 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-29 06:43
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import musa.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='MusicCollection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=70, null=True)),
('document', models.FileField(upload_to=musa.models.get_upload_path)),
('uploaded_at', models.DateTimeField(auto_now_add=True, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fullname', models.CharField(blank=True, max_length=70)),
('favourite_music', models.CharField(blank=True, max_length=70)),
('about', models.TextField(blank=True, max_length=300)),
('picture', models.ImageField(default='/profile_images/avatar.jpeg', upload_to='profile_images')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 1.664063 | 2 |
nuitka/codegen/LoopCodes.py | RESP3CT88/Nuitka | 1 | 3739 | # Copyright 2021, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Loop codes.
Code generation for loops, breaking them, or continuing them. In Nuitka, there
are no for-loops or while-loops at this point. They have been re-formulated in
a simpler loop without a condition, and statements there-in that break under
certain conditions.
See Developer Manual for how the CPython loops are mapped to these nodes.
"""
from .CodeHelpers import generateStatementSequenceCode
from .ErrorCodes import getErrorExitBoolCode
from .ExceptionCodes import getExceptionUnpublishedReleaseCode
from .LabelCodes import getGotoCode, getLabelCode
def generateLoopBreakCode(statement, emit, context):
# Functions used for generation all accept statement, but this one does
# not use it. pylint: disable=unused-argument
getExceptionUnpublishedReleaseCode(emit, context)
break_target = context.getLoopBreakTarget()
getGotoCode(break_target, emit)
def generateLoopContinueCode(statement, emit, context):
# Functions used for generation all accept statement, but this one does
# not use it. pylint: disable=unused-argument
getExceptionUnpublishedReleaseCode(emit, context)
continue_target = context.getLoopContinueTarget()
getGotoCode(continue_target, emit)
def generateLoopCode(statement, emit, context):
loop_start_label = context.allocateLabel("loop_start")
if not statement.isStatementAborting():
loop_end_label = context.allocateLabel("loop_end")
else:
loop_end_label = None
getLabelCode(loop_start_label, emit)
old_loop_break = context.setLoopBreakTarget(loop_end_label)
old_loop_continue = context.setLoopContinueTarget(loop_start_label)
generateStatementSequenceCode(
statement_sequence=statement.subnode_loop_body,
allow_none=True,
emit=emit,
context=context,
)
context.setLoopBreakTarget(old_loop_break)
context.setLoopContinueTarget(old_loop_continue)
# Note: We are using the wrong line here, but it's an exception, it's unclear what line it would be anyway.
old_source_ref = context.setCurrentSourceCodeReference(
statement.getSourceReference()
)
getErrorExitBoolCode(
condition="CONSIDER_THREADING() == false", emit=emit, context=context
)
context.setCurrentSourceCodeReference(old_source_ref)
getGotoCode(loop_start_label, emit)
if loop_end_label is not None:
getLabelCode(loop_end_label, emit)
| 2.015625 | 2 |
3_module/C_BloomFilter.py | L4mborg1n1-D14610/Algoritms_and_DataStructure | 0 | 3740 | import math
from sys import exit
# итак, n - приблизительное число элементов в массиве, P - вероятность ложноположительного ответа, тогда размер
# структуры m = -(nlog2P) / ln2 (2 - основание), количество хеш-функций будет равно -log2P
# хеш-функции используются вида: (((i + 1)*x + p(i+1)) mod M) mod m,где - x - ключ, i - номер хэш-функции,
# pi - i-тое по счету простое число, а M - 31ое число Мерсенна, M = 2^31 - 1, M = 2 147 483 647, M - простое число.
# При подсчёте хеш-функций необходимо знать первые k простых чисел. Посчитаем их один раз в конструкторе BloomFilter
# и будем хранить в структуре данных.
# Также нам необходимо создать битовый массив размера m, однако по умолчанию в питоне битовый массив отсутствует,
# поэтому будем использовать байтовый массив. Реализуем для удобства отдельную СД, из методов необходимо: изменить
# указанный бит на 1, проверить является ли указанный бит 1 и напечатать (вернуть) сам массив
Mersen_31 = 2147483647
class BitArray:
def __init__(self, size):
self.__array = bytearray(int(math.ceil(size / 8)))
self.__size = size
def add_bit(self, i):
# i-тый бит содержится в i//8 байте на i % 8 месте
self.__array[i // 8] |= 2 ** (7 - (i % 8))
def check_bit(self, i):
if (self.__array[i // 8] & (2 ** (7 - (i % 8)))) == 0:
return False
else:
return True
def print(self):
array_str = ""
for byte in self.__array:
_line = str(bin(byte))[2:]
if len(_line) != 8:
_line = '0' * (8 - len(_line)) + _line
array_str += _line
return array_str[:self.__size]
class BloomFilter:
def __init__(self, n: int, p: float):
self.size = int(-round(n * math.log2(p) / math.log(2)))
self.hash_numbers = int(-round(math.log2(p)))
self.__prime_numbers = list()
self.__get_prime(self.hash_numbers + 1)
self.__bitarray = BitArray(self.size)
def __get_prime(self, prime_size):
# обычный проход по всем числам и их проверка на простоту - сложно по времени
# немного упростим: во-первых будем идти с интервалом 2, начиная от 3, а после новое число проверять на
# делимость на уже найденные простые числа (кроме двойки, мы же рассматриваем нечётные)
if prime_size == 1:
self.__prime_numbers.append(2)
return
self.__prime_numbers.append(2)
i = 3
while len(self.__prime_numbers) < prime_size:
j = 1
prime_flag = True
while j < len(self.__prime_numbers):
if (i % self.__prime_numbers[j]) == 0:
prime_flag = False
break
j += 1
if prime_flag:
self.__prime_numbers.append(i)
i += 2
def __get_hash(self, x, i):
return (((i + 1) * x + self.__prime_numbers[i]) % Mersen_31) % self.size
def add(self, key: int):
i = 0
while i < self.hash_numbers:
self.__bitarray.add_bit(self.__get_hash(key, i))
i += 1
def search(self, key: int):
i = 0
while i < self.hash_numbers:
if not self.__bitarray.check_bit(self.__get_hash(key, i)):
return False
i += 1
return True
def print(self):
return self.__bitarray.print()
bloom_filter = 0
while True:
try:
line = input().split()
if len(line) == 0:
continue
else:
if line[0] == "set":
try:
elements_number = int(line[1])
probability = float(line[2])
if (elements_number <= 0) | (probability <= 0) | (probability >= 1):
print("error")
continue
bloom_filter = BloomFilter(elements_number, probability)
if (bloom_filter.size == 0) | (bloom_filter.hash_numbers == 0):
print("error")
continue
break
except TypeError:
print("error")
continue
else:
print("error")
continue
except EOFError:
exit()
print(bloom_filter.size, bloom_filter.hash_numbers)
while True:
try:
line = input().split()
if len(line) == 0:
continue
elif line[0] == "print":
print(bloom_filter.print())
elif (line[0] == "add") & (line[1].isnumeric()):
bloom_filter.add(int(line[1]))
elif (line[0] == "search") & (line[1].isnumeric()):
print(int(bloom_filter.search(int(line[1]))))
else:
print("error")
except EOFError:
break
| 2.40625 | 2 |
pyzmq/examples/pubsub/subscriber.py | Surfndez/source-publish | 0 | 3741 | """A test that subscribes to NumPy arrays.
Uses REQ/REP (on PUB/SUB socket + 1) to synchronize
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2010 <NAME>
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import time
import zmq
import numpy
def sync(connect_to):
# use connect socket + 1
sync_with = ':'.join(connect_to.split(':')[:-1] +
[str(int(connect_to.split(':')[-1]) + 1)]
)
ctx = zmq.Context.instance()
s = ctx.socket(zmq.REQ)
s.connect(sync_with)
s.send('READY')
s.recv()
def main():
if len (sys.argv) != 3:
print 'usage: subscriber <connect_to> <array-count>'
sys.exit (1)
try:
connect_to = sys.argv[1]
array_count = int (sys.argv[2])
except (ValueError, OverflowError), e:
print 'array-count must be integers'
sys.exit (1)
ctx = zmq.Context()
s = ctx.socket(zmq.SUB)
s.connect(connect_to)
s.setsockopt(zmq.SUBSCRIBE,'')
sync(connect_to)
start = time.clock()
print "Receiving arrays..."
for i in range(array_count):
a = s.recv_pyobj()
print " Done."
end = time.clock()
elapsed = (end - start) * 1000000
if elapsed == 0:
elapsed = 1
throughput = (1000000.0 * float (array_count)) / float (elapsed)
message_size = a.nbytes
megabits = float (throughput * message_size * 8) / 1000000
print "message size: %.0f [B]" % (message_size, )
print "array count: %.0f" % (array_count, )
print "mean throughput: %.0f [msg/s]" % (throughput, )
print "mean throughput: %.3f [Mb/s]" % (megabits, )
time.sleep(1.0)
if __name__ == "__main__":
main()
| 2.625 | 3 |
Doc/includes/sqlite3/load_extension.py | livioso/cpython | 36 | 3742 | import sqlite3
con = sqlite3.connect(":memory:")
# enable extension loading
con.enable_load_extension(True)
# Load the fulltext search extension
con.execute("select load_extension('./fts3.so')")
# alternatively you can load the extension using an API call:
# con.load_extension("./fts3.so")
# disable extension loading again
con.enable_load_extension(False)
# example from SQLite wiki
con.execute("create virtual table recipe using fts3(name, ingredients)")
con.executescript("""
insert into recipe (name, ingredients) values ('broccoli stew', 'broccoli peppers cheese tomatoes');
insert into recipe (name, ingredients) values ('pumpkin stew', 'pumpkin onions garlic celery');
insert into recipe (name, ingredients) values ('broccoli pie', 'broccoli cheese onions flour');
insert into recipe (name, ingredients) values ('pumpkin pie', 'pumpkin sugar flour butter');
""")
for row in con.execute("select rowid, name, ingredients from recipe where name match 'pie'"):
print(row)
| 3.015625 | 3 |
lingvo/core/inference_graph_exporter.py | RunzheYang/lingvo | 1 | 3743 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility for exporting an InferenceGraph proto from model params."""
import collections
import contextlib
import re
import lingvo.compat as tf
from lingvo.core import base_model
from lingvo.core import bfloat16_variables
from lingvo.core import inference_graph_pb2
from lingvo.core import py_utils
import six
from google.protobuf import text_format
FLAGS = tf.flags.FLAGS
# InferenceDeviceOptions contains options to configure inference on the device.
# device: Device to infer on.
# retain_device_placement: If true, the specified device in the generated
# inference graph nodes will be retained. Otherwise, the specified device
# will be cleared, so that the runtime can choose automatically.
# var_options: Options on handling variables. For TPUs, variables can be
# either placed on device through 'ON_DEVICE' option, or treated as
# constants with AS_CONSTANTS.
# gen_init_op: Whether to serialize initialization ops for the device. For TPUs,
# servers can be initialized globally once, in which case this should be
# turned off to avoid tripping initialization checks.
# dtype_override: Whether to override the dtype to use for activations and
# weights in the model. Options supported are None or tf.bfloat16.
InferenceDeviceOptions = collections.namedtuple('InferenceDeviceOptions', [
'device', 'retain_device_placement', 'var_options', 'gen_init_op',
'dtype_override', 'fprop_dtype_override'
])
_CONST_GUARANTEE = None
@contextlib.contextmanager
def NoConstGuaranteeScope():
"""Disallow const gauranteeing variable with-in scope."""
global _CONST_GUARANTEE
var_scope = tf.get_variable_scope()
old_caching_device = var_scope.caching_device
old_val = _CONST_GUARANTEE
var_scope.set_caching_device(None)
_CONST_GUARANTEE = False
yield
_CONST_GUARANTEE = old_val
var_scope.set_caching_device(old_caching_device)
# Marks variable as constants for compilation
def MaybeGuaranteeConstGetter(getter, name, *args, **kwargs):
global _CONST_GUARANTEE
if _CONST_GUARANTEE:
with tf.control_dependencies(None):
return tf.guarantee_const(
getter(name, *args, **kwargs), name=name + '/GuaranteeConst')
else:
return getter(name, *args, **kwargs)
@contextlib.contextmanager
def ConstGuaranteeScope():
"""Treats all variables under this scope as constants."""
global _CONST_GUARANTEE
var_scope = tf.get_variable_scope()
old_custom_getter = var_scope.custom_getter
old_caching_device = var_scope.caching_device
old_val = _CONST_GUARANTEE
var_scope.set_custom_getter(MaybeGuaranteeConstGetter)
var_scope.set_caching_device(lambda op: op.device)
_CONST_GUARANTEE = True
yield
_CONST_GUARANTEE = old_val
var_scope.set_custom_getter(old_custom_getter)
var_scope.set_caching_device(old_caching_device)
@contextlib.contextmanager
def _DummyScope():
yield None
def _GetVarName(v):
return v.name[:-len(':0')]
def _MakeVariableDictionary(variables):
"""Returns a dictionary with name -> tf.Variable() mapping."""
vars_dict = {}
for v in variables:
vars_dict[_GetVarName(v)] = v
return vars_dict
def IsTpu(device_options):
return device_options.device == 'tpu'
def ShouldForceBfloat16ForWeightsAndActivations(device_options):
return device_options.dtype_override == tf.bfloat16
def ShouldForceBfloat16ForActivations(device_options):
return device_options.fprop_dtype_override == tf.bfloat16
def ConvertSubgraphDictToProto(subgraphs_dict):
"""Converts dict of subgraphs/feeds/fetches to InferenceGraph.
Args:
subgraphs_dict: Dict of (fetches, feeds) where each fetches/feeds is a
NestedMap.
Returns:
Equivalent InferenceGraph.
"""
# Build the output inference graph.
inference_graph_proto = inference_graph_pb2.InferenceGraph()
for subgraph_name, tensors in subgraphs_dict.items():
fetches = tensors[0]
feeds = tensors[1]
# Rewrite fetches and feeds to map to their tensor name instead of
# Tensor instance.
named_fetches = {k: v.name for k, v in fetches.items() if v is not None}
named_feeds = {k: v.name for k, v in feeds.items() if v is not None}
# Export as subgraph.
inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches)
inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds)
return inference_graph_proto
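
# Illustrative only: the dict shape expected by ConvertSubgraphDictToProto.
# The subgraph name, tensor names and shapes below are placeholders.
def _ExampleSubgraphDict():
  feeds = py_utils.NestedMap(src_ids=tf.constant([0], name='src_ids'))
  fetches = py_utils.NestedMap(logits=tf.zeros([1, 8], name='logits'))
  return ConvertSubgraphDictToProto({'default': (fetches, feeds)})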
def GetOutputOpNames(graph,
inference_graph_proto,
subgraphs=None,
preserve_colocation_nodes=True,
preserve_saver_restore_nodes=False,
preserve_extra_ops=None):
"""Gets output op names from an inference graph.
Args:
graph: The tf graph.
inference_graph_proto: an InferenceGraph proto.
subgraphs: an optional list of subgraph names. If provided, only output ops
from these subgraphs are preserved. Otherwise, all subgraphs are included.
preserve_colocation_nodes: a Python bool, default to True. Preserves nodes
colocating with the closure of output ops in the returned array.
preserve_saver_restore_nodes: a Python bool, default to False. Preserves
nodes for restoring according to inference_graph_proto.saver_def.
    preserve_extra_ops: an optional list of extra op names to preserve as long
      as they are present in the graph.
Returns:
Array of tf op names that should be preserved in the graph.
"""
output_op_names = set()
def _GetOpName(tensor_or_op_name):
"""Returns the op name of the given node name."""
# Tensor names have format <op_name>:<output_index>. Some inference
# graphs put tensors and others put ops in the feeds/fetches (depends
# on how it is used). We differentiate here. We still do the lookup in
# the graph to sanity check (versus relying on the text manipulation).
# If this logic ever breaks, TensorFlow will raise a ValueError with
# a description of the syntax of each.
if re.search(r':[0-9]+$', tensor_or_op_name):
# Tensor-name.
t = graph.get_tensor_by_name(tensor_or_op_name)
return t.op.name
else:
op = graph.get_operation_by_name(tensor_or_op_name)
return op.name
for subgraph_name, subgraph in inference_graph_proto.subgraphs.items():
if subgraphs and subgraph_name not in subgraphs:
tf.logging.info('Skip subgraph %s.', subgraph_name)
continue
    # Sometimes feeds aren't connected to any outputs, but keep them in the
    # graph anyway to avoid errors.
for tensor_or_op_name in (list(subgraph.feeds.values()) +
list(subgraph.fetches.values())):
output_op_names.add(_GetOpName(tensor_or_op_name))
if preserve_saver_restore_nodes:
    # Only nodes needed for restoring are preserved; saver_def.save_tensor_name
    # is skipped because it's only used for saving.
saver_def = inference_graph_proto.saver_def
for op_name in [saver_def.filename_tensor_name, saver_def.restore_op_name]:
try:
output_op_names.add(_GetOpName(op_name))
except KeyError:
tf.logging.info('Op/tensor %s not in the graph. Ignoring.' % op_name)
if not preserve_colocation_nodes and not preserve_extra_ops:
return sorted(list(output_op_names))
# We also need to preserve any nodes that are used for colocation.
# E.g., a node may have this attr:
# attr {
# key: "_class"
# value {
# list {
# s: "loc:@inference/embedding_lookup/Read/ReadVariableOp"
# }
# }
# }
#
# In this case, we need to make sure the node
# inference/embedding_lookup/Read/ReadVariableOp is not pruned.
#
# TODO(zhifengc): It's possible that it's better to fix in
# tf.graph_util.extract_sub_graph.
graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(),
list(output_op_names))
reachable_vars = [node.name for node in graph_def.node]
for node in graph.get_operations():
if preserve_extra_ops and node.name in preserve_extra_ops:
output_op_names.add(node.name)
elif preserve_colocation_nodes and '_class' in node.node_def.attr:
for loc in node.node_def.attr['_class'].list.s:
loc = six.ensure_text(loc, 'utf-8')
if loc.startswith('loc:@'):
loc_name = loc[5:]
if loc_name not in reachable_vars:
# Skip nodes that cannot be reached from the pruned graph.
continue
output_op_names.add(node.name)
return sorted(list(output_op_names))
def _ParamExists(param_obj, param_name):
"""Tests whether param_name is contained in param_obj."""
if not param_obj:
return
for k, _ in param_obj.IterParams():
if k == param_name:
return True
return False
def _FreezeGraphFromCheckpoint(graph, saver, checkpoint, output_op_names):
"""Freezes a graph from a checkpoint.
Args:
graph: tf.Graph.
saver: The tf.Saver to use for restoration.
checkpoint: The checkpoint to restore.
output_op_names: Names of output ops.
Returns:
Resulting tf.GraphDef.
"""
sess = tf.Session(graph=graph, config=py_utils.SessionConfig())
saver.restore(sess, checkpoint)
return tf.graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), output_op_names)
def _FreezeDefaults(graph, output_op_names):
"""Default initializes a graph and freezes it.
Args:
graph: tf.Graph.
output_op_names: Names of output ops.
Returns:
Resulting tf.GraphDef.
"""
with tf.Session(graph=graph, config=py_utils.SessionConfig()) as sess:
sess.run(graph.get_operation_by_name('init_all_variables'))
return tf.graph_util.convert_variables_to_constants(sess,
graph.as_graph_def(),
output_op_names)
class InferenceGraphExporter:
"""Class for exporting inference graphs."""
@classmethod
def Export(cls,
model_cfg,
model_task_name=None,
device_options=InferenceDeviceOptions(
device='',
retain_device_placement=False,
var_options=None,
gen_init_op=True,
dtype_override=None,
fprop_dtype_override=None),
freeze_checkpoint=None,
freeze_defaults=False,
export_path=None,
subgraph_filter=None,
random_seed=None,
disable_packed_input=True):
"""Exports a InferenceGraph proto with piecewise subgraphs.
Sets FLAGS.enable_asserts to False unless user explicitly sets it to True.
Note: Enable FLAGS.pin_vars_to_cpu (default false) to make weight-sharing
and multi-core inference on TPUs work properly.
Args:
model_cfg: a Params instance as returned by
model_registry.GetParams(modelname, 'Test') or model_params.Model().
model_task_name: The task to generate an inference graph for. Should be
None for single-task models.
device_options: Device options for the accelerator used for serving.
freeze_checkpoint: The checkpoint to load. Loads and freezes the model if
given.
      freeze_defaults: Default-initializes the graph and freezes it. Useful for
        early testing of downstream tools without having a checkpoint.
export_path: If not None, write the inference graph in ASCII to this path.
subgraph_filter: A string or a list of subgraph names. If not None or
empty, export only this list of inference subgraphs.
random_seed: Fixes the random seed in the exported inference graph.
disable_packed_input: Disable packed input for inference writing purposes.
Returns:
InferenceGraph proto.
Raises:
ValueError: if the model does not support the listed subgraphs.
"""
assert issubclass(model_cfg.cls, base_model.BaseModel)
if device_options.dtype_override and device_options.fprop_dtype_override:
raise ValueError(
          'device_options.dtype_override and device_options.fprop_dtype_override '
          'cannot both be set.')
if subgraph_filter and not isinstance(subgraph_filter, (tuple, list)):
subgraph_filter = [subgraph_filter]
# Disable assertions unless user explicitly enables it.
if FLAGS['enable_asserts'].using_default_value:
FLAGS.enable_asserts = False
# TODO(laurenzo): Work out how much we need to specify here in terms of
# cluster configuration.
cls._SetClusterParams(model_cfg.cluster, device_options)
# Configure the model.
model_cfg.random_seed = random_seed
model_cfg.is_inference = True
if disable_packed_input:
def _DisablePackedInput(task):
if (_ParamExists(task, 'encoder') and
_ParamExists(task.encoder, 'packed_input')):
task.encoder.packed_input = False
if (_ParamExists(task, 'decoder') and
_ParamExists(task.decoder, 'packed_input')):
task.decoder.packed_input = False
if issubclass(model_cfg.cls, base_model.MultiTaskModel):
for _, task_param in model_cfg.task_params.IterParams():
_DisablePackedInput(task_param)
else:
_DisablePackedInput(model_cfg.task)
tf.logging.debug('Model %s params:', model_cfg.name)
for line in model_cfg.ToText().split('\n'):
tf.logging.debug('%s', line)
# Instantiate the graph.
graph = tf.Graph()
with graph.as_default():
tf.random.set_seed(random_seed)
cluster = model_cfg.cluster.Instantiate()
device = cluster.GetPlacer()
tpu_const_scope = _DummyScope()
if (IsTpu(device_options) and
device_options.var_options == 'AS_CONSTANTS'):
# Do not specify devices for variables if we are marking them as
# constants.
device = ''
tpu_const_scope = ConstGuaranteeScope()
with cluster, tf.device(device), tpu_const_scope:
bfloat16_override = ShouldForceBfloat16ForWeightsAndActivations(
device_options)
if bfloat16_override:
py_utils.UpdateDtype(model_cfg, tf.bfloat16)
py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16)
act_bfloat16_override = ShouldForceBfloat16ForActivations(
device_options)
if act_bfloat16_override:
py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16)
# Hard-code TPU-related flags prior to instantiating model.
old_enable_asserts = FLAGS.enable_asserts
old_xla_device = FLAGS.xla_device
if IsTpu(device_options):
FLAGS.enable_asserts = False
FLAGS.xla_device = 'tpu'
try:
mdl = model_cfg.Instantiate()
task = mdl.GetTask(model_task_name)
variables_to_restore = (
_MakeVariableDictionary(tf.global_variables()) if not mdl.ema else
mdl.ema.variables_to_restore(mdl.variables_for_ema))
if bfloat16_override:
saver_var_spec = (
bfloat16_variables
.get_saver_spec_for_variables_with_bf16_overrides(
variables_to_restore))
else:
saver_var_spec = variables_to_restore
saver = tf.train.Saver(saver_var_spec)
tf.variables_initializer(
tf.global_variables(), name='init_all_variables')
if IsTpu(device_options) and device_options.gen_init_op:
tf.group(tf.tpu.initialize_system(), name='tpu_init_op')
if freeze_checkpoint or freeze_defaults:
# Replace variables with tensors using tf.identity in theta before
# freezing to avoid the graph referencing types of DT_RESOURCE.
def AddIdentityToTheta(layer):
layer._private_theta = layer._private_theta.Transform(tf.identity) # pylint: disable=protected-access
layer.children.Transform(AddIdentityToTheta)
AddIdentityToTheta(task)
inference_graph_proto = inference_graph_pb2.InferenceGraph()
subgraphs_proto = task.Inference()
if isinstance(subgraphs_proto, dict):
subgraphs_proto = ConvertSubgraphDictToProto(subgraphs_proto)
for name, subgraph in subgraphs_proto.subgraphs.items():
if not subgraph_filter or name in subgraph_filter:
inference_graph_proto.subgraphs[name].CopyFrom(subgraph)
          # Yes, graph collections are bad, but this seems to be the easiest
          # way to get these assets registered from TextFileInitializer.
assets_collection = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.ASSET_FILEPATHS)
for asset in assets_collection:
if asset.op.type == 'Const' and asset.op.get_attr(
'dtype') == tf.dtypes.string:
constant_value = asset.op.get_attr('value')
if constant_value.string_val:
tf.logging.info('Found asset file_path: %s',
constant_value.string_val[0])
asset_file_def = inference_graph_proto.asset_file_def.add()
asset_file_def.tensor_info.name = asset.name
asset_file_def.filename = constant_value.string_val[0]
# Add a table init op and global variable init op to the graph.
# Tables can be declared anywhere in the graph, so this op has to be
# added last.
tf.tables_initializer(name='init_all_tables')
finally:
# Reset TPU-related flags after model instantiation.
FLAGS.enable_asserts = old_enable_asserts
FLAGS.xla_device = old_xla_device
tf.logging.info('Graph contains ops: %r',
[op.name for op in graph.get_operations()])
# Collection defs
if not tf.executing_eagerly():
meta_graph = tf.train.export_meta_graph(graph=graph)
for key in meta_graph.collection_def:
tf.logging.info('copying collection %s', key)
inference_graph_proto.collection_def[key].CopyFrom(
meta_graph.collection_def[key])
else:
tf.logging.warning('Not exporting collection defs '
'since operating in eager mode.')
# Freezing.
if freeze_defaults or freeze_checkpoint:
output_op_names = GetOutputOpNames(
graph,
inference_graph_proto,
preserve_colocation_nodes=False,
preserve_saver_restore_nodes=False)
if cls._DeviceSupportsFreezing(device_options):
raise ValueError('freeze_checkpoint cannot be used with device ' +
device_options.device)
if freeze_checkpoint:
tf.logging.info('Freezing graph from checkpoint: %s', freeze_checkpoint)
graph_def = _FreezeGraphFromCheckpoint(graph, saver, freeze_checkpoint,
output_op_names)
elif freeze_defaults:
tf.logging.info('Default initializing graph and freezing.')
graph_def = _FreezeDefaults(graph, output_op_names)
else:
inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def())
output_op_names = GetOutputOpNames(graph, inference_graph_proto)
# Prune the graph to just the parts we need.
# To support restoring, we have to not prune out the restore node.
output_op_names.append('init_all_tables')
output_op_names.append('init_all_variables')
output_op_names.append('save/control_dependency')
output_op_names.append('save/restore_all')
if IsTpu(device_options) and device_options.gen_init_op:
output_op_names.append('tpu_init_op')
graph_def = graph.as_graph_def()
tf.logging.info('Pruning graph to output ops: %r', output_op_names)
graph_def = tf.graph_util.extract_sub_graph(graph_def, output_op_names)
if not device_options.retain_device_placement:
# Clear the device so that the runtime can choose.
tf.logging.info('Clearing device placement for: %s',
device_options.device)
for node in graph_def.node:
node.ClearField('device')
for function in graph_def.library.function:
for node_def in function.node_def:
node_def.ClearField('device')
inference_graph_proto.graph_def.CopyFrom(graph_def)
if export_path:
with tf.io.gfile.GFile(export_path, 'w') as f:
f.write(text_format.MessageToString(inference_graph_proto))
return inference_graph_proto
@classmethod
def _SetClusterParams(cls, cluster_params, device_options):
"""Sets cluster params.
Args:
cluster_params: Model().cluster config.
device_options: InferenceDeviceOptions.
"""
def Update(p):
"""Update cluster params `p`."""
p.name = '/job:localhost'
p.replicas = 1
p.tpus_per_replica = 1 if IsTpu(device_options) else 0
p.gpus_per_replica = 0
p.devices_per_split = 1
cluster_params.mode = 'sync'
cluster_params.job = 'decoder'
cluster_params.add_summary = False
cluster_params.do_eval = True
Update(cluster_params.controller)
Update(cluster_params.worker)
Update(cluster_params.ps)
Update(cluster_params.evaler)
Update(cluster_params.decoder)
Update(cluster_params.input)
@classmethod
def _DeviceSupportsFreezing(cls, device_options):
return IsTpu(device_options)
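
# Illustrative sketch of a typical call. The registry key, export path and
# default-freeze settings below are hypothetical placeholders; a real
# invocation would use a registered model and usually a real checkpoint.
def _ExampleExport():
  from lingvo import model_registry  # assumed available in a full Lingvo checkout
  model_cfg = model_registry.GetParams('image.mnist.LeNet5', 'Test')
  device_options = InferenceDeviceOptions(
      device='',
      retain_device_placement=False,
      var_options=None,
      gen_init_op=True,
      dtype_override=None,
      fprop_dtype_override=None)
  return InferenceGraphExporter.Export(
      model_cfg,
      device_options=device_options,
      freeze_defaults=True,
      export_path='/tmp/inference_graph.pbtxt',
      subgraph_filter=['default'])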
| 1.617188 | 2 |
src/preprocessing/annual_hc_by_crime_loc.py | VijayKalmath/USCrimeAnalysis | 0 | 3744 | <filename>src/preprocessing/annual_hc_by_crime_loc.py
#! usr/env/bin python
import glob
import numpy as np
import pandas as pd
from tqdm import tqdm
def main():
# Fetch File Paths
file_paths = glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls')
# Sort them according to year
file_paths.sort(key = lambda x: int(x[-8:-4]))
# Create a result dataframe to store the data
df_res = get_place_crime_count(file_paths[0])
# Iterate over the rest of the files
for p in tqdm(file_paths[1:]):
df_temp = get_place_crime_count(p)
df_res = pd.merge(df_res, df_temp, on = "Place", how = "left")
# Save the result to disk
df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False)
def get_place_crime_count(path:str)->pd.DataFrame:
"""
Function to return
"""
    # Extract the table name and year from the given file path
t_name = " ".join(path[path.index("Table"):path.index("_Incidents")].split("_"))
t_year = path[path.index(".xls")-4:path.index(".xls")]
try:
# Read the Excel spreadsheet
df = pd.read_excel(path,sheet_name=t_name)
# Get the start and end indices of the interested datapoints
start = df.index[df[t_name] == "Total"][0] + 1
end = df.index[df[t_name] == "Multiple locations"][0]
# Slice the dataset
df = df.iloc[start:end,0:2]
# Reset the index for the reduced dataframe
df.reset_index(drop = True, inplace = True)
# Rename the columns
df.rename(columns={t_name: "Place", "Unnamed: 1": t_year}, inplace = True)
# Return the value
return df
except:
# If there is no such data return an empty dataframe
i_list = list(range(0,47))
return pd.DataFrame(np.nan, index= i_list, columns=['Place', t_year])
if __name__ == '__main__':
main()
| 3.09375 | 3 |
allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py | ethanjperez/allennlp | 24 | 3745 | # pylint: disable=no-self-use,invalid-name
import numpy as np
from numpy.testing import assert_almost_equal
import torch
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase):
def setUp(self):
super(TestBagOfWordCountsTokenEmbedder, self).setUp()
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("1")
self.vocab.add_token_to_namespace("2")
self.vocab.add_token_to_namespace("3")
self.vocab.add_token_to_namespace("4")
def test_forward_calculates_bow_properly(self):
params = Params({})
embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params)
numpy_tensor = np.array([[2, 0], [3, 0], [4, 4]])
inputs = torch.from_numpy(numpy_tensor).unsqueeze(1)
embedder_output = embedder(inputs)
numpy_tensor = np.array([[1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]])
manual_output = torch.from_numpy(numpy_tensor).float()
assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy())
def test_projects_properly(self):
params = Params({"projection_dim": 50})
embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params)
numpy_tensor = np.array([self.vocab.get_token_index(x) for x in ["1", "2", "3"]])
inputs = torch.from_numpy(numpy_tensor).unsqueeze(1)
embedder_output = embedder(inputs)
assert embedder_output.shape[1] == 50
| 2.1875 | 2 |
demo/demo_shapenet.py | hengkaiz/meshrcnn | 0 | 3746 | import argparse
import logging
import multiprocessing as mp
import logging
import os
from detectron2.evaluation import inference_context
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.logger import setup_logger
from fvcore.common.file_io import PathManager
from pathlib import Path
from pytorch3d.io import save_obj
from shapenet.config.config import get_shapenet_cfg
from shapenet.data.utils import imagenet_preprocess
from shapenet.modeling.heads import voxel_head
from shapenet.modeling.mesh_arch import build_model
from shapenet.utils.checkpoint import clean_state_dict
import torchvision.transforms as T
import glob
from PIL import Image
import trimesh
import pyvista as pv
import pyacvd
import numpy as np
logger = logging.getLogger('demo')
def setup_cfgs(args):
cfg = get_shapenet_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="MeshRCNN Demo")
parser.add_argument(
"--config-file",
default="configs/shapenet/voxmesh_R50.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--input", help="A path to an input main folder")
# parser.add_argument("--output", help="A directory to save output visualizations")
parser.add_argument(
"--focal-length", type=float, default=20.0, help="Focal length for the image"
)
parser.add_argument(
"--onlyhighest", action="store_true", help="will return only the highest scoring detection"
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser
def resample_mesh(mesh, count=2466):
pv_mesh = pv.wrap(mesh)
# logger.info('Original mesh:')
# print(pv_mesh)
clus = pyacvd.Clustering(pv_mesh)
clus.subdivide(3)
clus.cluster(count)
# remesh
remesh = clus.create_mesh()
# verts = remesh.points
# faces = remesh.faces.reshape((-1, 4))[:, 1:]
return remesh
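
# Illustrative usage sketch (the path is a placeholder): load a mesh with
# trimesh, resample it to a fixed vertex budget, and unpack verts/faces.
def example_resample(path="mesh.obj", count=2466):
    mesh = trimesh.load(path, force="mesh")
    remesh = resample_mesh(mesh, count=count)
    verts = np.asarray(remesh.points)
    faces = remesh.faces.reshape((-1, 4))[:, 1:]
    return verts, faces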
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
device = torch.device("cuda:%d" % 0)
logger = setup_logger(name="demo shapenet")
logger.info("Arguments: " + str(args))
cfg = setup_cfgs(args)
# load checkpoing and build model
if cfg.MODEL.CHECKPOINT == "":
raise ValueError("Invalid checkpoing provided")
logger.info("Loading model from checkpoint: %s" % (cfg.MODEL.CHECKPOINT))
cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT))
state_dict = clean_state_dict(cp["best_states"]["model"])
model = build_model(cfg)
model.load_state_dict(state_dict)
logger.info("Model loaded")
model.to(device)
sub_dir = sorted(os.listdir(args.input))
for sd in sub_dir:
curr_path = os.path.join(args.input, sd)
images = glob.glob(curr_path + "/*.png")
for img_dir in images:
# load image
transform = [T.ToTensor()]
transform.append(imagenet_preprocess())
transform = T.Compose(transform)
im_name = img_dir.split("/")[-1].split(".")[0]
with PathManager.open(img_dir, "rb") as f:
img = Image.open(f).convert("RGB")
img = transform(img)
img = img[None, :, :, :]
img = img.to(device)
with inference_context(model):
img_feats, voxel_scores, meshes_pred, P, cubified_meshes = model(img)
# Save voxel_score
voxel_odir = os.path.join(curr_path, "voxel_score")
if not Path(voxel_odir).is_dir():
os.mkdir(voxel_odir)
voxel_file = os.path.join(voxel_odir, "%s.pt" % (im_name))
torch.save(voxel_scores, voxel_file)
# Save image features
imgfeat_odir = os.path.join(curr_path, "img_feat")
if not Path(imgfeat_odir).is_dir():
os.mkdir(imgfeat_odir)
img_feat_file = os.path.join(imgfeat_odir, "%s.pt" % (im_name))
torch.save(img_feats, img_feat_file)
# Save P
p_odir = os.path.join(curr_path, "P")
if not Path(p_odir).is_dir():
os.mkdir(p_odir)
p_file = os.path.join(p_odir, "%s.pt" % (im_name))
torch.save(P, p_file)
# Save cubified mesh
cmesh_odir = os.path.join(curr_path, "cube_mesh")
if not Path(cmesh_odir).is_dir():
os.mkdir(cmesh_odir)
cube_mesh_file = os.path.join(cmesh_odir, "%s_cube.obj" % (im_name))
c_verts, c_faces = cubified_meshes[-1].get_mesh_verts_faces(0)
save_obj(cube_mesh_file, c_verts, c_faces)
# Save predicted mesh
mesh_odir = os.path.join(curr_path, "final_mesh")
if not Path(mesh_odir).is_dir():
os.mkdir(mesh_odir)
save_file = os.path.join(mesh_odir, "%s.obj" % (im_name))
verts, faces = meshes_pred[-1].get_mesh_verts_faces(0)
save_obj(save_file, verts, faces)
logger.info("Predictions saved for %s/%s" % (curr_path.split('/')[-1], im_name))
| 1.820313 | 2 |
proglearn/voters.py | jshin13/progressive-learning | 0 | 3747 | <gh_stars>0
import numpy as np
# from sklearn.ensemble import BaggingClassifier
# from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils.validation import (
check_X_y,
check_array,
NotFittedError,
)
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from .base import BaseVoter
from tensorflow import keras
from tensorflow.keras import layers
class TreeClassificationVoter(BaseVoter):
def __init__(self, finite_sample_correction=False):
"""
Doc strings here.
"""
self.finite_sample_correction = finite_sample_correction
self._is_fitted = False
self.multilabel = False
def fit(self, X, y):
"""
Doc strings here.
"""
check_classification_targets(y)
if type_of_target(y) == 'multilabel-indicator':
# Fit multilabel binary task.
self.multilabel = True
return self.fit_multilabel(X, y)
num_classes = len(np.unique(y))
self.uniform_posterior = np.ones(num_classes) / num_classes
self.leaf_to_posterior = {}
for leaf_id in np.unique(X):
idxs_in_leaf = np.where(X == leaf_id)[0]
class_counts = [
len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)
]
posteriors = np.nan_to_num(np.array(class_counts) / np.sum(class_counts))
if self.finite_sample_correction:
posteriors = self._finite_sample_correction(
posteriors, len(idxs_in_leaf), len(np.unique(y))
)
self.leaf_to_posterior[leaf_id] = posteriors
self._is_fitted = True
return self
def fit_multilabel(self, X, y):
num_labels = y.shape[1]
self.uniform_posterior = y.sum(axis=0) / len(y)
# Each posterior is now a num_labels size vector or binary probabilities.
self.leaf_to_posterior = {}
for leaf_id in np.unique(X):
idxs_in_leaf = np.where(X == leaf_id)[0]
label_counts = [
len(np.where(y[idxs_in_leaf, j] == 1)[0]) for j in range(num_labels)
]
posteriors = np.nan_to_num(np.array(label_counts) / np.sum(label_counts))
# TODO: multilabel finite sample correction.
self.leaf_to_posterior[leaf_id] = posteriors
self._is_fitted = True
return self
def vote(self, X):
"""
Doc strings here.
"""
if not self.is_fitted():
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this voter."
)
raise NotFittedError(msg % {"name": type(self).__name__})
votes_per_example = []
for x in X:
if x in list(self.leaf_to_posterior.keys()):
votes_per_example.append(self.leaf_to_posterior[x])
else:
votes_per_example.append(self.uniform_posterior)
return np.array(votes_per_example)
def is_fitted(self):
"""
Doc strings here.
"""
return self._is_fitted
    @staticmethod
    def _finite_sample_correction(posteriors, num_points_in_partition, num_classes):
"""
encourage posteriors to approach uniform when there is low data
"""
correction_constant = 1 / (num_classes * num_points_in_partition)
zero_posterior_idxs = np.where(posteriors == 0)[0]
posteriors[zero_posterior_idxs] = correction_constant
posteriors /= sum(posteriors)
return posteriors
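
# Worked example for _finite_sample_correction above (illustrative): with
# 3 classes and 5 samples in a leaf, a raw posterior of [0.9, 0.1, 0.0] has its
# zero entry replaced by 1 / (3 * 5) and is renormalized to roughly
# [0.844, 0.094, 0.063], nudging the estimate toward uniform.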
class KNNClassificationVoter(BaseVoter):
def __init__(self, k, kwargs={}):
"""
Doc strings here.
"""
self._is_fitted = False
self.k = k
self.kwargs = kwargs
def fit(self, X, y):
"""
Doc strings here.
"""
X, y = check_X_y(X, y)
self.knn = KNeighborsClassifier(self.k, **self.kwargs)
self.knn.fit(X, y)
self._is_fitted = True
return self
def vote(self, X):
"""
Doc strings here.
"""
if not self.is_fitted():
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this transformer."
)
raise NotFittedError(msg % {"name": type(self).__name__})
X = check_array(X)
return self.knn.predict_proba(X)
def is_fitted(self):
"""
Doc strings here.
"""
return self._is_fitted
class NeuralRegressionVoter(BaseVoter):
def __init__(
self, validation_split=0.25, loss="mse", epochs=100, lr=1e-4, verbose=False,
):
"""
Doc strings here.
"""
self.validation_split = validation_split
self.loss = loss
self.epochs = epochs
self.lr = lr
self.verbose = verbose
self._is_fitted = False
def fit(self, X, y):
"""
Doc strings here.
"""
X, y = check_X_y(X, y)
self.voter = keras.Sequential()
self.voter.add(
layers.Dense(
1,
activation="linear",
input_shape=(X.shape[1],),
name="transform_to_vote",
)
)
self.voter.compile(
loss=self.loss, metrics=["mae"], optimizer=keras.optimizers.Adam(self.lr)
)
self.voter.fit(
X,
y,
epochs=self.epochs,
callbacks=[keras.callbacks.EarlyStopping(patience=20, monitor="val_loss")],
verbose=self.verbose,
validation_split=self.validation_split,
shuffle=True,
)
self._is_fitted = True
return self
def vote(self, X):
"""
Doc strings here.
"""
if not self.is_fitted():
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this transformer."
)
raise NotFittedError(msg % {"name": type(self).__name__})
X = check_array(X)
return self.voter.predict(X)
def is_fitted(self):
"""
Doc strings here.
"""
return self._is_fitted
class TreeRegressionVoter(BaseVoter):
def __init__(self):
"""
Doc strings here.
"""
self._is_fitted = False
def fit(self, X, y):
"""
Doc strings here.
"""
self.leaf_to_yhat = {}
self.global_yhat = np.mean(y)
for leaf_id in np.unique(X):
idxs_in_leaf = np.where(X == leaf_id)[0]
# class_counts = [len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)]
self.leaf_to_yhat[leaf_id] = np.nan_to_num(np.mean(y[idxs_in_leaf]))
self._is_fitted = True
return self
def vote(self, X):
"""
Doc strings here.
"""
if not self.is_fitted():
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this voter."
)
raise NotFittedError(msg % {"name": type(self).__name__})
votes_per_example = []
for x in X:
if x in list(self.leaf_to_yhat.keys()):
votes_per_example.append(self.leaf_to_yhat[x])
else:
votes_per_example.append(self.global_yhat)
return np.array(votes_per_example)
def is_fitted(self):
"""
Doc strings here.
"""
return self._is_fitted | 2.4375 | 2 |
config.py | jhattat/photoBooth | 0 | 3748 | <filename>config.py
# Tumblr Setup
# Replace the values with your information
# OAuth keys can be generated from https://api.tumblr.com/console/calls/user/info
consumer_key='<KEY>' #replace with your key
consumer_secret='<KEY>' #replace with your secret code
oath_token='<KEY>' #replace with your oath token
oath_secret='<KEY>' #replace with your oath secret code
tumblr_blog = 'soniaetjeremie' # replace with your tumblr account name without .tumblr.com
tagsForTumblr = "photobooth" # change to tags you want, separated with commas
#Config settings to change behavior of photo booth
monitor_w = 800 # width of the display monitor
monitor_h = 480 # height of the display monitor
file_path = '/home/pi/photobooth/pics/' # path to save images
clear_on_startup = False # True will clear previously stored photos as the program launches. False will leave all previous photos.
debounce = 0.3 # how long to debounce the button. Add more time if the button triggers too many times.
post_online = True # True to upload images. False to store locally only.
capture_count_pics = True # if true, show a photo count between taking photos. If false, do not. False is faster.
make_gifs = True # True to make an animated gif. False to post 4 jpgs into one post.
hi_res_pics = False # True to save high res pics from camera.
# If also uploading, the program will also convert each image to a smaller image before making the gif.
# False to first capture low res pics. False is faster.
# Careful, each photo costs against your daily Tumblr upload max.
camera_iso = 400 # adjust for lighting issues. Normal is 100 or 200. Sort of dark is 400. Dark is 800 max.
# available options: 100, 200, 320, 400, 500, 640, 800 | 2.375 | 2 |
accounts/admin.py | GuilhemN/site-interludes | 0 | 3749 | from django.contrib import admin
from django.contrib.auth.models import Group
from accounts.models import EmailUser
from shared.admin import ExportCsvMixin
# no need for groups - we only have regular users and superusers
admin.site.unregister(Group)
@admin.register(EmailUser)
class EmailUserAdmin(ExportCsvMixin, admin.ModelAdmin):
"""option d'affichage des activités dans la vue django admin"""
filename = "export_utilisateurs.csv"
list_display = ("email", "last_name", "first_name", "is_superuser", "is_active", "email_confirmed",)
list_filter = ("is_superuser","is_active", "email_confirmed",)
fields = ("email", "last_name", "first_name", "is_superuser", "is_staff", "is_active", "email_confirmed",
("date_joined", "last_login",),
)
ordering = ("last_name", "first_name")
readonly_fields = ("date_joined", "last_login",)
list_per_page = 200
csv_export_exclude = ["password"]
| 2 | 2 |
rotkehlchen/exchanges/coinbase.py | vnavascues/rotki | 0 | 3750 | <reponame>vnavascues/rotki
import hashlib
import hmac
import logging
import time
from json.decoder import JSONDecodeError
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
from urllib.parse import urlencode
import requests
from rotkehlchen.assets.asset import Asset
from rotkehlchen.assets.converters import asset_from_coinbase
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset
from rotkehlchen.exchanges.data_structures import AssetMovement, Trade
from rotkehlchen.exchanges.exchange import ExchangeInterface
from rotkehlchen.exchanges.utils import deserialize_asset_movement_address, get_key_if_has_val
from rotkehlchen.inquirer import Inquirer
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.serialization.deserialize import (
deserialize_asset_amount,
deserialize_asset_amount_force_positive,
deserialize_asset_movement_category,
deserialize_fee,
deserialize_timestamp_from_date,
deserialize_trade_type,
)
from rotkehlchen.typing import (
ApiKey,
ApiSecret,
AssetMovementCategory,
Fee,
Location,
Price,
Timestamp,
TradePair,
)
from rotkehlchen.user_messages import MessagesAggregator
from rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock
from rotkehlchen.utils.serialization import rlk_jsonloads_dict
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def trade_from_coinbase(raw_trade: Dict[str, Any]) -> Optional[Trade]:
"""Turns a coinbase transaction into a rotkehlchen Trade.
https://developers.coinbase.com/api/v2?python#buys
    If the coinbase transaction is not a trade-related transaction, returns None.
Throws:
- UnknownAsset due to Asset instantiation
- DeserializationError due to unexpected format of dict entries
- KeyError due to dict entires missing an expected entry
"""
if raw_trade['status'] != 'completed':
# We only want to deal with completed trades
return None
if raw_trade['instant']:
raw_time = raw_trade['created_at']
else:
raw_time = raw_trade['payout_at']
timestamp = deserialize_timestamp_from_date(raw_time, 'iso8601', 'coinbase')
trade_type = deserialize_trade_type(raw_trade['resource'])
tx_amount = deserialize_asset_amount(raw_trade['amount']['amount'])
tx_asset = asset_from_coinbase(raw_trade['amount']['currency'], time=timestamp)
native_amount = deserialize_asset_amount(raw_trade['subtotal']['amount'])
native_asset = asset_from_coinbase(raw_trade['subtotal']['currency'], time=timestamp)
# in coinbase you are buying/selling tx_asset for native_asset
pair = TradePair(f'{tx_asset.identifier}_{native_asset.identifier}')
amount = tx_amount
# The rate is how much you get/give in quotecurrency if you buy/sell 1 unit of base currency
rate = Price(native_amount / tx_amount)
fee_amount = deserialize_fee(raw_trade['fee']['amount'])
fee_asset = asset_from_coinbase(raw_trade['fee']['currency'], time=timestamp)
return Trade(
timestamp=timestamp,
location=Location.COINBASE,
pair=pair,
trade_type=trade_type,
amount=amount,
rate=rate,
fee=fee_amount,
fee_currency=fee_asset,
link=str(raw_trade['id']),
)
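
# Illustrative only: a minimal raw "buy" payload in the shape this parser
# expects. Every value below is made up for demonstration purposes.
def _example_trade_from_coinbase():
    raw_trade = {
        'id': 'example-id',
        'status': 'completed',
        'instant': False,
        'resource': 'buy',
        'created_at': '2020-01-01T00:00:00Z',
        'payout_at': '2020-01-02T00:00:00Z',
        'amount': {'amount': '0.5', 'currency': 'BTC'},
        'subtotal': {'amount': '4000.00', 'currency': 'USD'},
        'fee': {'amount': '10.00', 'currency': 'USD'},
    }
    return trade_from_coinbase(raw_trade)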
class CoinbasePermissionError(Exception):
pass
class Coinbase(ExchangeInterface):
def __init__(
self,
api_key: ApiKey,
secret: ApiSecret,
database: 'DBHandler',
msg_aggregator: MessagesAggregator,
):
super(Coinbase, self).__init__('coinbase', api_key, secret, database)
self.apiversion = 'v2'
self.base_uri = 'https://api.coinbase.com'
self.msg_aggregator = msg_aggregator
def first_connection(self) -> None:
self.first_connection_made = True
def _validate_single_api_key_action(
self,
method_str: str,
ignore_pagination: bool = False,
) -> Tuple[Optional[List[Any]], str]:
try:
result = self._api_query(method_str, ignore_pagination=ignore_pagination)
except CoinbasePermissionError as e:
error = str(e)
if 'transactions' in method_str:
permission = 'wallet:transactions:read'
elif 'buys' in method_str:
permission = 'wallet:buys:read'
elif 'sells' in method_str:
permission = 'wallet:sells:read'
elif 'deposits' in method_str:
permission = 'wallet:deposits:read'
elif 'withdrawals' in method_str:
permission = 'wallet:withdrawals:read'
elif 'trades' in method_str:
permission = 'wallet:trades:read'
# the accounts elif should be at the end since the word appears
# in other endpoints
elif 'accounts' in method_str:
permission = 'wallet:accounts:read'
else:
raise AssertionError(
f'Unexpected coinbase method {method_str} at API key validation',
)
msg = (
f'Provided Coinbase API key needs to have {permission} permission activated. '
f'Please log into your coinbase account and set all required permissions: '
f'wallet:accounts:read, wallet:transactions:read, '
f'wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, '
f'wallet:deposits:read, wallet:trades:read'
)
return None, msg
except RemoteError as e:
error = str(e)
if 'invalid signature' in error:
return None, 'Failed to authenticate with the Provided API key/secret'
elif 'invalid api key' in error:
return None, 'Provided API Key is invalid'
else:
# any other remote error
return None, error
return result, ''
def validate_api_key(self) -> Tuple[bool, str]:
"""Validates that the Coinbase API key is good for usage in Rotki
Makes sure that the following permissions are given to the key:
wallet:accounts:read, wallet:transactions:read,
wallet:buys:read, wallet:sells:read, wallet:withdrawals:read,
wallet:deposits:read
"""
result, msg = self._validate_single_api_key_action('accounts')
if result is None:
return False, msg
# now get the account ids
account_ids = self._get_account_ids(result)
if len(account_ids) != 0:
# and now try to get all transactions of an account to see if that's possible
method = f'accounts/{account_ids[0]}/transactions'
result, msg = self._validate_single_api_key_action(method)
if result is None:
return False, msg
# and now try to get all buys of an account to see if that's possible
method = f'accounts/{account_ids[0]}/buys'
result, msg = self._validate_single_api_key_action(method)
if result is None:
return False, msg
# and now try to get all sells of an account to see if that's possible
method = f'accounts/{account_ids[0]}/sells'
result, msg = self._validate_single_api_key_action(method)
if result is None:
return False, msg
# and now try to get all deposits of an account to see if that's possible
method = f'accounts/{account_ids[0]}/deposits'
result, msg = self._validate_single_api_key_action(method)
if result is None:
return False, msg
# and now try to get all withdrawals of an account to see if that's possible
method = f'accounts/{account_ids[0]}/withdrawals'
result, msg = self._validate_single_api_key_action(method)
if result is None:
return False, msg
return True, ''
def _get_account_ids(self, accounts: List[Dict[str, Any]]) -> List[str]:
"""Gets the account ids out of the accounts response"""
account_ids = []
for account_data in accounts:
if 'id' not in account_data:
self.msg_aggregator.add_error(
'Found coinbase account entry without an id key. Skipping it. ',
)
continue
if not isinstance(account_data['id'], str):
self.msg_aggregator.add_error(
f'Found coinbase account entry with a non string id: '
f'{account_data["id"]}. Skipping it. ',
)
continue
account_ids.append(account_data['id'])
return account_ids
def _api_query(
self,
endpoint: str,
options: Optional[Dict[str, Any]] = None,
pagination_next_uri: str = None,
ignore_pagination: bool = False,
) -> List[Any]:
"""Performs a coinbase API Query for endpoint
You can optionally provide extra arguments to the endpoint via the options argument.
If this is an ongoing paginating call then provide pagination_next_uri.
If you want just the first results then set ignore_pagination to True.
"""
request_verb = "GET"
if pagination_next_uri:
request_url = pagination_next_uri
else:
request_url = f'/{self.apiversion}/{endpoint}'
if options:
                request_url += '?' + urlencode(options)
timestamp = str(int(time.time()))
message = timestamp + request_verb + request_url
signature = hmac.new(
self.secret,
message.encode(),
hashlib.sha256,
).hexdigest()
log.debug('Coinbase API query', request_url=request_url)
self.session.headers.update({
'CB-ACCESS-SIGN': signature,
'CB-ACCESS-TIMESTAMP': timestamp,
'CB-ACCESS-KEY': self.api_key,
            # Pin the API version so responses follow the format as of this date.
'CB-VERSION': '2019-08-25',
})
full_url = self.base_uri + request_url
try:
response = self.session.get(full_url)
except requests.exceptions.RequestException as e:
raise RemoteError(f'Coinbase API request failed due to {str(e)}')
if response.status_code == 403:
raise CoinbasePermissionError(f'API key does not have permission for {endpoint}')
if response.status_code != 200:
raise RemoteError(
f'Coinbase query {full_url} responded with error status code: '
f'{response.status_code} and text: {response.text}',
)
try:
json_ret = rlk_jsonloads_dict(response.text)
except JSONDecodeError:
raise RemoteError(f'Coinbase returned invalid JSON response: {response.text}')
if 'data' not in json_ret:
raise RemoteError(f'Coinbase json response does not contain data: {response.text}')
final_data = json_ret['data']
# If we got pagination and this is the first query, gather all the subsequent queries
if 'pagination' in json_ret and not pagination_next_uri and not ignore_pagination:
if 'next_uri' not in json_ret['pagination']:
raise RemoteError('Coinbase json response contained no "next_uri" key')
next_uri = json_ret['pagination']['next_uri']
if not next_uri:
# As per the docs: https://developers.coinbase.com/api/v2?python#pagination
# once we get an empty next_uri we are done
return final_data
additional_data = self._api_query(
endpoint=endpoint,
options=options,
pagination_next_uri=next_uri,
)
final_data.extend(additional_data)
return final_data
@protect_with_lock()
@cache_response_timewise()
def query_balances(self) -> Tuple[Optional[Dict[Asset, Dict[str, Any]]], str]:
try:
resp = self._api_query('accounts')
except RemoteError as e:
msg = (
'Coinbase API request failed. Could not reach coinbase due '
'to {}'.format(e)
)
log.error(msg)
return None, msg
returned_balances: Dict[Asset, Dict[str, Any]] = {}
for account in resp:
try:
if not account['balance']:
continue
amount = deserialize_asset_amount(account['balance']['amount'])
# ignore empty balances. Coinbase returns zero balances for everything
# a user does not own
if amount == ZERO:
continue
asset = asset_from_coinbase(account['balance']['currency'])
try:
usd_price = Inquirer().find_usd_price(asset=asset)
except RemoteError as e:
self.msg_aggregator.add_error(
f'Error processing coinbase balance entry due to inability to '
f'query USD price: {str(e)}. Skipping balance entry',
)
continue
if asset in returned_balances:
amount = returned_balances[asset]['amount'] + amount
else:
returned_balances[asset] = {}
returned_balances[asset]['amount'] = amount
usd_value = returned_balances[asset]['amount'] * usd_price
returned_balances[asset]['usd_value'] = usd_value
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Found coinbase balance result with unknown asset '
f'{e.asset_name}. Ignoring it.',
)
continue
except UnsupportedAsset as e:
self.msg_aggregator.add_warning(
f'Found coinbase balance result with unsupported asset '
f'{e.asset_name}. Ignoring it.',
)
continue
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_error(
'Error processing a coinbase account balance. Check logs '
'for details. Ignoring it.',
)
log.error(
'Error processing a coinbase account balance',
account_balance=account,
error=msg,
)
continue
return returned_balances, ''
def query_online_trade_history(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> List[Trade]:
account_data = self._api_query('accounts')
# now get the account ids and for each one query buys/sells
# Looking at coinbase's API no other type of transaction
# https://developers.coinbase.com/api/v2?python#list-transactions
        # constitutes something that Rotkehlchen would need to return in query_trade_history
account_ids = self._get_account_ids(account_data)
raw_data = []
for account_id in account_ids:
raw_data.extend(self._api_query(f'accounts/{account_id}/buys'))
raw_data.extend(self._api_query(f'accounts/{account_id}/sells'))
log.debug('coinbase buys/sells history result', results_num=len(raw_data))
trades = []
for raw_trade in raw_data:
try:
trade = trade_from_coinbase(raw_trade)
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Found coinbase transaction with unknown asset '
f'{e.asset_name}. Ignoring it.',
)
continue
except UnsupportedAsset as e:
self.msg_aggregator.add_warning(
f'Found coinbase trade with unsupported asset '
f'{e.asset_name}. Ignoring it.',
)
continue
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_error(
'Error processing a coinbase trade. Check logs '
'for details. Ignoring it.',
)
log.error(
'Error processing a coinbase trade',
trade=raw_trade,
error=msg,
)
continue
# limit coinbase trades in the requested time range here since there
# is no argument in the API call
if trade and trade.timestamp >= start_ts and trade.timestamp <= end_ts:
trades.append(trade)
return trades
def _deserialize_asset_movement(self, raw_data: Dict[str, Any]) -> Optional[AssetMovement]:
"""Processes a single deposit/withdrawal from coinbase and deserializes it
Can log error/warning and return None if something went wrong at deserialization
"""
try:
if raw_data['status'] != 'completed':
return None
payout_date = raw_data.get('payout_at', None)
if payout_date:
timestamp = deserialize_timestamp_from_date(payout_date, 'iso8601', 'coinbase')
else:
timestamp = deserialize_timestamp_from_date(
raw_data['created_at'],
'iso8601',
'coinbase',
)
# Only get address/transaction id for "send" type of transactions
address = None
transaction_id = None
# movement_category: Union[Literal['deposit'], Literal['withdrawal']]
if 'type' in raw_data:
# Then this should be a "send" which is the way Coinbase uses to send
# crypto outside of the exchange
# https://developers.coinbase.com/api/v2?python#transaction-resource
msg = 'Non "send" type found in coinbase deposit/withdrawal processing'
assert raw_data['type'] == 'send', msg
movement_category = AssetMovementCategory.WITHDRAWAL
# Can't see the fee being charged from the "send" resource
amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount'])
asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp)
# Fees dont appear in the docs but from an experiment of sending ETH
# to an address from coinbase there is the network fee in the response
fee = Fee(ZERO)
raw_network = raw_data.get('network', None)
if raw_network:
raw_fee = raw_network.get('transaction_fee', None)
if raw_fee:
# Since this is a withdrawal the fee should be the same as the moved asset
if asset != asset_from_coinbase(raw_fee['currency'], time=timestamp):
# If not we set ZERO fee and ignore
log.error(
f'In a coinbase withdrawal of {asset.identifier} the fee'
f'is denoted in {raw_fee["currency"]}',
)
else:
fee = deserialize_fee(raw_fee['amount'])
if 'network' in raw_data:
transaction_id = get_key_if_has_val(raw_data['network'], 'hash')
if 'to' in raw_data:
address = deserialize_asset_movement_address(raw_data['to'], 'address', asset)
else:
movement_category = deserialize_asset_movement_category(raw_data['resource'])
amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount'])
fee = deserialize_fee(raw_data['fee']['amount'])
asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp)
return AssetMovement(
location=Location.COINBASE,
category=movement_category,
address=address,
transaction_id=transaction_id,
timestamp=timestamp,
asset=asset,
amount=amount,
fee_asset=asset,
fee=fee,
link=str(raw_data['id']),
)
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Found coinbase deposit/withdrawal with unknown asset '
f'{e.asset_name}. Ignoring it.',
)
except UnsupportedAsset as e:
self.msg_aggregator.add_warning(
f'Found coinbase deposit/withdrawal with unsupported asset '
f'{e.asset_name}. Ignoring it.',
)
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_error(
'Unexpected data encountered during deserialization of a coinbase '
'asset movement. Check logs for details and open a bug report.',
)
log.error(
f'Unexpected data encountered during deserialization of coinbase '
f'asset_movement {raw_data}. Error was: {str(e)}',
)
return None
def query_online_deposits_withdrawals(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> List[AssetMovement]:
account_data = self._api_query('accounts')
account_ids = self._get_account_ids(account_data)
raw_data = []
for account_id in account_ids:
raw_data.extend(self._api_query(f'accounts/{account_id}/deposits'))
raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals'))
# also get transactions to get the "sends", which in Coinbase is the
# way to send Crypto out of the exchange
txs = self._api_query(f'accounts/{account_id}/transactions')
for tx in txs:
if 'type' not in tx:
continue
if tx['type'] == 'send':
raw_data.append(tx)
log.debug('coinbase deposits/withdrawals history result', results_num=len(raw_data))
movements = []
for raw_movement in raw_data:
movement = self._deserialize_asset_movement(raw_movement)
# limit coinbase deposit/withdrawals in the requested time range
# here since there is no argument in the API call
if movement and movement.timestamp >= start_ts and movement.timestamp <= end_ts:
movements.append(movement)
return movements
| 2.140625 | 2 |
lib/python3.7/site-packages/ldap/controls/deref.py | aonrobot/MSC-thug-auth-provider | 1 | 3751 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
ldap.controls.deref - classes for the LDAP Dereference control
(see https://tools.ietf.org/html/draft-masarati-ldap-deref)
See https://www.python-ldap.org/ for project details.
"""
__all__ = [
'DEREF_CONTROL_OID',
'DereferenceControl',
]
import ldap.controls
from ldap.controls import LDAPControl,KNOWN_RESPONSE_CONTROLS
import pyasn1_modules.rfc2251
from pyasn1.type import namedtype,univ,tag
from pyasn1.codec.ber import encoder,decoder
from pyasn1_modules.rfc2251 import LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue
DEREF_CONTROL_OID = '1.3.6.1.4.1.4203.666.5.16'
# Request types
#---------------------------------------------------------------------------
# For compatibility with ASN.1 declaration in I-D
AttributeList = AttributeDescriptionList
class DerefSpec(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType(
'derefAttr',
AttributeDescription()
),
namedtype.NamedType(
'attributes',
AttributeList()
),
)
class DerefSpecs(univ.SequenceOf):
componentType = DerefSpec()
# Response types
#---------------------------------------------------------------------------
class AttributeValues(univ.SetOf):
componentType = AttributeValue()
class PartialAttribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeDescription()),
namedtype.NamedType('vals', AttributeValues()),
)
class PartialAttributeList(univ.SequenceOf):
componentType = PartialAttribute()
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0)
)
class DerefRes(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('derefAttr', AttributeDescription()),
namedtype.NamedType('derefVal', LDAPDN()),
namedtype.OptionalNamedType('attrVals', PartialAttributeList()),
)
class DerefResultControlValue(univ.SequenceOf):
componentType = DerefRes()
class DereferenceControl(LDAPControl):
controlType = DEREF_CONTROL_OID
def __init__(self,criticality=False,derefSpecs=None):
LDAPControl.__init__(self,self.controlType,criticality)
self.derefSpecs = derefSpecs or {}
def _derefSpecs(self):
deref_specs = DerefSpecs()
i = 0
for deref_attr,deref_attribute_names in self.derefSpecs.items():
deref_spec = DerefSpec()
deref_attributes = AttributeList()
for j in range(len(deref_attribute_names)):
deref_attributes.setComponentByPosition(j,deref_attribute_names[j])
deref_spec.setComponentByName('derefAttr',AttributeDescription(deref_attr))
deref_spec.setComponentByName('attributes',deref_attributes)
deref_specs.setComponentByPosition(i,deref_spec)
i += 1
return deref_specs
def encodeControlValue(self):
return encoder.encode(self._derefSpecs())
def decodeControlValue(self,encodedControlValue):
decodedValue,_ = decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue())
self.derefRes = {}
for deref_res in decodedValue:
deref_attr,deref_val,deref_vals = deref_res[0],deref_res[1],deref_res[2]
partial_attrs_dict = {
str(tv[0]): [str(v) for v in tv[1]]
for tv in deref_vals or []
}
try:
self.derefRes[str(deref_attr)].append((str(deref_val),partial_attrs_dict))
except KeyError:
self.derefRes[str(deref_attr)] = [(str(deref_val),partial_attrs_dict)]
KNOWN_RESPONSE_CONTROLS[DereferenceControl.controlType] = DereferenceControl
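
# Illustrative usage sketch (server address, base DN and attribute names are
# assumptions, not part of this module): ask the server to dereference the
# 'member' attribute of each result and return the referenced entries' uid/cn.
#
#   import ldap
#   conn = ldap.initialize('ldap://localhost')
#   deref = DereferenceControl(True, derefSpecs={'member': ['uid', 'cn']})
#   msgid = conn.search_ext(
#       'ou=groups,dc=example,dc=com', ldap.SCOPE_SUBTREE,
#       '(objectClass=groupOfNames)', serverctrls=[deref])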
| 1.695313 | 2 |
emoji.py | notagoat/Deepmoji | 1 | 3752 | import requests
import urllib.request
import os.path
import shutil
import csv
def main():
with open("data.csv") as i: #Open the data.csv file
instances = i.readlines() #Write them into memory
instances = [x.strip() for x in instances] #Strip any weird issues from writing
instances.sort() #Sort them alphabetically
setup(instances) #Run setup to create all the necessary files and subfolders
count = len(instances) #Get the count just for fun
i = 0
try:
for name in instances:
try:
i += 1
print("-----!"+name+"!-----")
print(str(i) +" of " + str(count) + " remaining!")
fetch(name) #Run the fetching code
except Exception as e:
print(e) #Print the error. We catch errors here for pleroma instances, weirdly encoded urls, etc
pass #Don't stop the beat
except Exception as e:
print("Instance Error")
print(e)
pass
clone(instances) #Clone all of them into one big folder for ease of access
def fetch(name):
r = requests.get('https://%s/api/v1/custom_emojis'% name, allow_redirects=True) #Throw the instance name into the standard url for fetching data
path = "emoji/%s/" % name #Because of the clone function we know all of these folders will exist
try:
for emoji in r.json(): #Emoji = the json code from the request
try:
if os.path.isfile(path+emoji['shortcode']+".png"): #Check to see if it exists.
pass
else:
if "ms_" not in emoji['shortcode']: #Cut out Mutant Standard Emojis (Or at least most of them). #Mutant standard is huge and common
#print(emoji['shortcode'] + " found!")
emojiimage = requests.get(emoji['static_url'],allow_redirects=True) #Get the image from the json
                        with open(path + emoji['shortcode'] + ".png", 'wb') as imgfile: #Now save it as an image in the filesystem
                            imgfile.write(emojiimage.content)
except Exception as e:
print("Did not get: " + emoji['url']) #If somethings fucky throw a nice error then keep going.
print(e)
pass
except Exception as e:
print(e)
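
# For reference, each element returned by /api/v1/custom_emojis (consumed by
# fetch() above) is expected to look roughly like this (values illustrative):
#   {"shortcode": "blobcat", "url": "https://example.social/emoji/blobcat.png",
#    "static_url": "https://example.social/emoji/blobcat_static.png"}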
def setup(instances):
if (os.path.isdir("emoji/")): #Check to see if emoji/ exists
pass
else:
os.mkdir("emoji/") #make it if it doesnt
for name in instances:
if (os.path.isdir("emoji/%s/"%name)):
pass
else: os.mkdir("emoji/%s/"%name)
if (os.path.isdir("emoji/all")):
pass
else:
os.mkdir("emoji/all")
def clone(instances):
for name in instances:
print("Copying emoji for: %s"% name)
path = "emoji/%s/" % name
files = os.listdir(path)
for name in files: #This gets alll files
try:
shutil.copyfile(path+name,"emoji/all/"+name) #Then copies them into the all folder
except Exception as e:
print(e)
pass
if __name__ == '__main__':
main()
| 3.359375 | 3 |
String/640.One Edit Distance/Solution_DP.py | Zhenye-Na/LxxxCode | 12 | 3753 | class Solution:
"""
@param s: a string
@param t: a string
@return: true if they are both one edit distance apart or false
"""
def isOneEditDistance(self, s, t):
# write your code here
if s == t:
return False
if abs(len(s) - len(t)) > 1:
return False
n, m = len(s), len(t)
f = [[0] * (m + 1) for _ in range(2)]
for j in range(m + 1):
f[0][j] = j
for i in range(1, n + 1):
f[i % 2][0] = i
for j in range(1, m + 1):
if s[i - 1] == t[j - 1]:
f[i % 2][j] = min(f[(i - 1) % 2][j - 1],
f[(i - 1) % 2][j] + 1, f[i % 2][j - 1] + 1)
else:
f[i % 2][j] = min(f[(i - 1) % 2][j - 1] + 1,
f[(i - 1) % 2][j] + 1, f[i % 2][j - 1] + 1)
return f[n % 2][m] == 1
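
# Illustrative check: Solution().isOneEditDistance("teacher", "reacher") is True
# (one substitution), while identical strings return False via the early exit.
# The two-row rolling DP keeps memory at O(m) while time stays O(n * m).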
| 3.53125 | 4 |
nsq/__init__.py | jehiah/pynsq | 1 | 3754 | <gh_stars>1-10
from __future__ import absolute_import
import signal
import tornado.ioloop
import logging
from .protocol import (
Error,
unpack_response,
decode_message,
valid_topic_name,
valid_channel_name,
identify,
subscribe,
ready,
finish,
touch,
requeue,
nop,
pub,
mpub,
FRAME_TYPE_RESPONSE,
FRAME_TYPE_ERROR,
FRAME_TYPE_MESSAGE,
)
from .message import Message
from .backoff_timer import BackoffTimer
from .sync import SyncConn
from .async import AsyncConn
from .reader import Reader
from .legacy_reader import LegacyReader
from .writer import Writer
from .version import __version__ # NOQA
def _handle_term_signal(sig_num, frame):
logging.getLogger(__name__).info(
'TERM Signal handler called with signal %r', sig_num)
tornado.ioloop.IOLoop.instance().stop()
def run():
"""
Starts any instantiated :class:`nsq.Reader` or :class:`nsq.Writer`
"""
signal.signal(signal.SIGTERM, _handle_term_signal)
tornado.ioloop.IOLoop.instance().start()
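# Illustrative usage sketch -- not part of this module. The handler, topic,
# channel and lookupd address below are placeholders, following the typical
# pynsq Reader pattern:
#
#     import nsq
#
#     def handler(message):
#         print(message.body)
#         return True
#
#     nsq.Reader(message_handler=handler,
#                lookupd_http_addresses=['http://127.0.0.1:4161'],
#                topic='events', channel='worker', lookupd_poll_interval=15)
#     nsq.run()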
__author__ = "<NAME> <<EMAIL>>"
__all__ = ["Reader", "Writer", "run", "BackoffTimer", "Message", "Error", "LegacyReader",
"SyncConn", "AsyncConn", "unpack_response", "decode_message",
"identify", "subscribe", "ready", "finish", "touch", "requeue", "nop", "pub", "mpub",
"valid_topic_name", "valid_channel_name",
"FRAME_TYPE_RESPONSE", "FRAME_TYPE_ERROR", "FRAME_TYPE_MESSAGE"]
| 2.0625 | 2 |
scripts/summaryPlot.py | Hespian/ParFastKer | 3 | 3755 | import get_data_ours
import get_data_akiba
import get_data_NearLinear
import get_data_LinearTime
import os
import matplotlib.pyplot as plt
# graphs = ["uk-2002", "arabic-2005", "gsh-2015-tpd", "uk-2005", "it-2004", "sk-2005", "uk-2007-05", "webbase-2001", "asia.osm", "road_usa", "europe.osm", "rgg_n26_s0", "RHG-100000000-nodes-2000000000-edges", "delaunay_n24", "del26"]
graphs = ["uk-2002", "arabic-2005", "gsh-2015-tpd", "uk-2005", "it-2004", "sk-2005", "uk-2007-05", "webbase-2001", "asia.osm", "road_usa", "europe.osm", "rgg_n26_s0", "delaunay_n24", "del26"]
linearTimeDir = "../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs"
partitioningDir = "../../LinearTimeKernels/partitions"
ourTimeDir = "../../results/LinearTimeKernelsScalingAll"
nearLinearDir = "../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear"
akibaDir = "../../akiba_vertex_cover/results"
def getOurTimeAndSizeSequential(graph):
res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir)
result = dict()
result["time"] = res["sequential_quasikernel_time"] + res["lineartime_time"]
result["size"] = res["sequential_quasikernel_size"]
return result
def getOurTimeAndSizeParallel(graph):
res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir)
result = dict()
result["time"] = res["parallel_quasikernel_time"] + res["lineartime_time"] + res["partitioning_time"]
result["size"] = res["parallel_quasikernel_size"]
return result
def getAkibaTimeAndSize(graph):
return get_data_akiba.getAkibaTimeAndSize(graph, akibaDir)
def getNearLinearTimeAndSize(graph):
return get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir)
def getLinearTimeTimeAndSize(graph):
return get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir)
def minProperty(graph, prop):
oursequential = getOurTimeAndSizeSequential(graph)[prop]
ourparallel = getOurTimeAndSizeParallel(graph)[prop]
akiba = getAkibaTimeAndSize(graph)[prop]
nearLinear = getNearLinearTimeAndSize(graph)[prop]
linearTime = getLinearTimeTimeAndSize(graph)[prop]
data = [oursequential, ourparallel, akiba, nearLinear, linearTime]
# data = [oursequential, ourparallel, akiba, nearLinear]
data = filter(lambda x : x >= 0, data)
minimum = min(data)
if minimum == 0:
return 1
return minimum
oursizeSequential = []
ourtimeSequential = []
oursizeParallel = []
ourtimeParallel = []
akibasize = []
akibatime = []
nearlinearsize = []
nearlineartime = []
lineartimesize = []
lineartimetime = []
for graph in graphs:
minsize = getAkibaTimeAndSize(graph)["size"]
mintime = getAkibaTimeAndSize(graph)["time"]
oss = getOurTimeAndSizeSequential(graph)["size"] / minsize
# print(graph + "(sequential): " + str(getOurTimeAndSizeSequential(graph)["size"]))
ots = getOurTimeAndSizeSequential(graph)["time"] / mintime
if oss > 0 and ots > 0:
oursizeSequential.append(oss)
ourtimeSequential.append(ots)
osp = getOurTimeAndSizeParallel(graph)["size"] / minsize
# print(graph + "(parallel): " + str(getOurTimeAndSizeParallel(graph)["size"]))
otp = getOurTimeAndSizeParallel(graph)["time"] / mintime
if osp > 0 and otp > 0:
oursizeParallel.append(osp)
ourtimeParallel.append(otp)
aks = getAkibaTimeAndSize(graph)["size"] / minsize
akt = getAkibaTimeAndSize(graph)["time"] / mintime
if aks > 0 and akt > 0:
akibasize.append(aks)
akibatime.append(akt)
nls = getNearLinearTimeAndSize(graph)["size"] / minsize
nlt = getNearLinearTimeAndSize(graph)["time"] / mintime
if nls > 0 and nlt > 0:
nearlinearsize.append(nls)
nearlineartime.append(nlt)
lts = getLinearTimeTimeAndSize(graph)["size"] / minsize
ltt = getLinearTimeTimeAndSize(graph)["time"] / mintime
    if lts > 0 and ltt > 0:
lineartimesize.append(lts)
lineartimetime.append(ltt)
# print("We")
# print(oursizeSequential)
# print(ourtimeSequential)
# print("We (parallel)")
# print(oursizeParallel)
# print(ourtimeParallel)
# print("Akiba")
# print(akibasize)
# print(akibatime)
# print("NearLinear")
# print(nearlinearsize)
# print(nearlineartime)
# print("LinearTime")
# print(lineartimesize)
# print(lineartimetime)
plt.rc('font', size=14)
fig = plt.figure(figsize=(3.2, 2.4))
ax = fig.add_subplot(1,1,1)
plt.title("Summary", fontsize=14)
ax.set_yscale("log")
ax.set_xscale("log")
ax.scatter(ourtimeSequential, oursizeSequential, label="FastKer", marker="x", color="green")
ax.scatter(ourtimeParallel, oursizeParallel, label="ParFastKer", marker="+", color="black")
# ax.scatter(akibatime, akibasize, label="VCSolver", marker="^", edgecolors="blue", facecolors="none")
ax.scatter(nearlineartime, nearlinearsize, label="NearLinear", marker="o", edgecolors="red", facecolors="none")
ax.scatter(lineartimetime, lineartimesize, label="LinearTime", marker="^", edgecolors="magenta", facecolors="none")
plt.xlabel("time / VCSolver time")
plt.ylabel("size / VCSolver size")
plt.xticks([0.0001, 0.01, 1])
ax.legend(bbox_to_anchor=(0.35,-0.7), ncol=2, loc='lower center', frameon=False, borderaxespad=0., mode="expand")
plt.savefig("summaryplot_vcsolver_baseline.pdf", bbox_inches="tight")
# plt.show()
| 1.945313 | 2 |
bouncer/cli/base.py | lrnt/git-bouncer | 0 | 3756 | <gh_stars>0
import configparser
import sys
import inspect
from argparse import ArgumentParser, RawDescriptionHelpFormatter
def opt(*args, **kwargs):
def decorator(method):
if not hasattr(method, 'options'):
method.options = []
method.options.append((args, kwargs))
return method
return decorator
def noopts(method):
method.options = []
return method
class HelpMixin(object):
def help(self):
print('available commands:')
for name, command in self.commands.items():
description = str(command.__doc__ or '').strip('\n')
print(' ', name.ljust(10), description)
return 1
class SubParser(HelpMixin):
def __init__(self, commands):
self.commands = self._commands(commands)
def _commands(self, commands):
prog = sys.argv[0]
result = {}
for cmd in commands:
name = getattr(cmd, '_name', None)
if not name:
continue
cmd.prog = prog
result[name] = cmd
return result
def run(self):
args = sys.argv[1:]
for index, arg in enumerate(args):
if arg in self.commands.keys():
args.pop(index)
return self.commands[arg](args)
return self.help()
class Command(HelpMixin):
def __init__(self):
self.global_options = []
self.commands = self._methods_with_opts()
def _methods_with_opts(self):
result = {}
for name in dir(self):
if name.startswith('__'):
continue
method = getattr(self, name)
if not hasattr(method, 'options'):
continue
result[name] = method
return result
def _parse_args(self, method, args):
prog = '{} {} {}'.format(self.prog, self._name, method.__name__)
parser = ArgumentParser(
prog=prog,
description=(method.__doc__ or ''),
formatter_class=RawDescriptionHelpFormatter
)
for opt in method.options + self.global_options:
parser.add_argument(*opt[0], **opt[1])
return vars(parser.parse_args(args))
def _call_method(self, method, args):
# Find out which arguments the method expects
expected_args, _, _, _ = inspect.getargspec(method)
expected_args.remove('self')
self_args = self._parse_args(method, args)
method_args = {}
# Get the expected method arguments, ignore rest
for name in expected_args:
            if name in self_args:
                method_args[name] = self_args.pop(name)
# Put rest of the arguments in self
for name, value in self_args.items():
setattr(self, name, value)
self.pre_command()
return method(**method_args)
def __call__(self, args):
for index, arg in enumerate(args):
if arg in self.commands.keys():
args.pop(index)
return self._call_method(self.commands[arg], args)
return self.help()
def opt(self, *args, **kwargs):
self.global_options.append((args, kwargs))
def pre_command(self):
pass
class BaseCommand(Command):
def __init__(self):
super(BaseCommand, self).__init__()
self.opt(
'-c', dest='config_path', help='Configuration file',
default='~/.test.conf'
)
def pre_command(self):
config = configparser.ConfigParser()
config.read(self.config_path)
print(config.sections())
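# Illustrative usage sketch (hypothetical subcommand; not part of the original
# module). A concrete command subclasses BaseCommand, names itself via `_name`,
# and exposes methods decorated with @opt/@noopts:
#
#     class Repo(BaseCommand):
#         _name = 'repo'
#
#         @opt('--dry-run', dest='dry_run', action='store_true')
#         def sync(self, dry_run):
#             """Synchronise tracked repositories"""
#             print('would sync' if dry_run else 'syncing')
#
#     if __name__ == '__main__':
#         SubParser([Repo()]).run()
#
# which would then be invoked as e.g. `prog repo sync --dry-run -c my.conf`.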
| 2.609375 | 3 |
Examples/ExampleCodes_ssccoorriinngg.py | MahdadJafarzadeh/ssccoorriinngg | 2 | 3757 | <filename>Examples/ExampleCodes_ssccoorriinngg.py
#%% Import libs
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_validate
from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score
import h5py
import time
from ssccoorriinngg import ssccoorriinngg
import numpy as np
from sklearn.model_selection import cross_validate
#%% Picking featureset of interest and apply classification
Object = ssccoorriinngg(filename='', channel='', fs = 200, T = 30)
path = 'C:/PhD/ML in depression/'
fname = 'feat42_Fp1-Fp2_train'
feats = 'featureset'
labels = 'labels'
# Train set
X_train, y_train = Object.LoadFeatureSet(path, fname, feats, labels)
# Test set
fname = 'feat42_Fp1-Fp2_test'
X_test, y_test = Object.LoadFeatureSet(path, fname, feats, labels)
# Define the scoring criteria:
scoring = {'accuracy' : make_scorer(accuracy_score),
'precision' : make_scorer(precision_score),
'recall' : make_scorer(recall_score),
'f1_score' : make_scorer(f1_score)}
# Cross-validation using logistic Random Forests
y_pred_RF = Object.RandomForest_Modelling(X_train, y_train, X_test, y_test, scoring = scoring, n_estimators = 500, cv = 10)
Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_RF)
# Cross-validation using XGBoost
y_pred_xgb = Object.XGB_Modelling(X_train, y_train,X_test, y_test, scoring, n_estimators = 1000,
cv = 10 , max_depth=3, learning_rate=.1)
Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_xgb)
#%% Outcome measures
# Defien required metrics here:
Metrics = ['test_accuracy', 'test_precision', 'test_recall', 'test_f1_score']
for metric in Metrics:
#RF
r1 = results_RF[metric].mean()
std1 = results_RF[metric].std()
print(f'{metric} for RF is: {round(r1*100, 2)}+- {round(std1*100, 2)}')
# xgb
r2 = results_xgb[metric].mean()
std2 = results_xgb[metric].std()
print(f'{metric} for xgb is: {round(r2*100, 2)}+- {round(std2*100, 2)}')
# SVM
r3 = results_SVM[metric].mean()
std3 = results_SVM[metric].std()
print(f'{metric} for SVM is: {round(r3*100, 2)}+- {round(std3*100, 2)}')
# LR
r4 = results_LR[metric].mean()
std4 = results_LR[metric].std()
print(f'{metric} for LR is: {round(r4*100, 2)}+- {round(std4*100, 2)}')
#%% Applying Randomized grid search to find the best config. of RF
BestParams_RandomSearch, Bestsocre_RandomSearch ,means, stds, params= Object.RandomSearchRF(X, y,
estimator = RandomForestClassifier(), scoring = scoring,
n_estimators = [int(x) for x in np.arange(10, 500, 20)],
max_features = ['log2', 'sqrt'],
max_depth = [int(x) for x in np.arange(10, 100, 30)],
min_samples_split = [2, 5, 10],
min_samples_leaf = [1, 2, 4],
bootstrap = [True, False],
n_iter = 100, cv = 10)
#%% Test feature selection methods ##
# PCA
PCA_out = Object.FeatSelect_PCA(X, y, n_components = 5)
# Boruta
ranks_Boruta, Feat_selected_Boruta = Object.FeatSelect_Boruta(X, y, max_depth = 7)
# Lasso
Feat_selected_lasso = Object.FeatSelect_LASSO(X, y, C = 1)
#ANOVA
Feat_selected_ANOVA = Object.FeatSelect_ANOVA(X,y, k = 80)
#Recruisive
ranks_rec, Feat_selected_rec = Object.FeatSelect_Recrusive(X, y, k = 20)
#### NOW TEST CLASSIFIERS WITH SELECTED FEATS
results_RF = Object.RandomForest_Modelling(Feat_selected_Boruta, y, scoring = scoring, n_estimators = 200, cv = 10)
#%% Example save featureset
path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
Object.SaveFeatureSet(X, y, path = path, filename = 'feat42_N3')
#%% Example load features:
X, y= Object.LoadFeatureSet(path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/',
fname = 'feat42_N3_fp2-M1',
feats = 'featureset',
labels = 'labels')
#%% Combining some REM and SWS epochs
Object.CombineEpochs(directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/train_test/',
ch = 'fp1-M2', N3_fname = 'tr90_N3_fp1-M2_fp2-M1',
REM_fname = 'tr90_fp1-M2_fp2-M1',
saving = True, fname_save = 'tr90_N3&REM_fp1-M2')
#%% How to save some results?
directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/results/'
fname = '42feats_N3'
with h5py.File((directory+fname + '.h5'), 'w') as wf:
# Accuracies
dset = wf.create_dataset('acc_SVM', results_SVM['test_accuracy'].shape, data = results_SVM['test_accuracy'])
dset = wf.create_dataset('acc_LR' , results_LR['test_accuracy'].shape, data = results_LR['test_accuracy'])
dset = wf.create_dataset('acc_RF' , results_RF['test_accuracy'].shape, data = results_RF['test_accuracy'])
dset = wf.create_dataset('acc_xgb', results_xgb['test_accuracy'].shape, data = results_xgb['test_accuracy'])
# Precision
dset = wf.create_dataset('prec_SVM', results_SVM['test_precision'].shape, data = results_SVM['test_precision'])
dset = wf.create_dataset('prec_LR' , results_LR['test_precision'].shape, data = results_LR['test_precision'])
dset = wf.create_dataset('prec_RF' , results_RF['test_precision'].shape, data = results_RF['test_precision'])
dset = wf.create_dataset('prec_xgb', results_xgb['test_precision'].shape, data = results_xgb['test_precision'])
# Recall
dset = wf.create_dataset('rec_SVM', results_SVM['test_recall'].shape, data = results_SVM['test_recall'])
dset = wf.create_dataset('rec_LR' , results_LR['test_recall'].shape, data = results_LR['test_recall'])
dset = wf.create_dataset('rec_RF' , results_RF['test_recall'].shape, data = results_RF['test_recall'])
dset = wf.create_dataset('rec_xgb', results_xgb['test_recall'].shape, data = results_xgb['test_recall'])
# f1-score
dset = wf.create_dataset('f1_SVM', results_SVM['test_f1_score'].shape, data = results_SVM['test_f1_score'])
dset = wf.create_dataset('f1_LR' , results_LR['test_f1_score'].shape, data = results_LR['test_f1_score'])
dset = wf.create_dataset('f1_RF' , results_RF['test_f1_score'].shape, data = results_RF['test_f1_score'])
dset = wf.create_dataset('f1_xgb', results_xgb['test_f1_score'].shape, data = results_xgb['test_f1_score'])
#%% Extracting features from more than one channel:
tic = time.time()
########### Central electrodes #############
main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/"
save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
fname_C_N3 = (main_path+"tr90_N3_C3-M2_C4-M1.h5")
fname_C_REM = (main_path+"tr90_REM_C3-M2_C4-M1.h5")
ch_C4 = 'C4-M1'
ch_C3 = 'C3-M2'
Object_C3_REM = ML_Depression(filename=fname_C_REM, channel = ch_C3, fs = 200, T = 30)
X_C3_REM,y_C3_REM = Object_C3_REM.FeatureExtraction()
Object_C3_REM.SaveFeatureSet(X = X_C3_REM, y=y_C3_REM, path = save_path, filename = 'feat42_C3_REM')
Object_C4_REM = ML_Depression(filename=fname_C_REM, channel = ch_C4, fs = 200, T = 30)
X_C4_REM,y_C4_REM = Object_C4_REM.FeatureExtraction()
Object_C4_REM.SaveFeatureSet(X = X_C4_REM, y=y_C4_REM, path = save_path, filename = 'feat42_C4_REM')
Object_C3_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C3, fs = 200, T = 30)
X_C3_N3,y_C3_N3 = Object_C3_N3.FeatureExtraction()
Object_C3_N3.SaveFeatureSet(X = X_C3_N3, y=y_C3_N3, path = save_path, filename = 'feat42_C3_N3')
Object_C4_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C4, fs = 200, T = 30)
X_C4_N3,y_C4_N3 = Object_C4_N3.FeatureExtraction()
Object_C4_N3.SaveFeatureSet(X = X_C4_N3, y=y_C4_N3, path = save_path, filename = 'feat42_C4_N3')
########### Occipital electrodes #############
main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/"
fname_O_N3 = (main_path+"tr90_N3_O1-M2_O2-M1.h5")
fname_O_REM = (main_path+"tr90_REM_O1-M2_O2-M1.h5")
ch_O2 = 'O2-M1'
ch_O1 = 'O1-M2'
Object_O1_REM = ML_Depression(filename=fname_O_REM, channel = ch_O1, fs = 200, T = 30)
X_O1_REM,y_O1_REM = Object_O1_REM.FeatureExtraction()
Object_O1_REM.SaveFeatureSet(X = X_O1_REM, y=y_O1_REM, path = save_path, filename = 'feat42_O1_REM')
Object_O2_REM = ML_Depression(filename=fname_O_REM, channel = ch_O2, fs = 200, T = 30)
X_O2_REM,y_O2_REM = Object_O2_REM.FeatureExtraction()
Object_O2_REM.SaveFeatureSet(X = X_O2_REM, y=y_O2_REM, path = save_path, filename = 'feat42_O2_REM')
Object_O1_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O1, fs = 200, T = 30)
X_O1_N3,y_O1_N3 = Object_O1_N3.FeatureExtraction()
Object_O1_N3.SaveFeatureSet(X = X_O1_N3, y=y_O1_N3, path = save_path, filename = 'feat42_O1_N3')
Object_O2_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O2, fs = 200, T = 30)
X_O2_N3,y_O2_N3 = Object_O2_N3.FeatureExtraction()
Object_O2_N3.SaveFeatureSet(X = X_O2_N3, y=y_O2_N3, path = save_path, filename = 'feat42_O2_N3')
########### Fp electrodes #############
main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/"
fname_fp_N3 = (main_path+"tr90_N3_fp1-M2_fp2-M1.h5")
fname_fp_REM = (main_path+"tr90_REM_fp1-M2_fp2-M1.h5")
ch_fp2 = 'fp2-M1'
ch_fp1 = 'fp1-M2'
Object_fp1_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp1, fs = 200, T = 30)
X_fp1_REM,y_fp1_REM = Object_fp1_REM.FeatureExtraction()
Object_fp1_REM.SaveFeatureSet(X = X_fp1_REM, y=y_fp1_REM, path = save_path, filename = 'feat42_fp1_REM')
Object_fp2_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp2, fs = 200, T = 30)
X_fp2_REM,y_fp2_REM = Object_fp2_REM.FeatureExtraction()
Object_fp2_REM.SaveFeatureSet(X = X_fp2_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_fp2_REM')
Object_fp1_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp1, fs = 200, T = 30)
X_fp1_N3,y_fp1_N3 = Object_fp1_N3.FeatureExtraction()
Object_fp1_N3.SaveFeatureSet(X = X_fp1_N3, y=y_fp1_N3, path = save_path, filename = 'feat42_fp1_N3')
Object_fp2_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp2, fs = 200, T = 30)
X_fp2_N3,y_fp2_N3 = Object_fp2_N3.FeatureExtraction()
Object_fp2_N3.SaveFeatureSet(X = X_fp2_N3, y=y_fp2_N3, path = save_path, filename = 'feat42_fp2_N3')
toc = time.time()
print(f'time taken: {toc - tic}')
########## Concatenate all features #########
# RIGHT hemisphere - REM
X_rh_REM = np.column_stack((X_fp2_REM,X_C4_REM))
X_rh_REM = np.column_stack((X_rh_REM,X_O2_REM))
# RIGHT hemisphere - N3
X_rh_N3 = np.column_stack((X_fp2_N3,X_C4_N3))
X_rh_N3 = np.column_stack((X_rh_N3,X_O2_N3))
# LEFT hemisphere - REM
X_lh_REM = np.column_stack((X_fp1_REM,X_C3_REM))
X_lh_REM = np.column_stack((X_lh_REM,X_O1_REM))
# LEFT hemisphere - N3
X_lh_N3 = np.column_stack((X_fp1_N3,X_C3_N3))
X_lh_N3 = np.column_stack((X_lh_N3,X_O1_N3))
# Both sides - REM
X_REM = np.column_stack((X_rh_REM, X_lh_REM))
# Both sides - N3
X_N3 = np.column_stack((X_rh_N3, X_lh_N3))
# Combine SWS and REM
X_SWS_REM = np.row_stack((X_N3, X_REM))
y_SWS_REM = np.concatenate((y_fp2_N3, y_fp2_REM))
# SAVE ALL COMBINATIONS
Object = ML_Depression(filename='', channel='', fs = 200, T = 30)
# one hemisphere
Object.SaveFeatureSet(X = X_rh_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_rh_REM')
Object.SaveFeatureSet(X = X_lh_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_lh_REM')
Object.SaveFeatureSet(X = X_rh_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_rh_N3')
Object.SaveFeatureSet(X = X_lh_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_lh_N3')
# Both hemisphere
Object.SaveFeatureSet(X = X_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_N3')
Object.SaveFeatureSet(X = X_REM , y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_REM')
# Both hemispheres- SWS &REM combination
Object.SaveFeatureSet(X = X_SWS_REM , y=y_SWS_REM , path = save_path, filename = 'feat42_l&rh_N3&REM')
#%% Load features from different brain regions, sleep stage and combine them
Object = ML_Depression(filename='', channel='', fs = 200, T = 30)
path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
feats = 'featureset'
labels = 'labels'
# Pick right hemisphere N3
fname_rh_N3 = 'feat42_rh_N3'
X_rh_N3, y_rh_N3 = Object.LoadFeatureSet(path, fname_rh_N3, feats, labels)
# Pick left hemisphere N3
fname_lh_N3 = 'feat42_lh_N3'
X_lh_N3, y_lh_N3 = Object.LoadFeatureSet(path, fname_lh_N3, feats, labels)
# Pick right hemisphere REM
fname_rh_REM = 'feat42_rh_REM'
X_rh_REM, y_rh_REM = Object.LoadFeatureSet(path, fname_rh_REM, feats, labels)
# Pick LEFT hemisphere REM
fname_lh_REM = 'feat42_lh_REM'
X_lh_REM, y_lh_REM = Object.LoadFeatureSet(path, fname_lh_REM, feats, labels)
# Combine them
X_N3 = np.column_stack((X_rh_N3, X_lh_N3))
X_REM = np.column_stack((X_rh_REM, X_lh_REM))
# Save combination
Object.SaveFeatureSet(X = X_N3 , y=y_lh_N3 , path = save_path, filename = 'feat42_l&rh_N3')
Object.SaveFeatureSet(X = X_REM , y=y_lh_REM , path = save_path, filename = 'feat42_l&rh_REM')
| 2.5 | 2 |
igibson/examples/behavior/behavior_demo_collection.py | suresh-guttikonda/iGibson | 0 | 3758 | <reponame>suresh-guttikonda/iGibson
"""
Main BEHAVIOR demo collection entrypoint
"""
import argparse
import copy
import datetime
import os
import bddl
import numpy as np
import igibson
from igibson.activity.activity_base import iGBEHAVIORActivityInstance
from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings
from igibson.simulator import Simulator
from igibson.utils.ig_logging import IGLogWriter
POST_TASK_STEPS = 200
PHYSICS_WARMING_STEPS = 200
def parse_args():
scene_choices = [
"Beechwood_0_int",
"Beechwood_1_int",
"Benevolence_0_int",
"Benevolence_1_int",
"Benevolence_2_int",
"Ihlen_0_int",
"Ihlen_1_int",
"Merom_0_int",
"Merom_1_int",
"Pomaria_0_int",
"Pomaria_1_int",
"Pomaria_2_int",
"Rs_int",
"Wainscott_0_int",
"Wainscott_1_int",
]
task_id_choices = [0, 1]
parser = argparse.ArgumentParser(description="Run and collect an ATUS demo")
parser.add_argument(
"--task", type=str, required=True, nargs="?", help="Name of ATUS activity matching parent folder in bddl."
)
parser.add_argument(
"--task_id",
type=int,
required=True,
choices=task_id_choices,
nargs="?",
help="BDDL integer ID, matching suffix of bddl.",
)
parser.add_argument("--vr_log_path", type=str, help="Path (and filename) of vr log")
parser.add_argument(
"--scene", type=str, choices=scene_choices, nargs="?", help="Scene name/ID matching iGibson interactive scenes."
)
parser.add_argument("--disable_save", action="store_true", help="Whether to disable saving logfiles.")
parser.add_argument(
"--disable_scene_cache", action="store_true", help="Whether to disable using pre-initialized scene caches."
)
parser.add_argument("--profile", action="store_true", help="Whether to print profiling data.")
parser.add_argument(
"--no_vr", action="store_true", help="Whether to turn off VR recording and save random actions."
)
parser.add_argument("--max_steps", type=int, default=-1, help="Maximum number of steps to record before stopping.")
return parser.parse_args()
def main():
args = parse_args()
bddl.set_backend("iGibson")
collect_demo(
args.task,
args.task_id,
args.scene,
args.vr_log_path,
args.disable_save,
args.max_steps,
args.no_vr,
args.disable_scene_cache,
args.profile,
)
def collect_demo(
task,
task_id,
scene,
vr_log_path=None,
disable_save=False,
max_steps=-1,
no_vr=False,
disable_scene_cache=False,
profile=False,
):
# HDR files for PBR rendering
hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr")
hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr")
light_modulation_map_filename = os.path.join(
igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png"
)
background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg")
# VR rendering settings
vr_rendering_settings = MeshRendererSettings(
optimized=True,
fullscreen=False,
env_texture_filename=hdr_texture,
env_texture_filename2=hdr_texture2,
env_texture_filename3=background_texture,
light_modulation_map_filename=light_modulation_map_filename,
enable_shadow=True,
enable_pbr=True,
msaa=False,
light_dimming_factor=1.0,
)
# VR system settings
mode = "headless" if no_vr else "vr"
s = Simulator(
mode=mode,
rendering_settings=vr_rendering_settings,
vr_settings=VrSettings(use_vr=True),
physics_timestep=1 / 300.0,
render_timestep=1 / 30.0,
)
igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id)
scene_kwargs = None
online_sampling = True
if not disable_scene_cache:
scene_kwargs = {
"urdf_file": "{}_task_{}_{}_0_fixed_furniture".format(scene, task, task_id),
}
online_sampling = False
igbhvr_act_inst.initialize_simulator(
simulator=s, scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling
)
vr_agent = igbhvr_act_inst.simulator.robots[0]
if not no_vr:
vr_cs = VrConditionSwitcher(
igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction
)
log_writer = None
if not disable_save:
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
if vr_log_path is None:
vr_log_path = "{}_{}_{}_{}.hdf5".format(task, task_id, scene, timestamp)
log_writer = IGLogWriter(
s,
log_filepath=vr_log_path,
task=igbhvr_act_inst,
store_vr=False if no_vr else True,
vr_robot=vr_agent,
profiling_mode=profile,
filter_objects=True,
)
log_writer.set_up_data_storage()
satisfied_predicates_cached = {}
post_task_steps = copy.deepcopy(POST_TASK_STEPS)
physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS)
steps = 0
while max_steps < 0 or steps < max_steps:
igbhvr_act_inst.simulator.step(print_stats=profile)
task_done, satisfied_predicates = igbhvr_act_inst.check_success()
if no_vr:
if steps < 2:
action = np.zeros((28,))
action[19] = 1
action[27] = 1
else:
action = np.random.uniform(-0.01, 0.01, size=(28,))
else:
action = igbhvr_act_inst.simulator.gen_vr_robot_action()
if steps < physics_warming_steps:
action = np.zeros_like(action)
vr_agent.update(action)
if not no_vr:
if satisfied_predicates != satisfied_predicates_cached:
vr_cs.refresh_condition(switch=False)
satisfied_predicates_cached = satisfied_predicates
if igbhvr_act_inst.simulator.query_vr_event("right_controller", "overlay_toggle"):
vr_cs.refresh_condition()
if igbhvr_act_inst.simulator.query_vr_event("left_controller", "overlay_toggle"):
vr_cs.toggle_show_state()
if log_writer and not disable_save:
log_writer.process_frame()
if task_done:
post_task_steps -= 1
if post_task_steps == 0:
break
steps += 1
if log_writer and not disable_save:
log_writer.end_log_session()
s.disconnect()
if __name__ == "__main__":
main()
| 2 | 2 |
wagtail/wagtailadmin/menu.py | digitalmarmalade/wagtail | 1 | 3759 | <gh_stars>1-10
from django.utils.text import slugify
from django.utils.html import format_html
class MenuItem(object):
def __init__(self, label, url, name=None, classnames='', order=1000):
self.label = label
self.url = url
self.classnames = classnames
self.name = (name or slugify(unicode(label)))
self.order = order
def render_html(self):
return format_html(
u"""<li class="menu-{0}"><a href="{1}" class="{2}">{3}</a></li>""",
self.name, self.url, self.classnames, self.label)
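# Illustrative usage (hypothetical values, not part of the original module):
#
#     item = MenuItem('Explorer', '/admin/explorer/',
#                     classnames='icon icon-folder-open-inverse', order=100)
#     item.render_html()
#     # -> '<li class="menu-explorer"><a href="/admin/explorer/"
#     #     class="icon icon-folder-open-inverse">Explorer</a></li>'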
| 2.375 | 2 |
django_mfa/migrations/0001_initial.py | timgates42/django-mfa | 0 | 3760 | # Generated by Django 2.1.5 on 2019-03-26 11:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='U2FKey',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('last_used_at', models.DateTimeField(null=True)),
('public_key', models.TextField(unique=True)),
('key_handle', models.TextField()),
('app_id', models.TextField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='u2f_keys', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='UserOTP',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('otp_type', models.CharField(choices=[('HOTP', 'hotp'), ('TOTP', 'totp')], max_length=20)),
('secret_key', models.CharField(blank=True, max_length=100)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='UserRecoveryCodes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('secret_code', models.CharField(max_length=10)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_mfa.UserOTP')),
],
),
]
| 1.851563 | 2 |
app/logger_example/main.py | khanh-nguyen-code/my-collection | 0 | 3761 | from my_collection import logger
if __name__ == "__main__":
logger.now().debug("debug1")
logger.now().debug("debug2")
logger.now().info("hello1")
logger.now().info("hello2")
logger.now().with_field("key", "val").error("with field1")
logger.now().with_field("key", "val").error("with field2")
| 2.28125 | 2 |
robotframework_iperf3/__main__.py | scathaig/robotframework-iperf3 | 0 | 3762 | <gh_stars>0
import argparse
from robotremoteserver import RobotRemoteServer
from .iperf3 import Iperf3
if __name__ == '__main__':
# create commandline parser
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.prog = 'python3 -m robotframework_iperf3'
# add parser options
parser.add_argument(
"-a",
"--address",
type=str,
help="server listen address",
default='0.0.0.0')
parser.add_argument(
"-p",
"--port",
type=int,
help="server listen port",
default=8270)
args = parser.parse_args()
server = RobotRemoteServer(
Iperf3(),
host=args.address,
port=args.port
)
server.serve()
| 2.59375 | 3 |
h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_combine_frames_append_one_as_columns.py | ahmedengu/h2o-3 | 6,098 | 3763 | <filename>h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_combine_frames_append_one_as_columns.py
df8.cbind(df9)
# A B C D A0 B0 C0 D0
# ----- ------ ------ ------ ------ ----- ----- -----
# -0.09 0.944 0.160 0.271 -0.351 1.66 -2.32 -0.86
# -0.95 0.669 0.664 1.535 -0.633 -1.78 0.32 1.27
# 0.17 0.657 0.970 -0.419 -1.413 -0.51 0.64 -1.25
# 0.58 -0.516 -1.598 -1.346 0.711 1.09 0.05 0.63
# 1.04 -0.281 -0.411 0.959 -0.009 -0.47 0.41 -0.52
# 0.49 0.170 0.124 -0.170 -0.722 -0.79 -0.91 -2.09
# 1.42 -0.409 -0.525 2.155 -0.841 -0.19 0.13 0.63
# 0.94 1.192 -1.075 0.017 0.167 0.54 0.52 1.42
# -0.53 0.777 -1.090 -2.237 -0.693 0.24 -0.56 1.45
# 0.34 -0.456 -1.220 -0.456 -0.315 1.10 1.38 -0.05
#
# [100 rows x 8 columns] | 3.03125 | 3 |
FluentPython/dynamic_attr_and_prop/frozen_json.py | xu6148152/Binea_Python_Project | 0 | 3764 | <gh_stars>0
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from collections import abc
from keyword import iskeyword
class FronzenJSON:
def __init__(self, mapping):
self._data = {}
for key, value in mapping.items():
if iskeyword(key):
key += '_'
# self._data = dict(mapping)
self._data[key] = value
def __getattr__(self, name):
if hasattr(self._data, name):
return getattr(self._data, name)
else:
# return FronzenJSON.build(self._data[name])
return FronzenJSON(self._data[name])
@classmethod
def build(cls, obj):
if isinstance(obj, abc.Mapping):
return cls(obj)
        elif isinstance(obj, abc.MutableSequence):
return [cls.build(item) for item in obj]
else:
return obj
def __new__(cls, arg):
if isinstance(arg, abc.Mapping):
return super().__new__(cls)
elif isinstance(arg, abc.MutableSequence):
            return [cls(item) for item in arg]
else:
return arg
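if __name__ == "__main__":
    # Illustrative usage (not part of the original module): attribute access
    # walks the underlying dict, keyword keys get a trailing underscore, and
    # nested mappings/lists are wrapped on access.
    fj = FronzenJSON({'name': 'Ada', 'class': 1815, 'skills': [{'lang': 'math'}]})
    assert fj.name == 'Ada'
    assert fj.class_ == 1815
    assert fj.skills[0].lang == 'math'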
| 2.8125 | 3 |
pomdp_problems/tag/models/transition_model.py | Semanti1/pomdp_findit | 0 | 3765 | <reponame>Semanti1/pomdp_findit
"""The Tag problem. Implemented according to the paper `Anytime Point-Based
Approximations for Large POMDPs <https://arxiv.org/pdf/1110.0027.pdf>`_.
Transition model: the robot moves deterministically. The target's movement
depends on the robot: with Pr=0.8 the target moves away from the robot,
and with Pr=0.2 it stays at the same place. The target never
moves closer to the robot.
"""
import copy
import pomdp_py
import pomdp_problems.util as util
import pomdp_problems.tag.constants as constants
from pomdp_problems.tag.domain.action import *
class TagTransitionModel(pomdp_py.TransitionModel):
def __init__(self,
grid_map,
target_motion_policy):
self._grid_map = grid_map
self.target_motion_policy = target_motion_policy
@classmethod
def if_move_by(cls, grid_map, position, action):
if isinstance(action, MotionAction):
dx, dy = action.motion
next_position = (position[0] + dx,
position[1] + dy)
if grid_map.valid_pose(next_position):
return next_position
return position
def probability(self, next_state, state, action, **kwargs):
# Robot motion
expected_robot_position = TagTransitionModel.if_move_by(self._grid_map,
state.robot_position,
action)
if expected_robot_position != next_state.robot_position:
return constants.EPSILON
if isinstance(action, TagAction):
if next_state.target_position == next_state.robot_position:
if next_state.target_found:
return 1.0 - constants.EPSILON
else:
return constants.EPSILON
else:
if next_state.target_found:
return constants.EPSILON
else:
return 1.0 - constants.EPSILON
# Target motion
valid_target_motion_actions = self._grid_map.valid_motions(state.target_position)
return self.target_motion_policy.probability(next_state.target_position,
state.target_position,
state.robot_position,
valid_target_motion_actions)
def sample(self, state, action, argmax=False):
# Robot motion
next_state = copy.deepcopy(state)
next_state.robot_position = TagTransitionModel.if_move_by(self._grid_map,
state.robot_position,
action)
# If Tag action
if isinstance(action, TagAction):
if not state.target_found:
if state.robot_position == state.target_position:
next_state.target_found = True
return next_state
# Target motion
valid_target_motion_actions = self._grid_map.valid_motions(state.target_position)
if not argmax:
next_state.target_position = self.target_motion_policy.random(state.robot_position,
state.target_position,
valid_target_motion_actions)
else:
next_state.target_position = self.target_motion_policy.mpe(state.robot_position,
state.target_position,
valid_target_motion_actions)
return next_state
def argmax(self, state, action, **kwargs):
return self.sample(state, action, argmax=True)
| 2.40625 | 2 |
packit/fedpkg.py | bocekm/packit | 0 | 3766 | <gh_stars>0
# MIT License
#
# Copyright (c) 2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pathlib import Path
from typing import Optional
from packit.exceptions import PackitCommandFailedError
from packit.utils import commands # so we can mock utils
from packit.utils.logging import logger
class FedPKG:
"""
Part of the code is from release-bot:
https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py
"""
def __init__(
self, fas_username: str = None, directory: str = None, stage: bool = False
):
self.fas_username = fas_username
self.directory = directory
self.stage = stage
self.fedpkg_exec = "fedpkg-stage" if stage else "fedpkg"
def __repr__(self):
return (
"FedPKG("
f"fas_username='{self.fas_username}', "
f"directory='{self.directory}', "
f"stage='{self.stage}')"
)
def new_sources(self, sources="", fail=True):
if not Path(self.directory).is_dir():
raise Exception("Cannot access fedpkg repository:")
return commands.run_command_remote(
cmd=[self.fedpkg_exec, "new-sources", sources],
cwd=self.directory,
error_message="Adding new sources failed:",
fail=fail,
)
def build(
self,
scratch: bool = False,
nowait: bool = False,
koji_target: Optional[str] = None,
srpm_path: Optional[Path] = None,
):
"""
build in koji
:param scratch: scratch (temporary) build or not?
:param nowait: False == wait for the build to finish
:param koji_target: koji target to build in (`koji list-targets`)
:param srpm_path: use selected SRPM for build, not dist-git repo & ref
:return:
"""
cmd = [self.fedpkg_exec, "build"]
if scratch:
cmd.append("--scratch")
if nowait:
cmd.append("--nowait")
if koji_target:
cmd += ["--target", koji_target]
if srpm_path:
cmd += ["--srpm", str(srpm_path)]
try:
commands.run_command_remote(
cmd=cmd,
cwd=self.directory,
error_message="Submission of build to koji failed.",
fail=True,
)
except PackitCommandFailedError as ex:
# fail on the fedpkg side, the build is triggered
if (
"watch_tasks() got an unexpected keyword argument 'ki_handler'"
in ex.stderr_output
):
logger.info(
"The 'fedpkg build' command crashed which is a known issue: "
"the build is submitted in koji anyway."
)
logger.debug(ex.stdout_output)
else:
raise
def clone(self, package_name: str, target_path: str, anonymous: bool = False):
"""
clone a dist-git repo; this has to be done in current env
b/c we don't have the keytab in sandbox
"""
cmd = [self.fedpkg_exec]
if self.fas_username:
cmd += ["--user", self.fas_username]
cmd += ["-q", "clone"]
if anonymous:
cmd += ["-a"]
cmd += [package_name, target_path]
error_msg = (
f"Packit failed to clone the repository {package_name}; "
"please make sure that you are authorized to clone repositories "
"from Fedora dist-git - this may require SSH keys set up or "
"Kerberos ticket being active."
)
commands.run_command(cmd=cmd, error_message=error_msg)
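# Illustrative usage sketch -- not part of the original module; the package
# name, FAS user, paths and koji target below are made up:
#
#     fedpkg = FedPKG(fas_username="someuser", directory="/tmp/dist-git/packit")
#     fedpkg.clone("packit", "/tmp/dist-git/packit", anonymous=True)
#     fedpkg.build(scratch=True, nowait=True, koji_target="rawhide")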
| 1.742188 | 2 |
tests/test_MaskedArrayCollection.py | ahaldane/NDducktype_tests | 3 | 3767 | #!/usr/bin/env python
from ndarray_ducktypes.ArrayCollection import ArrayCollection
from ndarray_ducktypes.MaskedArray import MaskedArray
from ndarray_ducktypes.MaskedArrayCollection import MaskedArrayCollection
import numpy as np
# Tests for Masked ArrayCollections.
#
# First try: Simply make an arraycollection of MaskedArrays. Downside: this
# strategy does not give a "filled" method. Probably to get a masked
# ArrayCollection we should really subclass ArrayCollection to have a
# fill_value and a filled() method
#a = MaskedArray(np.arange(10), np.arange(10)%3)
#b = MaskedArray(np.arange(10.) + 13, np.arange(10)%2)
#c = ArrayCollection([('age', a), ('weight', b)])
#print(repr(c))
#c['age'] += 100
#print(repr(c))
## second try: Subclass of ArrayCollection
#c = MaskedArrayCollection([('age', a), ('weight', b)])
#print(repr(c))
#c['age'] += 100
#print(repr(c))
#print(repr(c.filled()))
| 3.09375 | 3 |
src/models/text_node.py | moevm/nosql1h19-text-graph | 0 | 3768 | <gh_stars>0
from neomodel import StructuredNode, StringProperty, JSONProperty, \
Relationship, IntegerProperty
import numpy as np
import re
from models.text_relation import TextRelation
__all__ = ['TextNode']
class TextNode(StructuredNode):
order_id = IntegerProperty(required=True, unique_index=True)
label = StringProperty(required=True)
text = StringProperty(required=True)
alg_results = JSONProperty()
link = Relationship('TextNode', 'ALG', model=TextRelation)
def short(self):
res = ''.join([word.strip() + ' '
for word in re.split(r'[\n ]', self.text, 5)[:5]])
return res
def describe(self):
return f"""
        <h1>Fragment: {self.order_id} </h1>
<table border="1" width=100%>
<caption>
                Node information
</caption>
<tr>
                <th>Number of characters</th>
<td>{self.character_num()}</td>
</tr>
<tr>
                <th>Number of words</th>
<td>{self.words_num()}</td>
</tr>
<tr>
                <th>Number of sentences</th>
<td>{self.sentences_num()}</td>
</tr>
<tr>
                <th>Number of relations</th>
<td>{len(self.link)}</td>
</tr>
</table>
"""
def preview(self, frag_num=0):
leading = 3
if frag_num > 0:
leading = int(np.floor(np.log10(frag_num))) + 1
if str(self.order_id) != str(self.label):
return f"{str(self.order_id).zfill(leading)}: " \
+ f"[{self.label}] {self.short()}..."
else:
return f"{str(self.order_id).zfill(leading)}: " \
+ f"[{self.label}] {self.short()}..."
return f"[{self.label}] {self.short()}..."
def words_num(self):
return len(self.text.split())
def character_num(self):
return len(self.text)
def sentences_num(self):
return len([s for s in self.text.split('.') if len(s) > 2])
| 2.390625 | 2 |
tests/test_bishop_generate.py | otaviocarvalho/chess-negamax | 6 | 3769 | import unittest
from .helpers import StubBoard, StubPiece, C, WHITE, BLACK
class TestBishopGenerate(unittest.TestCase):
def get_bishop(self, board, team, position):
from chess.models import Bishop
return Bishop(board, team, position)
def compare_list(self, expected, results):
compared = []
for e in expected:
for r in results:
if e[0] == r[0] and e[1] == r[1]:
compared.append(True)
break
else:
compared.append(False)
return compared
def test_generate_topright(self):
board = StubBoard()
board[C('h7')] = StubPiece(board, BLACK, C('h7'))
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
expected = [C('f5'), C('g6'), C('h7')]
correct = self.compare_list(expected, results)
self.assertTrue(all(correct))
def test_generate_topleft(self):
board = StubBoard()
board[C('c6')] = StubPiece(board, WHITE, C('c6'))
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
expected = [C('d5')]
correct = self.compare_list(expected, results)
self.assertTrue(all(correct))
expected = [C('c6')]
correct = self.compare_list(expected, results)
self.assertFalse(any(correct))
def test_generate_bottomleft(self):
board = StubBoard()
board[C('c2')] = StubPiece(board, BLACK, C('c2'))
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
expected = [C('d3'), C('c2')]
correct = self.compare_list(expected, results)
self.assertTrue(all(correct))
expected = [C('b1')]
correct = self.compare_list(expected, results)
self.assertFalse(any(correct))
def test_generate_bottomright(self):
board = StubBoard()
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
expected = [C('f3'), C('g2'), C('h1')]
correct = self.compare_list(expected, results)
self.assertTrue(all(correct))
def test_generate_amount(self):
board = StubBoard()
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
self.assertEqual(len(results), 13)
board = StubBoard()
board[C('c6')] = StubPiece(board, WHITE, C('c6'))
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
self.assertEqual(len(results), 10)
if __name__ == '__main__':
unittest.main() | 3 | 3 |
lib/fmdplugins/list_records.py | GonzaloAlvarez/py-ga-sysadmin | 2 | 3770 | from lib.fmd.namedentity import NamedEntity
from lib.fmd.decorators import Action, ListStage, GetStage
from lib.exceptions.workflow import EntryException
@Action(ListStage.DATAGATHERING)
def list_records(context, output):
output = []
if hasattr(context, 'filter'):
context.log.debug('Using filter [%s]' % context.filter)
entries = context.ddb.list(context.filter)
else:
entries = context.ddb.list()
return NamedEntity('records', entries)
| 2.125 | 2 |
pysoa/server/action/switched.py | zetahernandez/pysoa | 0 | 3771 | <gh_stars>0
from __future__ import (
absolute_import,
unicode_literals,
)
import abc
import six
from pysoa.server.internal.types import is_switch
__all__ = (
'SwitchedAction',
)
def _len(item):
# Safe length that won't raise an error on values that don't support length
return getattr(item, '__len__', lambda *_: -1)()
class _DefaultAction(object):
def __int__(self):
d = id(self)
return d if d < 0 else -d
def __eq__(self, other):
return getattr(other, '__class__', None) == _DefaultAction
class _SwitchedActionMetaClass(abc.ABCMeta):
def __new__(mcs, name, bases, body):
"""
Validate the switch_to_action_map when the class is created, instead of doing it every time the class
is instantiated. This identifies problems earlier (on import) and improves performance by not performing this
validation every time the action is called.
"""
cls = super(_SwitchedActionMetaClass, mcs).__new__(mcs, name, bases, body)
# noinspection PyUnresolvedReferences
if bases[0] is not object and (
not cls.switch_to_action_map or
not hasattr(cls.switch_to_action_map, '__iter__') or
_len(cls.switch_to_action_map) < 2 or
any(
True for i in cls.switch_to_action_map
if not hasattr(i, '__getitem__') or _len(i) != 2 or not is_switch(i[0]) or not callable(i[1])
)
):
raise ValueError(
'Class attribute switch_to_action_map must be an iterable of at least two indexable items, each '
'with exactly two indexes, where the first element is a switch and the second element is an action '
'(callable).'
)
return cls
@six.add_metaclass(_SwitchedActionMetaClass)
class SwitchedAction(object):
"""
A specialized action that defers to other, concrete actions based on request switches. Subclasses must not
override any methods and must override `switch_to_action_map`. `switch_to_action_map` should be some iterable
object that provides `__len__` (such as a tuple [recommended] or list). Its items must be indexable objects that
provide `__len__` (such as a tuple [recommended] or list) and have exactly two elements.
For each item in `switch_to_action_map`, the first element must be a switch that provides `__int__` (such as an
actual integer) or a switch that provides an attribute `value` which, itself, provides `__int__` (or is an int).
The second element must be an action, such as an action class (e.g. one that extends `Action`) or any callable
that accepts a server settings object and returns a new callable that, itself, accepts an `ActionRequest` object
and returns an `ActionResponse` object or raises an `ActionError`.
`switch_to_action_map` must have at least two items in it. `SwitchedAction` will iterate over that list, checking
the first element (switch) of each item to see if it is enabled in the request. If it is, the second element (the
action) of that item will be deferred to. If it finds no items whose switches are enabled, it will use the very
last action in `switch_to_action_map`. As such, you can treat the last item as a default, and its switch could
simply be `SwitchedAction.DEFAULT_ACTION` (although, this is not required: it could also be a valid switch, and
it would still be treated as the default in the case that no other items matched).
Example usage:
.. code-block:: python
class UserActionV1(Action):
...
class UserActionV2(Action):
...
class UserTransitionAction(SwitchedAction):
switch_to_action_map = (
(USER_VERSION_2_ENABLED, UserActionV2),
(SwitchedAction.DEFAULT_ACTION, UserActionV1),
)
"""
DEFAULT_ACTION = _DefaultAction()
switch_to_action_map = ()
def __init__(self, settings=None):
"""
Construct a new action. Concrete classes should not override this.
:param settings: The server settings object
:type settings: dict
"""
if self.__class__ is SwitchedAction:
raise TypeError('Cannot instantiate abstract SwitchedAction')
self.settings = settings
def get_uninitialized_action(self, action_request):
"""
Get the raw action (such as the action class or the base action callable) without instantiating/calling
it, based on the switches in the action request, or the default raw action if no switches were present or
no switches matched.
:param action_request: The request object
:type action_request: EnrichedActionRequest
:return: The action
:rtype: callable
"""
last_action = None
matched_action = None
default_action = None
for switch, action in self.switch_to_action_map:
if switch == self.DEFAULT_ACTION:
default_action = action
elif switch and action_request.switches.is_active(switch):
matched_action = action
break
else:
last_action = action
return matched_action or default_action or last_action
def __call__(self, action_request):
"""
Main entry point for actions from the `Server` (or potentially from tests). Finds the appropriate real action
to invoke based on the switches enabled in the request, initializes the action with the server settings, and
then calls the action with the request object, returning its response directly.
:param action_request: The request object
:type action_request: EnrichedActionRequest
:return: The response object
:rtype: ActionResponse
:raise: ActionError, ResponseValidationError
"""
return self.get_uninitialized_action(action_request)(self.settings)(action_request)
| 2.53125 | 3 |
Seeder/settings/tests.py | WebarchivCZ/Seeder | 8 | 3772 | from .base import *
SECRET_KEY = 'test'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'sqlite3.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
},
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
} | 1.46875 | 1 |
blobStore.py | odeke-em/resty | 0 | 3773 | #!/usr/bin/env python3
# Author: <NAME> <<EMAIL>>
# This example steps you through using resty & restAssured to save pickled/serialized
# data as a blob and then later re-using it in after deserialization.
# Sample usage might be in collaborative computing, e.g. publishing results from an
# expensive computation on one machine so that other machines can load them as live data.
def testSerializer():
import Serializer
bs = Serializer.BinarySerializer()
js = Serializer.JSONSerializer()
data = dict((i, i) for i in range(10))
bserial = bs.serialize(data)
jserial = js.serialize(data)
bdserial = bs.deserialize(bserial)
jdserial = js.deserialize(jserial)
print('bdserial', bdserial)
ioS = bs.ioStream(bserial)
ioR = ioS.read()
print('ioS data from the stream', ioR)
def testCloudPassagePickledVersion():
from entrails.cloudPassage import CloudPassageHandler
cc = CloudPassageHandler()
data = dict((i, i*10) for i in range(9))
title = 'Dict of items 0-8999, keys i*10'
res = cc.push(data, title=title, asPickle=True)
pulledObj = cc.pull(metaData='pickle')
print('PulledObj', pulledObj, data)
assert(pulledObj == data)
rmTry = cc.removeTrace(data, asPickle=True)
print(rmTry)
def testCloudPassageJSONVersion():
from entrails.cloudPassage import CloudPassageHandler
cc = CloudPassageHandler()
data = dict((str(i), i*10) for i in range(9))
title = 'Dict of items 0-8999, keys i*10'
res = cc.push(data, title=title, asPickle=False)
pulledObj = cc.pull(metaData='json')
print('PulledObj', pulledObj, data)
assert(pulledObj == data)
rmTry = cc.removeTrace(data)
print(rmTry)
def main():
testSerializer()
testCloudPassageJSONVersion()
testCloudPassagePickledVersion()
if __name__ == '__main__':
main()
| 2.96875 | 3 |
venv/Lib/site-packages/dataframe/_dataframe_column_set.py | kavanAdeshara/Expense_Tracker | 0 | 3774 | # dataframe: a data-frame implementation using method piping
#
# Copyright (C) 2016 <NAME>
#
# This file is part of dataframe.
#
# dataframe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dataframe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dataframe. If not, see <http://www.gnu.org/licenses/>.
#
#
# @author = '<NAME>'
# @email = '<EMAIL>'
from itertools import chain
import tabulate
from ._dataframe_column import DataFrameColumn
from ._dataframe_row import DataFrameRow
class DataFrameColumnSet:
def __init__(self, **kwargs):
self.__data_columns = []
self.__nrow = -1
self.cbind(**kwargs)
def __getitem__(self, item):
if isinstance(item, int):
return self.__data_columns[item]
raise ValueError("Item should be integer!")
def __iter__(self):
for col in self.__data_columns:
yield col
def __str__(self):
stri = "\nA dataframe"
ta = []
for col in self.__data_columns:
vals = col.values
if len(vals) > 10:
vals = list(chain(vals[:3], "...", vals[-3:]))
ta.append(vals)
ta = tabulate.tabulate(zip(*ta), headers=self.colnames)
return stri + "\n\n" + ta.__str__()
@property
def nrow(self):
return self.__nrow
@property
def ncol(self):
return len(self.colnames)
@property
def colnames(self):
return [x.colname for x in self.__data_columns]
def rows(self, idxs):
return [self.row(i) for i in idxs]
def row(self, idx):
"""
Returns DataFrameRow of the DataFrame given its index.
:param idx: the index of the row in the DataFrame.
:return: returns a DataFrameRow
"""
return DataFrameRow(idx, [x[idx] for x in self], self.colnames)
def which_colnames(self, *args):
idx = []
for i in range(len(self.__data_columns)):
if self.colnames[i] in args:
idx.append(i)
return idx
def cbind(self, **columns):
keys = sorted([x for x in columns.keys()])
for k in keys:
self.__cbind(DataFrameColumn(str(k), columns.get(k)))
def __cbind(self, column):
if column.colname in self.colnames:
ValueError("Appending duplicate col-name!")
self.__data_columns.append(column)
self.__nrow = self.__data_columns[-1].size()
for col in self.__data_columns:
if col.size() != self.__nrow:
raise ValueError("Columns do not have equal lengths!")
| 3.359375 | 3 |
java/image.bzl | Springworks/rules_docker | 0 | 3775 | # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A rule for creating a Java container image.
The signature of java_image is compatible with java_binary.
The signature of war_image is compatible with java_library.
"""
load(
"//container:container.bzl",
"container_pull",
_repositories = "repositories",
)
# Load the resolved digests.
load(
":java.bzl",
_JAVA_DIGESTS = "DIGESTS",
)
load(
":jetty.bzl",
_JETTY_DIGESTS = "DIGESTS",
)
def repositories():
# Call the core "repositories" function to reduce boilerplate.
# This is idempotent if folks call it themselves.
_repositories()
excludes = native.existing_rules().keys()
if "java_image_base" not in excludes:
container_pull(
name = "java_image_base",
registry = "gcr.io",
repository = "distroless/java",
digest = _JAVA_DIGESTS["latest"],
)
if "java_debug_image_base" not in excludes:
container_pull(
name = "java_debug_image_base",
registry = "gcr.io",
repository = "distroless/java",
digest = _JAVA_DIGESTS["debug"],
)
if "jetty_image_base" not in excludes:
container_pull(
name = "jetty_image_base",
registry = "gcr.io",
repository = "distroless/java/jetty",
digest = _JETTY_DIGESTS["latest"],
)
if "jetty_debug_image_base" not in excludes:
container_pull(
name = "jetty_debug_image_base",
registry = "gcr.io",
repository = "distroless/java/jetty",
digest = _JETTY_DIGESTS["debug"],
)
if "servlet_api" not in excludes:
native.maven_jar(
name = "javax_servlet_api",
artifact = "javax.servlet:javax.servlet-api:3.0.1",
)
DEFAULT_JAVA_BASE = select({
"@io_bazel_rules_docker//:fastbuild": "@java_image_base//image",
"@io_bazel_rules_docker//:debug": "@java_debug_image_base//image",
"@io_bazel_rules_docker//:optimized": "@java_image_base//image",
"//conditions:default": "@java_image_base//image",
})
DEFAULT_JETTY_BASE = select({
"@io_bazel_rules_docker//:fastbuild": "@jetty_image_base//image",
"@io_bazel_rules_docker//:debug": "@jetty_debug_image_base//image",
"@io_bazel_rules_docker//:optimized": "@jetty_image_base//image",
"//conditions:default": "@jetty_image_base//image",
})
load(
"//container:container.bzl",
_container = "container",
)
def java_files(f):
files = []
if java_common.provider in f:
java_provider = f[java_common.provider]
files += list(java_provider.transitive_runtime_jars)
if hasattr(f, "files"): # a jar file
files += list(f.files)
return files
load(
"//lang:image.bzl",
"dep_layer_impl",
"layer_file_path",
)
def _jar_dep_layer_impl(ctx):
"""Appends a layer for a single dependency's runfiles."""
return dep_layer_impl(ctx, runfiles = java_files)
jar_dep_layer = rule(
attrs = dict(_container.image.attrs.items() + {
# The base image on which to overlay the dependency layers.
"base": attr.label(mandatory = True),
# The dependency whose runfiles we're appending.
"dep": attr.label(mandatory = True),
# Whether to lay out each dependency in a manner that is agnostic
# of the binary in which it is participating. This can increase
# sharing of the dependency's layer across images, but requires a
# symlink forest in the app layers.
"agnostic_dep_layout": attr.bool(default = True),
# Override the defaults.
"directory": attr.string(default = "/app"),
# https://github.com/bazelbuild/bazel/issues/2176
"data_path": attr.string(default = "."),
}.items()),
executable = True,
outputs = _container.image.outputs,
implementation = _jar_dep_layer_impl,
)
def _jar_app_layer_impl(ctx):
"""Appends the app layer with all remaining runfiles."""
available = depset()
for jar in ctx.attr.jar_layers:
available += java_files(jar)
# We compute the set of unavailable stuff by walking deps
# in the same way, adding in our binary and then subtracting
    # out what is available.
unavailable = depset()
for jar in ctx.attr.deps + ctx.attr.runtime_deps:
unavailable += java_files(jar)
unavailable += java_files(ctx.attr.binary)
unavailable = [x for x in unavailable if x not in available]
classpath = ":".join([
layer_file_path(ctx, x)
for x in available + unavailable
])
# Classpaths can grow long and there is a limit on the length of a
# command line, so mitigate this by always writing the classpath out
# to a file instead.
classpath_file = ctx.new_file(ctx.attr.name + ".classpath")
ctx.actions.write(classpath_file, classpath)
binary_path = layer_file_path(ctx, ctx.files.binary[0])
classpath_path = layer_file_path(ctx, classpath_file)
entrypoint = [
"/usr/bin/java",
"-cp",
# Support optionally passing the classpath as a file.
"@" + classpath_path if ctx.attr._classpath_as_file else classpath,
] + ctx.attr.jvm_flags + [ctx.attr.main_class] + ctx.attr.args
file_map = {
layer_file_path(ctx, f): f
for f in unavailable + [classpath_file]
}
return _container.image.implementation(
ctx,
# We use all absolute paths.
directory = "/",
file_map = file_map,
entrypoint = entrypoint,
)
jar_app_layer = rule(
attrs = dict(_container.image.attrs.items() + {
# The binary target for which we are synthesizing an image.
"binary": attr.label(mandatory = True),
# The full list of dependencies that have their own layers
# factored into our base.
"jar_layers": attr.label_list(),
# The rest of the dependencies.
"deps": attr.label_list(),
"runtime_deps": attr.label_list(),
"jvm_flags": attr.string_list(),
# The base image on which to overlay the dependency layers.
"base": attr.label(mandatory = True),
# The main class to invoke on startup.
"main_class": attr.string(mandatory = True),
# Whether to lay out each dependency in a manner that is agnostic
# of the binary in which it is participating. This can increase
# sharing of the dependency's layer across images, but requires a
# symlink forest in the app layers.
"agnostic_dep_layout": attr.bool(default = True),
# Whether the classpath should be passed as a file.
"_classpath_as_file": attr.bool(default = False),
# Override the defaults.
"directory": attr.string(default = "/app"),
# https://github.com/bazelbuild/bazel/issues/2176
"data_path": attr.string(default = "."),
"legacy_run_behavior": attr.bool(default = False),
}.items()),
executable = True,
outputs = _container.image.outputs,
implementation = _jar_app_layer_impl,
)
def java_image(
name,
base = None,
main_class = None,
deps = [],
runtime_deps = [],
layers = [],
jvm_flags = [],
**kwargs):
"""Builds a container image overlaying the java_binary.
Args:
layers: Augments "deps" with dependencies that should be put into
their own layers.
**kwargs: See java_binary.
"""
binary_name = name + ".binary"
native.java_binary(
name = binary_name,
main_class = main_class,
# If the rule is turning a JAR built with java_library into
# a binary, then it will appear in runtime_deps. We are
# not allowed to pass deps (even []) if there is no srcs
# kwarg.
deps = (deps + layers) or None,
runtime_deps = runtime_deps,
jvm_flags = jvm_flags,
**kwargs
)
base = base or DEFAULT_JAVA_BASE
for index, dep in enumerate(layers):
this_name = "%s.%d" % (name, index)
jar_dep_layer(name = this_name, base = base, dep = dep)
base = this_name
visibility = kwargs.get("visibility", None)
jar_app_layer(
name = name,
base = base,
binary = binary_name,
main_class = main_class,
jvm_flags = jvm_flags,
deps = deps,
runtime_deps = runtime_deps,
jar_layers = layers,
visibility = visibility,
args = kwargs.get("args"),
)
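# --- Illustrative BUILD usage (added sketch; not part of the original file) ---
# Because java_image keeps java_binary's signature, a BUILD file would use it
# roughly as below. The load label and target names are assumptions for
# illustration only.
#
#   load("@io_bazel_rules_docker//java:image.bzl", "java_image")
#
#   java_image(
#       name = "app_image",
#       srcs = ["Main.java"],
#       main_class = "com.example.Main",
#       # deps listed under `layers` get their own cacheable image layers
#       layers = ["//common:util"],
#       deps = ["//lib:helper"],
#   )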
def _war_dep_layer_impl(ctx):
"""Appends a layer for a single dependency's runfiles."""
# TODO(mattmoor): Today we run the risk of filenames colliding when
# they get flattened. Instead of just flattening and using basename
# we should use a file_map based scheme.
return _container.image.implementation(
ctx,
files = java_files(ctx.attr.dep),
)
_war_dep_layer = rule(
attrs = dict(_container.image.attrs.items() + {
# The base image on which to overlay the dependency layers.
"base": attr.label(mandatory = True),
# The dependency whose runfiles we're appending.
"dep": attr.label(mandatory = True),
# Whether to lay out each dependency in a manner that is agnostic
# of the binary in which it is participating. This can increase
# sharing of the dependency's layer across images, but requires a
# symlink forest in the app layers.
"agnostic_dep_layout": attr.bool(default = True),
# Override the defaults.
"directory": attr.string(default = "/jetty/webapps/ROOT/WEB-INF/lib"),
# WE WANT PATHS FLATTENED
# "data_path": attr.string(default = "."),
}.items()),
executable = True,
outputs = _container.image.outputs,
implementation = _war_dep_layer_impl,
)
def _war_app_layer_impl(ctx):
"""Appends the app layer with all remaining runfiles."""
available = depset()
for jar in ctx.attr.jar_layers:
available += java_files(jar)
# This is based on rules_appengine's WAR rules.
transitive_deps = depset()
transitive_deps += java_files(ctx.attr.library)
# TODO(mattmoor): Handle data files.
# If we start putting libs in servlet-agnostic paths,
# then consider adding symlinks here.
files = [d for d in transitive_deps if d not in available]
return _container.image.implementation(ctx, files = files)
_war_app_layer = rule(
attrs = dict(_container.image.attrs.items() + {
# The library target for which we are synthesizing an image.
"library": attr.label(mandatory = True),
# The full list of dependencies that have their own layers
# factored into our base.
"jar_layers": attr.label_list(),
# The base image on which to overlay the dependency layers.
"base": attr.label(mandatory = True),
"entrypoint": attr.string_list(default = []),
# Whether to lay out each dependency in a manner that is agnostic
# of the binary in which it is participating. This can increase
# sharing of the dependency's layer across images, but requires a
# symlink forest in the app layers.
"agnostic_dep_layout": attr.bool(default = True),
# Override the defaults.
"directory": attr.string(default = "/jetty/webapps/ROOT/WEB-INF/lib"),
# WE WANT PATHS FLATTENED
# "data_path": attr.string(default = "."),
"legacy_run_behavior": attr.bool(default = False),
}.items()),
executable = True,
outputs = _container.image.outputs,
implementation = _war_app_layer_impl,
)
def war_image(name, base = None, deps = [], layers = [], **kwargs):
"""Builds a container image overlaying the java_library as an exploded WAR.
TODO(mattmoor): For `bazel run` of this to be useful, we need to be able
to ctrl-C it and have the container actually terminate. More information:
https://github.com/bazelbuild/bazel/issues/3519
Args:
layers: Augments "deps" with dependencies that should be put into
their own layers.
**kwargs: See java_library.
"""
library_name = name + ".library"
native.java_library(name = library_name, deps = deps + layers, **kwargs)
base = base or DEFAULT_JETTY_BASE
for index, dep in enumerate(layers):
this_name = "%s.%d" % (name, index)
_war_dep_layer(name = this_name, base = base, dep = dep)
base = this_name
visibility = kwargs.get("visibility", None)
tags = kwargs.get("tags", None)
_war_app_layer(
name = name,
base = base,
library = library_name,
jar_layers = layers,
visibility = visibility,
tags = tags,
)
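# --- Illustrative BUILD usage (added sketch; not part of the original file) ---
# war_image mirrors java_library and serves the exploded WAR via the Jetty
# base image selected above. Target names below are hypothetical; the
# @javax_servlet_api repository is the one declared by repositories().
#
#   load("@io_bazel_rules_docker//java:image.bzl", "war_image")
#
#   war_image(
#       name = "servlet_image",
#       srcs = ["MyServlet.java"],
#       layers = [
#           "@javax_servlet_api//jar",
#           "//common:lib",
#       ],
#   )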
| 1.789063 | 2 |
cupyx/jit/_builtin_funcs.py | khushi-411/cupy | 0 | 3776 | import warnings
import cupy
from cupy_backends.cuda.api import runtime
from cupy.cuda import device
from cupyx.jit import _cuda_types
from cupyx.jit._internal_types import BuiltinFunc
from cupyx.jit._internal_types import Data
from cupyx.jit._internal_types import Constant
from cupyx.jit._internal_types import Range
from cupyx.jit import _compile
from functools import reduce
class RangeFunc(BuiltinFunc):
def __call__(self, *args, unroll=None):
"""Range with loop unrolling support.
Args:
start (int):
Same as that of built-in :obj:`range`.
stop (int):
Same as that of built-in :obj:`range`.
step (int):
Same as that of built-in :obj:`range`.
unroll (int or bool or None):
- If `True`, add ``#pragma unroll`` directive before the
loop.
- If `False`, add ``#pragma unroll(1)`` directive before
the loop to disable unrolling.
- If an `int`, add ``#pragma unroll(n)`` directive before
the loop, where the integer ``n`` means the number of
iterations to unroll.
- If `None` (default), leave the control of loop unrolling
to the compiler (no ``#pragma``).
.. seealso:: `#pragma unroll`_
.. _#pragma unroll:
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#pragma-unroll
"""
super().__call__()
def call(self, env, *args, unroll=None):
if len(args) == 0:
raise TypeError('range expected at least 1 argument, got 0')
elif len(args) == 1:
start, stop, step = Constant(0), args[0], Constant(1)
elif len(args) == 2:
start, stop, step = args[0], args[1], Constant(1)
elif len(args) == 3:
start, stop, step = args
else:
raise TypeError(
f'range expected at most 3 argument, got {len(args)}')
if unroll is not None:
if not all(isinstance(x, Constant)
for x in (start, stop, step, unroll)):
raise TypeError(
'loop unrolling requires constant start, stop, step and '
'unroll value')
unroll = unroll.obj
if not (isinstance(unroll, int) or isinstance(unroll, bool)):
raise TypeError(
'unroll value expected to be of type int, '
f'got {type(unroll).__name__}')
if unroll is False:
unroll = 1
if not (unroll is True or 0 < unroll < 1 << 31):
warnings.warn(
'loop unrolling is ignored as the unroll value is '
'non-positive or greater than INT_MAX')
if isinstance(step, Constant):
step_is_positive = step.obj >= 0
elif step.ctype.dtype.kind == 'u':
step_is_positive = True
else:
step_is_positive = None
stop = Data.init(stop, env)
start = Data.init(start, env)
step = Data.init(step, env)
if start.ctype.dtype.kind not in 'iu':
raise TypeError('range supports only for integer type.')
if stop.ctype.dtype.kind not in 'iu':
raise TypeError('range supports only for integer type.')
if step.ctype.dtype.kind not in 'iu':
raise TypeError('range supports only for integer type.')
if env.mode == 'numpy':
ctype = _cuda_types.Scalar(int)
elif env.mode == 'cuda':
ctype = stop.ctype
else:
assert False
return Range(start, stop, step, ctype, step_is_positive, unroll=unroll)
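# --- Illustrative usage sketch (added; not part of the original module) ---
# How the range object above is typically used inside a JIT'ed kernel. It is
# assumed here that RangeFunc() is exposed publicly as `cupyx.jit.range_` (see
# the bottom of this module); note that `unroll` requires compile-time constant
# start/stop/step, which is why the inner loop bounds are literals.
#
#   from cupyx import jit
#
#   @jit.rawkernel()
#   def block_sums(x, out, size):
#       tid = jit.threadIdx.x + jit.blockIdx.x * jit.blockDim.x
#       if tid * 16 + 15 < size:
#           s = 0.0
#           for j in jit.range_(0, 16, 1, unroll=True):  # constant bounds -> unrollable
#               s = s + x[tid * 16 + j]
#           out[tid] = s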
class LenFunc(BuiltinFunc):
def call(self, env, *args, **kwds):
if len(args) != 1:
raise TypeError(f'len() expects only 1 argument, got {len(args)}')
if kwds:
raise TypeError('keyword arguments are not supported')
arg = args[0]
if not isinstance(arg.ctype, _cuda_types.CArray):
raise TypeError('len() supports only array type')
if not arg.ctype.ndim:
raise TypeError('len() of unsized array')
return Data(f'static_cast<long long>({arg.code}.shape()[0])',
_cuda_types.Scalar('q'))
class MinFunc(BuiltinFunc):
def call(self, env, *args, **kwds):
if len(args) < 2:
raise TypeError(
f'min() expects at least 2 arguments, got {len(args)}')
if kwds:
raise TypeError('keyword arguments are not supported')
return reduce(lambda a, b: _compile._call_ufunc(
cupy.minimum, (a, b), None, env), args)
class MaxFunc(BuiltinFunc):
def call(self, env, *args, **kwds):
if len(args) < 2:
raise TypeError(
f'max() expects at least 2 arguments, got {len(args)}')
if kwds:
raise TypeError('keyword arguments are not supported')
return reduce(lambda a, b: _compile._call_ufunc(
cupy.maximum, (a, b), None, env), args)
class SyncThreads(BuiltinFunc):
def __call__(self):
"""Calls ``__syncthreads()``.
.. seealso:: `Synchronization functions`_
.. _Synchronization functions:
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions
"""
super().__call__()
def call_const(self, env):
return Data('__syncthreads()', _cuda_types.void)
class SyncWarp(BuiltinFunc):
def __call__(self, *, mask=0xffffffff):
"""Calls ``__syncwarp()``.
Args:
mask (int): Active threads in a warp. Default is 0xffffffff.
.. seealso:: `Synchronization functions`_
.. _Synchronization functions:
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions
"""
super().__call__()
def call(self, env, *, mask=None):
if runtime.is_hip:
if mask is not None:
warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning)
mask = None
if mask:
if isinstance(mask, Constant):
if not (0x0 <= mask.obj <= 0xffffffff):
raise ValueError('mask is out of range')
mask = _compile._astype_scalar(
mask, _cuda_types.int32, 'same_kind', env)
mask = Data.init(mask, env)
code = f'__syncwarp({mask.code})'
else:
code = '__syncwarp()'
return Data(code, _cuda_types.void)
class SharedMemory(BuiltinFunc):
def __call__(self, dtype, size, alignment=None):
"""Allocates shared memory and returns it as a 1-D array.
Args:
dtype (dtype):
The dtype of the returned array.
size (int or None):
If ``int`` type, the size of static shared memory.
If ``None``, declares the shared memory with extern specifier.
alignment (int or None): Enforce the alignment via __align__(N).
"""
super().__call__()
def call_const(self, env, dtype, size, alignment=None):
name = env.get_fresh_variable_name(prefix='_smem')
child_type = _cuda_types.Scalar(dtype)
while env[name] is not None:
name = env.get_fresh_variable_name(prefix='_smem') # retry
var = Data(name, _cuda_types.SharedMem(child_type, size, alignment))
env.decls[name] = var
env.locals[name] = var
return Data(name, _cuda_types.Ptr(child_type))
class AtomicOp(BuiltinFunc):
def __init__(self, op, dtypes):
self._op = op
self._name = 'atomic' + op
self._dtypes = dtypes
doc = f"""Calls the ``{self._name}`` function to operate atomically on
``array[index]``. Please refer to `Atomic Functions`_ for detailed
explanation.
Args:
array: A :class:`cupy.ndarray` to index over.
index: A valid index such that the address to the corresponding
array element ``array[index]`` can be computed.
value: Represent the value to use for the specified operation. For
the case of :obj:`atomic_cas`, this is the value for
``array[index]`` to compare with.
alt_value: Only used in :obj:`atomic_cas` to represent the value
to swap to.
.. seealso:: `Numba's corresponding atomic functions`_
.. _Atomic Functions:
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions
.. _Numba's corresponding atomic functions:
https://numba.readthedocs.io/en/stable/cuda-reference/kernel.html#synchronization-and-atomic-operations
"""
self.__doc__ = doc
def __call__(self, array, index, value, alt_value=None):
super().__call__()
def call(self, env, array, index, value, value2=None):
name = self._name
op = self._op
array = Data.init(array, env)
if not isinstance(array.ctype, (_cuda_types.CArray, _cuda_types.Ptr)):
raise TypeError('The first argument must be of array type.')
target = _compile._indexing(array, index, env)
ctype = target.ctype
if ctype.dtype.name not in self._dtypes:
raise TypeError(f'`{name}` does not support {ctype.dtype} input.')
# On HIP, 'e' is not supported and we will never reach here
if (op == 'Add' and ctype.dtype.char == 'e'
and runtime.runtimeGetVersion() < 10000):
raise RuntimeError(
'float16 atomic operation is not supported before CUDA 10.0.')
value = _compile._astype_scalar(value, ctype, 'same_kind', env)
value = Data.init(value, env)
if op == 'CAS':
assert value2 is not None
# On HIP, 'H' is not supported and we will never reach here
if ctype.dtype.char == 'H':
if runtime.runtimeGetVersion() < 10010:
raise RuntimeError(
'uint16 atomic operation is not supported before '
'CUDA 10.1')
if int(device.get_compute_capability()) < 70:
raise RuntimeError(
'uint16 atomic operation is not supported before '
'sm_70')
value2 = _compile._astype_scalar(value2, ctype, 'same_kind', env)
value2 = Data.init(value2, env)
code = f'{name}(&{target.code}, {value.code}, {value2.code})'
else:
assert value2 is None
code = f'{name}(&{target.code}, {value.code})'
return Data(code, ctype)
class GridFunc(BuiltinFunc):
def __init__(self, mode):
if mode == 'grid':
self._desc = 'Compute the thread index in the grid.'
self._eq = 'jit.threadIdx.x + jit.blockIdx.x * jit.blockDim.x'
self._link = 'numba.cuda.grid'
self._code = 'threadIdx.{n} + blockIdx.{n} * blockDim.{n}'
elif mode == 'gridsize':
self._desc = 'Compute the grid size.'
self._eq = 'jit.blockDim.x * jit.gridDim.x'
self._link = 'numba.cuda.gridsize'
self._code = 'blockDim.{n} * gridDim.{n}'
else:
raise ValueError('unsupported function')
doc = f""" {self._desc}
Computation of the first integer is as follows::
{self._eq}
and for the other two integers the ``y`` and ``z`` attributes are used.
Args:
ndim (int): The dimension of the grid. Only 1, 2, or 3 is allowed.
Returns:
int or tuple:
If ``ndim`` is 1, an integer is returned, otherwise a tuple.
.. note::
This function follows the convention of Numba's
:func:`{self._link}`.
"""
self.__doc__ = doc
def __call__(self, ndim):
super().__call__()
def call_const(self, env, ndim):
if not isinstance(ndim, int):
raise TypeError('ndim must be an integer')
# Numba convention: for 1D we return a single variable,
# otherwise a tuple
if ndim == 1:
return Data(self._code.format(n='x'), _cuda_types.uint32)
elif ndim == 2:
dims = ('x', 'y')
elif ndim == 3:
dims = ('x', 'y', 'z')
else:
raise ValueError('Only ndim=1,2,3 are supported')
elts_code = ', '.join(self._code.format(n=n) for n in dims)
ctype = _cuda_types.Tuple([_cuda_types.uint32]*ndim)
return Data(f'thrust::make_tuple({elts_code})', ctype)
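# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal kernel combining grid() above with atomic_add (defined further
# below in this module). The exact public names under `cupyx.jit` are
# assumptions; the launch configuration is chosen to cover all elements.
#
#   import cupy
#   from cupyx import jit
#
#   @jit.rawkernel()
#   def count_positive(x, counter, size):
#       i = jit.grid(1)          # threadIdx.x + blockIdx.x * blockDim.x
#       if i < size:
#           if x[i] > 0:
#               jit.atomic_add(counter, 0, 1)   # counter[0] += 1, atomically
#
#   x = cupy.arange(1 << 20, dtype=cupy.float32) - 1000.0
#   counter = cupy.zeros(1, dtype=cupy.int32)
#   nthreads = 256
#   nblocks = (x.size + nthreads - 1) // nthreads
#   count_positive((nblocks,), (nthreads,), (x, counter, x.size))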
class WarpShuffleOp(BuiltinFunc):
def __init__(self, op, dtypes):
self._op = op
self._name = '__shfl_' + (op + '_' if op else '') + 'sync'
self._dtypes = dtypes
doc = f"""Calls the ``{self._name}`` function. Please refer to
`Warp Shuffle Functions`_ for detailed explanation.
.. _Warp Shuffle Functions:
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#warp-shuffle-functions
"""
self.__doc__ = doc
def __call__(self, mask, var, val_id, *, width=32):
super().__call__()
def call(self, env, mask, var, val_id, *, width=None):
name = self._name
var = Data.init(var, env)
ctype = var.ctype
if ctype.dtype.name not in self._dtypes:
raise TypeError(f'`{name}` does not support {ctype.dtype} input.')
try:
mask = mask.obj
except Exception:
raise TypeError('mask must be an integer')
if runtime.is_hip:
warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning)
elif not (0x0 <= mask <= 0xffffffff):
raise ValueError('mask is out of range')
# val_id refers to "delta" for shfl_{up, down}, "srcLane" for shfl, and
# "laneMask" for shfl_xor
if self._op in ('up', 'down'):
val_id_t = _cuda_types.uint32
else:
val_id_t = _cuda_types.int32
val_id = _compile._astype_scalar(val_id, val_id_t, 'same_kind', env)
val_id = Data.init(val_id, env)
if width:
if isinstance(width, Constant):
if width.obj not in (2, 4, 8, 16, 32):
raise ValueError('width needs to be power of 2')
else:
width = Constant(64) if runtime.is_hip else Constant(32)
width = _compile._astype_scalar(
width, _cuda_types.int32, 'same_kind', env)
width = Data.init(width, env)
code = f'{name}({hex(mask)}, {var.code}, {val_id.code}'
code += f', {width.code})'
return Data(code, ctype)
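# --- Illustrative usage sketch (added; not part of the original module) ---
# A classic warp-level sum reduction with shfl_down_sync (instantiated near the
# bottom of this module); laneid() is also defined later in this file. This
# sketch assumes a CUDA warp size of 32 and one input element per thread.
#
#   @jit.rawkernel()
#   def warp_sums(x, out):
#       i = jit.threadIdx.x + jit.blockIdx.x * jit.blockDim.x
#       v = x[i]
#       v = v + jit.shfl_down_sync(0xffffffff, v, 16)
#       v = v + jit.shfl_down_sync(0xffffffff, v, 8)
#       v = v + jit.shfl_down_sync(0xffffffff, v, 4)
#       v = v + jit.shfl_down_sync(0xffffffff, v, 2)
#       v = v + jit.shfl_down_sync(0xffffffff, v, 1)
#       if jit.laneid() == 0:
#           out[i // 32] = v    # one partial sum per warp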
class LaneID(BuiltinFunc):
def __call__(self):
"""Returns the lane ID of the calling thread, ranging in
``[0, jit.warpsize)``.
.. note::
Unlike :obj:`numba.cuda.laneid`, this is a callable function
instead of a property.
"""
super().__call__()
def _get_preamble(self):
preamble = '__device__ __forceinline__ unsigned int LaneId() {'
if not runtime.is_hip:
# see https://github.com/NVIDIA/cub/blob/main/cub/util_ptx.cuh#L419
preamble += """
unsigned int ret;
asm ("mov.u32 %0, %%laneid;" : "=r"(ret) );
return ret; }
"""
else:
# defined in hip/hcc_detail/device_functions.h
preamble += """
return __lane_id(); }
"""
return preamble
def call_const(self, env):
env.generated.add_code(self._get_preamble())
return Data('LaneId()', _cuda_types.uint32)
builtin_functions_dict = {
range: RangeFunc(),
len: LenFunc(),
min: MinFunc(),
max: MaxFunc(),
}
range_ = RangeFunc()
syncthreads = SyncThreads()
syncwarp = SyncWarp()
shared_memory = SharedMemory()
grid = GridFunc('grid')
gridsize = GridFunc('gridsize')
laneid = LaneID()
# atomic functions
atomic_add = AtomicOp(
'Add',
('int32', 'uint32', 'uint64', 'float32', 'float64')
+ (() if runtime.is_hip else ('float16',)))
atomic_sub = AtomicOp(
'Sub', ('int32', 'uint32'))
atomic_exch = AtomicOp(
'Exch', ('int32', 'uint32', 'uint64', 'float32'))
atomic_min = AtomicOp(
'Min', ('int32', 'uint32', 'uint64'))
atomic_max = AtomicOp(
'Max', ('int32', 'uint32', 'uint64'))
atomic_inc = AtomicOp(
'Inc', ('uint32',))
atomic_dec = AtomicOp(
'Dec', ('uint32',))
atomic_cas = AtomicOp(
'CAS',
('int32', 'uint32', 'uint64')
+ (() if runtime.is_hip else ('uint16',)))
atomic_and = AtomicOp(
'And', ('int32', 'uint32', 'uint64'))
atomic_or = AtomicOp(
'Or', ('int32', 'uint32', 'uint64'))
atomic_xor = AtomicOp(
'Xor', ('int32', 'uint32', 'uint64'))
# warp-shuffle functions
_shfl_dtypes = (
('int32', 'uint32', 'int64', 'float32', 'float64')
+ (() if runtime.is_hip else ('uint64', 'float16')))
shfl_sync = WarpShuffleOp('', _shfl_dtypes)
shfl_up_sync = WarpShuffleOp('up', _shfl_dtypes)
shfl_down_sync = WarpShuffleOp('down', _shfl_dtypes)
shfl_xor_sync = WarpShuffleOp('xor', _shfl_dtypes)
| 2.234375 | 2 |
python-basic-grammer/python-basic/02-python-variables-and-string/string_strip_demo.py | jinrunheng/base-of-python | 0 | 3777 | # Removing whitespace from a string
str1 = " hello "
print(str1)
print(len(str1))
# strip spaces from both ends
print(str1.strip())
print(len(str1.strip()))
# strip spaces from the left side
print(str1.lstrip())
print(len(str1.lstrip()))
# strip spaces from the right side
print(str1.rstrip())
print(len(str1.rstrip())) | 3.984375 | 4 |
bruges/util/__init__.py | hyperiongeo/bruges | 0 | 3778 | <filename>bruges/util/__init__.py<gh_stars>0
# -*- coding: utf-8 -*-
from .util import rms
from .util import moving_average
from .util import moving_avg_conv
from .util import moving_avg_fft
from .util import normalize
from .util import next_pow2
from .util import top_and_tail
from .util import extrapolate
from .util import nearest
from .util import deprecated
from .util import apply_along_axis
from .util import sigmoid
| 1.375 | 1 |
toontown/estate/DistributedHouseDoor.py | CrankySupertoon01/Toontown-2 | 1 | 3779 | <gh_stars>1-10
from toontown.toonbase.ToonBaseGlobal import *
from panda3d.core import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from direct.distributed import DistributedObject
from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.MessengerGlobal import messenger
from direct.fsm import ClassicFSM
from toontown.building import DistributedDoor
from toontown.hood import ZoneUtil
from toontown.suit import Suit
from toontown.building import FADoorCodes
from toontown.building import DoorTypes
from toontown.estate.DistributedHouse import DistributedHouse
class DistributedHouseDoor(DistributedDoor.DistributedDoor):
def __init__(self, cr):
DistributedDoor.DistributedDoor.__init__(self, cr)
def disable(self):
DistributedDoor.DistributedDoor.disable(self)
self.ignoreAll()
def setZoneIdAndBlock(self, zoneId, block):
self.houseId = block
DistributedDoor.DistributedDoor.setZoneIdAndBlock(self, zoneId, block)
def getTriggerName(self):
return 'door_trigger_' + str(self.houseId)
def hideDoorParts(self):
try:
self.findDoorNode('doorFrameHoleRight').hide()
self.findDoorNode('doorFrameHoleLeft').hide()
except:
pass
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
if self.doorType == DoorTypes.EXT_STANDARD:
house = base.cr.doId2do.get(self.houseId)
if not isinstance(house, DistributedHouse):
self.notify.error('tried to use {0} as house'.format(house.__class__.__name__))
if house and house.house_loaded:
self.__gotRelatedHouse()
else:
self.acceptOnce('houseLoaded-%d' % self.houseId, self.__gotRelatedHouse)
elif self.doorType == DoorTypes.INT_STANDARD:
door = render.find('**/leftDoor;+s')
if door.isEmpty():
self.acceptOnce('houseInteriorLoaded-%d' % self.zoneId, self.__gotRelatedHouse)
else:
self.__gotRelatedHouse()
def __gotRelatedHouse(self):
self.doPostAnnounceGenerate()
self.bHasFlat = not self.findDoorNode('door*flat', True).isEmpty()
self.hideDoorParts()
building = self.getBuilding()
doorTrigger = building.find('**/door_trigger*')
doorTrigger.setName(self.getTriggerName())
self.accept(self.getEnterTriggerEvent(), self.doorTrigger)
self.acceptOnce('clearOutToonInterior', self.doorTrigger)
self.zoneDoneLoading = 0
def getBuilding(self, allowEmpty = False):
if 'building' not in self.__dict__:
if self.doorType == DoorTypes.INT_STANDARD:
door = render.find('**/leftDoor;+s')
self.building = door.getParent()
elif self.doorType == DoorTypes.EXT_STANDARD:
if self.houseId:
self.building = self.cr.playGame.hood.loader.houseId2house.get(self.houseId, None)
if allowEmpty:
return self.building
return self.building
def isInterior(self):
if self.doorType == DoorTypes.INT_STANDARD:
return 1
return 0
def getDoorNodePath(self):
if self.doorType == DoorTypes.INT_STANDARD:
otherNP = render.find('**/door_origin')
elif self.doorType == DoorTypes.EXT_STANDARD:
building = self.getBuilding()
otherNP = building.find('**/door')
if otherNP.isEmpty():
otherNP = building.find('**/door_origin')
else:
self.notify.error('No such door type as ' + str(self.doorType))
return otherNP
def enterClosing(self, ts):
doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight')
if doorFrameHoleRight.isEmpty():
self.notify.warning('enterClosing(): did not find doorFrameHoleRight')
return
rightDoor = self.findDoorNode('rightDoor')
if rightDoor.isEmpty():
self.notify.warning('enterClosing(): did not find rightDoor')
return
otherNP = self.getDoorNodePath()
trackName = 'doorClose-%d' % self.doId
if self.rightSwing:
h = 100
else:
h = -100
self.finishDoorTrack()
        self.doorTrack = Sequence(
            LerpHprInterval(nodePath=rightDoor, duration=1.0, hpr=VBase3(0, 0, 0),
                            startHpr=VBase3(h, 0, 0), other=otherNP, blendType='easeInOut'),
            Func(doorFrameHoleRight.hide),
            Func(self.hideIfHasFlat, rightDoor),
            SoundInterval(self.closeSfx, node=rightDoor),
            name=trackName)
self.doorTrack.start(ts)
if hasattr(self, 'done'):
base.cr.playGame.hood.loader.setHouse(self.houseId)
zoneId = self.otherZoneId
if self.doorType == DoorTypes.EXT_STANDARD:
whereTo = 'house'
else:
whereTo = 'estate'
request = {'loader': 'safeZoneLoader',
'where': whereTo,
'how': 'doorIn',
'hoodId': ToontownGlobals.MyEstate,
'zoneId': zoneId,
'shardId': None,
'avId': -1,
'allowRedirect': 0,
'doorDoId': self.otherDoId}
messenger.send('doorDoneEvent', [request])
return
| 1.851563 | 2 |
Neuro-Cognitive Models/Runs/Nonhier_run/res_nonhier.py | AGhaderi/spatial_attenNCM | 0 | 3780 | #!/home/a.ghaderi/.conda/envs/envjm/bin/python
# Model 2
import pystan
import pandas as pd
import numpy as np
import sys
sys.path.append('../../')
import utils
parts = 1
data = utils.get_data()  # load the dataset
data = data[data['participant']==parts]
mis = np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0] # indices of missing n200lat data
obs = np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0] # indices of observed n200lat data
N_mis = mis.shape[0] # number of missing data
N_obs = obs.shape[0] # number of observed data
modelfile = '../../stans/res_nonhier.stan' # path to the Stan model
f = open(modelfile, 'r')
model_wiener = f.read()
sm = pystan.StanModel(model_code=model_wiener)  # compile the Stan model
ncohers = 2 #Number of coherence conditions
nspats = 2 #Number of spatial conditions
nconds = 4 #Number of conditions
y = data['y'].to_numpy()
cond_coher = data['cond_coher'].to_numpy()
cond_spat = data['cond_spat'].to_numpy()
conds = data['conds'].to_numpy()
n200lat = data['n200lat'].to_numpy()
# set input data for the Stan model
data_winner = {'N_obs':N_obs, #Number of trial-level observations
               'N_mis':N_mis, #Number of trial-level missing data
               'ncohers':ncohers, #Number of coherence conditions
               'nspats':nspats, #Number of spatial conditions
               'nconds':nconds, #Number of conditions
               'y':np.concatenate([y[obs],y[mis]]), #acc*rt in seconds for observed and missing data
               'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]), #Coherence index for each trial
               'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]), #Spatial index for each trial
               'conds':np.concatenate([conds[obs],conds[mis]]), #Condition index for each trial
               'n200lat_obs':n200lat[obs]}; #n200 latency for each observed trial
# setting MCMC arguments
niter = 10000
nwarmup = 4000
nchains = 1
thin = 1
initials = [] # initial sampling
for c in range(0, nchains):
chaininit = {
'delta': np.random.uniform(1, 3, size=ncohers),
'alpha': np.random.uniform(.5, 1.),
'eta': np.random.uniform(.01, .2),
'res': np.random.uniform(.01, .02, size=nspats),
'n200sub': np.random.uniform(.11, .2, size=nconds),
'lambda': np.random.uniform(.01, .02),
'n200lat_mis': np.random.uniform(.11, .2, size = N_mis)
}
initials.append(chaininit)
# Train the model and generate samples
fit = sm.sampling(data=data_winner, iter=niter, chains=nchains, warmup=nwarmup, thin=thin, init=initials)
utils.to_pickle(stan_model=sm, stan_fit=fit, save_path='../../save/nonhier/'+str(parts)+'_res_nonhier.pkl')
| 2.328125 | 2 |
rq_dashboard/dashboard.py | refgenomics/rq-dashboard | 0 | 3781 | <reponame>refgenomics/rq-dashboard<gh_stars>0
from redis import Redis
from redis import from_url
from rq import push_connection, pop_connection
from rq.job import Job
from functools import wraps
import times
from flask import Blueprint
from flask import current_app, url_for, abort
from flask import render_template
from rq import Queue, Worker
from rq import cancel_job, requeue_job
from rq import get_failed_queue
from math import ceil
dashboard = Blueprint('rq_dashboard', __name__,
template_folder='templates',
static_folder='static',
)
@dashboard.before_request
def authentication_hook():
""" Allow the parent app to authenticate user's access to the dashboard
with it's own auth_handler method that must return True or False
"""
auth_handler = current_app.extensions['rq-dashboard'].auth_handler
if auth_handler and not auth_handler():
abort(401)
@dashboard.before_app_first_request
def setup_rq_connection():
if current_app.config.get('REDIS_URL'):
current_app.redis_conn = from_url(current_app.config.get('REDIS_URL'))
else:
current_app.redis_conn = Redis(host=current_app.config.get('REDIS_HOST', 'localhost'),
port=current_app.config.get('REDIS_PORT', 6379),
password=current_app.config.get('REDIS_PASSWORD', None),
db=current_app.config.get('REDIS_DB', 0))
@dashboard.before_request
def push_rq_connection():
push_connection(current_app.redis_conn)
@dashboard.teardown_request
def pop_rq_connection(exception=None):
pop_connection()
def jsonify(f):
@wraps(f)
def _wrapped(*args, **kwargs):
from flask import jsonify as flask_jsonify
try:
result_dict = f(*args, **kwargs)
except Exception as e:
result_dict = dict(status='error')
if current_app.config['DEBUG']:
result_dict['reason'] = str(e)
from traceback import format_exc
result_dict['exc_info'] = format_exc()
return flask_jsonify(**result_dict)
return _wrapped
def serialize_queues(queues):
return [dict(name=q.name, count=q.count, url=url_for('.overview',
queue_name=q.name)) for q in queues]
def serialize_date(dt):
if dt is None:
return None
return times.format(dt, 'UTC')
def serialize_job(job):
return dict(
id=job.id,
created_at=serialize_date(job.created_at),
enqueued_at=serialize_date(job.enqueued_at),
ended_at=serialize_date(job.ended_at),
origin=job.origin,
result=job._result,
exc_info=job.exc_info,
description=job.description)
def remove_none_values(input_dict):
return dict([ (k,v) for k,v in input_dict.items() if v is not None ])
def pagination_window(total_items, cur_page, per_page=5, window_size=10):
all_pages = range(1, int(ceil(total_items / float(per_page))) + 1)
    result = all_pages
    if window_size >= 1:
pages_window_start = int(max(0, min(len(all_pages) - window_size, (cur_page-1) - ceil(window_size / 2.0))))
pages_window_end = int(pages_window_start + window_size)
result = all_pages[pages_window_start:pages_window_end]
return result
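# Worked example of the window above (added illustration): with 100 jobs and
# the default 5 per page there are 20 pages, and the 10-page window is centred
# on the current page but clamped at both ends, e.g.
#
#   pagination_window(100, 1)   -> pages 1..10
#   pagination_window(100, 10)  -> pages 5..14
#   pagination_window(100, 20)  -> pages 11..20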
@dashboard.route('/', defaults={'queue_name': None, 'page': '1'})
@dashboard.route('/<queue_name>', defaults={'page': '1'})
@dashboard.route('/<queue_name>/<page>')
def overview(queue_name, page):
if queue_name is None:
# Show the failed queue by default if it contains any jobs
failed = Queue('failed')
if not failed.is_empty():
queue = failed
else:
queue = Queue()
else:
queue = Queue(queue_name)
return render_template('rq_dashboard/dashboard.html',
workers=Worker.all(),
queue=queue,
page=page,
queues=Queue.all(),
rq_url_prefix=url_for('.overview'))
@dashboard.route('/job/<job_id>/cancel', methods=['POST'])
@jsonify
def cancel_job_view(job_id):
rq_job = Job.fetch(job_id)
if rq_job.status == "failed":
rq_job.delete()
else:
rq_job.cancel()
return dict(status='OK')
@dashboard.route('/job/<job_id>/requeue', methods=['POST'])
@jsonify
def requeue_job_view(job_id):
requeue_job(job_id)
return dict(status='OK')
@dashboard.route('/requeue-all', methods=['GET', 'POST'])
@jsonify
def requeue_all():
fq = get_failed_queue()
job_ids = fq.job_ids
count = len(job_ids)
for job_id in job_ids:
requeue_job(job_id)
return dict(status='OK', count=count)
@dashboard.route('/queue/<queue_name>/empty', methods=['POST'])
@jsonify
def empty_queue(queue_name):
q = Queue(queue_name)
q.empty()
return dict(status='OK')
@dashboard.route('/queue/<queue_name>/compact', methods=['POST'])
@jsonify
def compact_queue(queue_name):
q = Queue(queue_name)
q.compact()
return dict(status='OK')
@dashboard.route('/queues.json')
@jsonify
def list_queues():
queues = serialize_queues(sorted(Queue.all()))
return dict(queues=queues)
@dashboard.route('/jobs/<queue_name>/<page>.json')
@jsonify
def list_jobs(queue_name, page):
current_page = int(page)
queue = Queue(queue_name)
per_page = 5
total_items = queue.count
pages_numbers_in_window = pagination_window(total_items, current_page, per_page)
pages_in_window = [ dict(number=p, url=url_for('.overview',
queue_name=queue_name, page=p)) for p in pages_numbers_in_window ]
last_page = int(ceil(total_items / float(per_page)))
prev_page = None
if current_page > 1:
prev_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page-1)))
next_page = None
if current_page < last_page:
next_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page+1)))
pagination = remove_none_values(
dict(pages_in_window=pages_in_window,
next_page=next_page,
prev_page=prev_page))
offset = (current_page - 1) * per_page
jobs = [serialize_job(job) for job in queue.get_jobs(offset, per_page)]
return dict(name=queue.name, jobs=jobs, pagination=pagination)
@dashboard.route('/workers.json')
@jsonify
def list_workers():
def serialize_queue_names(worker):
return [q.name for q in worker.queues]
workers = [dict(name=worker.name, queues=serialize_queue_names(worker),
state=worker.get_state()) for worker in Worker.all()]
return dict(workers=workers)
@dashboard.context_processor
def inject_interval():
interval = current_app.config.get('RQ_POLL_INTERVAL', 2500)
return dict(poll_interval=interval)
| 2.21875 | 2 |
layers/layer1_python3/0300_acquisition/acquisition/__init__.py | moas/mfdata | 0 | 3782 | from acquisition.step import AcquisitionStep
from acquisition.stats import AcquisitionStatsDClient
from acquisition.move_step import AcquisitionMoveStep
from acquisition.delete_step import AcquisitionDeleteStep
from acquisition.batch_step import AcquisitionBatchStep
from acquisition.reinject_step import AcquisitionReinjectStep
from acquisition.fork_step import AcquisitionForkStep
from acquisition.archive_step import AcquisitionArchiveStep
from acquisition.listener import AcquisitionListener
__all__ = ['AcquisitionStep', 'AcquisitionBatchStep',
'AcquisitionMoveStep', 'AcquisitionDeleteStep',
'AcquisitionReinjectStep', 'AcquisitionForkStep',
'AcquisitionArchiveStep', 'AcquisitionStatsDClient',
'AcquisitionListener']
| 1.335938 | 1 |
frappe-bench/apps/erpnext/erpnext/non_profit/doctype/member/member.py | Semicheche/foa_frappe_docker | 0 | 3783 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
from frappe.contacts.address_and_contact import load_address_and_contact
class Member(Document):
def onload(self):
"""Load address and contacts in `__onload`"""
load_address_and_contact(self)
def validate(self):
self.validate_email_type(self.email)
def validate_email_type(self, email):
from frappe.utils import validate_email_add
validate_email_add(email.strip(), True) | 2.09375 | 2 |
networks/networks.py | ayyuriss/TRHPO | 0 | 3784 | <reponame>ayyuriss/TRHPO
from torch import nn
import numpy as np
import base.basenetwork as BaseN
from networks.cholesky import CholeskyBlock
class FCNet(BaseN.BaseNetwork):
name ="FCNet"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCNet,self).__init__(input_shape,output_shape,owner_name)
x = input_shape
self.model = nn.Sequential(BaseN.Flatten(),
nn.Linear(np.prod(x), 1024),nn.Softplus(),
nn.Linear(1024,512),nn.Tanh(),
nn.Linear(512,256),
BaseN.EigenLayer(256,self.output_shape[0]))
self.compile()
class FCSpectralNet(BaseN.BaseNetwork):
name ="FCSpectralNet"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCSpectralNet,self).__init__(input_shape,output_shape,owner_name)
x = input_shape
self.model = nn.Sequential(BaseN.Flatten(),
nn.Linear(np.prod(x), 1024),BaseN.AdaptiveTanh(),
nn.Linear(1024,1024),BaseN.AdaptiveTanh(),
nn.Linear(1024,512),BaseN.AdaptiveTanh(),
BaseN.EigenLayer(512,self.output_shape[0]))
self.compile()
class FCSpectralMNet(BaseN.BaseNetwork):
name ="FCSpectralMNet"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCSpectralMNet,self).__init__(input_shape,output_shape,owner_name)
x = input_shape
self.model = nn.Sequential(BaseN.Flatten(),
nn.Linear(np.prod(x), 1024),nn.ReLU(),
nn.Linear(1024,1024),nn.ReLU(),
nn.Linear(1024,512),nn.ReLU(),
nn.Linear(512,self.output_shape[0]-1),nn.Tanh(),
BaseN.EigenLayer())
self.compile()
class FCNetQ(BaseN.BaseNetwork):
name ="FCNetQ"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCNetQ,self).__init__(input_shape,output_shape,owner_name)
x = int(np.prod(input_shape))
self.model = nn.Sequential(BaseN.Flatten(),
nn.Linear(x,x),nn.Tanh(),
nn.Linear(x,self.output_shape[0]))
self.compile()
class ConvNet(BaseN.BaseNetwork):
name="ConvNet"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNet,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.ReLU(),
BaseN.conv3_2(8, 16),nn.ReLU(),
BaseN.conv3_2(8, 8))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(),
nn.Linear(512,256),
BaseN.EigenLayer(256,self.output_shape[0],bias=False))
self.compile()
class ConvNetBias(BaseN.BaseNetwork):
name="ConvNetBias"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetBias,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),
BaseN.conv3_2(8, 12),BaseN.AdaptiveTanh(),
BaseN.conv3_2(12, 16),
BaseN.conv3_2(16, 20))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(),
nn.Linear(512,256),
BaseN.EigenLayer(256,self.output_shape[0],bias=False))
self.compile()
class FCConvNet(BaseN.BaseNetwork):
name="FCConvNet"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCConvNet,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(),
BaseN.conv3_2(4, 8),BaseN.AdaptiveTanh())]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,512),BaseN.AdaptiveTanh(),
nn.Linear(512,256),
BaseN.EigenLayer(256,self.output_shape[0],bias=False))
self.compile()
class FCConvNetBias(BaseN.BaseNetwork):
name="FCConvNetBias"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCConvNetBias,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.ReLU(),
BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,1024),BaseN.AdaptiveTanh(),
nn.Linear(1024,256),
BaseN.EigenLayer(256,self.output_shape[0],bias=False))
self.compile()
class ConvNet2(BaseN.BaseNetwork):
name="ConvNet2"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNet2,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 3),nn.Softplus(),
BaseN.conv3_2(3, 6),BaseN.conv3_2(6, 12))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,512),
nn.Linear(512,1024),nn.Tanh(),
nn.Linear(1024,512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,256),
BaseN.EigenLayer(256,self.output_shape[0]))
self.compile()
class ConvNetBig(BaseN.BaseNetwork):
name="ConvNetBig"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetBig,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),
BaseN.conv3_2(8, 16),nn.Softplus(),
BaseN.conv3_2(16, 32))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,512),
BaseN.EigenLayer(512,self.output_shape[0]))
self.compile()
class ConvNetBigBias(BaseN.BaseNetwork):
name="ConvNetBigBias"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetBigBias,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(),
BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,512),
BaseN.EigenLayer(512,self.output_shape[0],bias=False))
self.compile()
class ConvNetBigAtari(BaseN.BaseNetwork):
name="ConvNetBigAtari"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetBigAtari,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),
BaseN.conv3_2(8, 16),
BaseN.conv3_2(16, 32))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,512),nn.Tanh(),
nn.Linear(512,1024),
BaseN.EigenLayer(1024,self.output_shape[0]))
self.compile()
class ConvNetBigS(BaseN.BaseNetwork):
name="ConvNetBigS"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetBigS,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),
BaseN.conv3_2(8, 16),
BaseN.conv3_2(16, 32))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,512),
nn.Linear(512,self.output_shape[0]))
self.compile()
class ConvNetMNIST(BaseN.BaseNetwork):
name = "ConvNetMNIST"
def __init__(self,input_shape,output_shape,**kwargs):
super(ConvNetMNIST,self).__init__(**kwargs)
self.n = output_shape
self.conv = [BaseN.ResNetBlock(1,32),
BaseN.conv3_2(32,64)]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0], nn.Softplus(),
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
BaseN.EigenLayer(256,self.output_shape[0]))
self.compile()
class ConvNetSimple(BaseN.BaseNetwork):
name="ConvNetSimple"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetSimple,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus())]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,self.output_shape[0]))
self.compile()
class FCNetSimple(BaseN.BaseNetwork):
name ="FCNetSimple"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCNetSimple,self).__init__(input_shape,output_shape,owner_name)
x = input_shape
self.model = nn.Sequential(BaseN.Flatten(),
nn.Linear(np.prod(x), 1024),nn.Softplus(),
nn.Linear(1024,512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,self.output_shape[0]))
self.compile()
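# --- Illustrative usage sketch (added; not part of the original module) ---
# All constructors above share the (input_shape, output_shape, owner_name)
# signature from BaseN.BaseNetwork, so building a small network for
# 8-dimensional observations and 4 outputs would look roughly like this. How
# BaseNetwork itself wires forward()/compile() is not visible here, so the
# wrapped nn.Sequential is called directly as a conservative assumption.
#
#   import torch
#   net = FCNetSimple(input_shape=(8,), output_shape=(4,))
#   y = net.model(torch.zeros(16, 8))   # -> tensor of shape (16, 4)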
| 2.34375 | 2 |
icenews/api_important_words.py | sverrirab/icenews | 4 | 3785 | import logging
from pydantic import BaseModel, Field
from typing import List
from .similar import important_words
from .server import app
_MAX_LENGTH = 2000
logger = logging.getLogger(__name__)
class ImportantWordsResponse(BaseModel):
important_words: List[str] = Field(..., description="List of lemmas")
class ImportantWordsRequest(BaseModel):
input_string: str = Field(
...,
description="Icelandic text for analysis.",
min_length=1,
max_length=_MAX_LENGTH,
)
# Strange things happen with error handling when using alias - splitting up into two input models
class ParseInputDeprecated(BaseModel):
input_string: str = Field(
...,
description="Icelandic text for analysis.",
min_length=1,
max_length=_MAX_LENGTH,
alias="in",
)
@app.post(
"/v1/important_words",
description="Find lemmas of important words",
response_model=ImportantWordsResponse,
)
def v1_important_words(*, data: ImportantWordsRequest):
return ImportantWordsResponse(important_words=important_words(data.input_string))
@app.post(
"/v1/parse",
description="Find lemmas of important words",
response_model=ImportantWordsResponse,
deprecated=True,
)
def v1_parse(*, data: ParseInputDeprecated):
logger.info(f"parse: {repr(data.input_string)}")
return ImportantWordsResponse(important_words=important_words(data.input_string))
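# --- Illustrative request sketch (added; not part of the original module) ---
# With the FastAPI app above running, the endpoint can be exercised through
# the test client; the Icelandic sentence is only a placeholder.
#
#   from fastapi.testclient import TestClient
#   client = TestClient(app)
#   resp = client.post("/v1/important_words",
#                      json={"input_string": "Halló heimur"})
#   resp.json()   # -> {"important_words": [...]}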
| 2.421875 | 2 |
try-except.py | kmarcini/Learn-Python---Full-Course-for-Beginners-Tutorial- | 0 | 3786 | <reponame>kmarcini/Learn-Python---Full-Course-for-Beginners-Tutorial-<filename>try-except.py
try:
# num = 10 / 0
number = int(input("Enter a number: "))
print(number)
# catch specific errors
except ZeroDivisionError as err:
print(err)
except ValueError:
print("Invalid input")
| 4.0625 | 4 |
peaksampl.py | Gattocrucco/sipmfilter | 0 | 3787 | <reponame>Gattocrucco/sipmfilter
import numpy as np
def _adddims(a, b):
n = max(a.ndim, b.ndim)
a = np.expand_dims(a, tuple(range(n - a.ndim)))
b = np.expand_dims(b, tuple(range(n - b.ndim)))
return a, b
def _yz(y, z, t, yout):
"""
Shared implementation of peaksampl and sumpeaks.
"""
y = np.asarray(y)
z = np.asarray(z)
t = np.asarray(t)
y = np.pad(y, [(0, 0)] * (y.ndim - 1) + [(1, 1)], constant_values=yout)
offset = np.argmax(np.abs(y), axis=-1)
ampl = np.take_along_axis(y, np.expand_dims(offset, -1), -1)
ampl = np.squeeze(ampl, -1)
indices = t[..., :, None] - t[..., None, :] + offset[..., None, None]
indices = np.minimum(indices, y.shape[-1] - 1)
indices = np.maximum(indices, 0)
N = t.shape[-1]
indices = indices.reshape(indices.shape[:-2] + (N * N,))
n = max(y.ndim, indices.ndim)
y, indices = _adddims(y, indices)
y = np.take_along_axis(y, indices, -1)
eps = np.finfo(float).eps * N * N * ampl
y[..., ::N + 1] += np.expand_dims(eps, -1)
y = y.reshape(y.shape[:-1] + (N, N))
z = z[..., None]
y, z = _adddims(y, z)
return y, z
def peaksampl(y, z, t, yout=0):
"""
Get peak amplitudes given their sum.
This assumes that the position of the signals is given by peaks positions
even when they are summed.
Parameters
----------
y : array (..., M,)
The single signal shape.
z : array (..., N,)
The peak height in the sum of the signals for each peak.
t : int array (..., N,)
The indices of the peaks in the sum.
yout : scalar
The value of the signal outside the provided values, default 0.
Return
------
a : array (..., N),
The amplitudes such that z_i = sum_j a_j * y[t_i - t_j].
Broadcasted along non-last axis.
"""
y, z = _yz(y, z, t, yout)
a = np.linalg.solve(y, z)
return np.squeeze(a, -1)
def sumpeaks(y, a, t, yout=0):
"""
Compute the peak heights of a sum of signals.
This assumes that the position of the peaks is given by the signal
positions even when they are summed.
Parameters
----------
y : array (..., M,)
The single signal shape.
a : array (..., N,)
The amplitudes of the signals (`y` is multiplied by `a`).
t : int array (..., N,)
The indices of the position of the signals.
yout : scalar
The value of the signal outside the provided values, default 0.
Return
------
z : array (..., N,)
The peak height in the sum of the signals for each signal. Broadcasted
along non-last axis.
"""
y, a = _yz(y, a, t, yout)
z = np.matmul(y, a)
return np.squeeze(z, axis=-1)
if __name__ == '__main__':
from matplotlib import pyplot as plt
from scipy import signal
y = np.exp(-np.linspace(0, 10, 1000) / 10)
i = np.arange(1, 1000)
t0 = np.array([10, 340, 523])
a0 = np.array([3, 2, 1])
indices = i - t0[:, None]
z = np.take(y, indices, mode='clip') * a0[:, None]
z = np.where((indices < 0) | (indices >= len(y)), 0, z)
z = np.sum(z, axis=0)
t, = signal.argrelmax(z)
assert len(t) == len(t0)
a = peaksampl(y, z[t], t)
h = sumpeaks(y, a, t)
fig, ax = plt.subplots(num='peaksampl', clear=True)
ax.plot(z, color='#f55')
ax.vlines(t0, 0, a0, color='gray', zorder=3)
ax.vlines(t, 0, a, linestyle='--', zorder=3)
ax.plot(t, h, 'ok')
ax.grid('major', linestyle='--')
fig.tight_layout()
fig.show()
| 2.234375 | 2 |
arachne/hdl/xilinx/ps8/resources/pmu.py | shrine-maiden-heavy-industries/arachne | 3 | 3788 | <reponame>shrine-maiden-heavy-industries/arachne<filename>arachne/hdl/xilinx/ps8/resources/pmu.py<gh_stars>1-10
# SPDX-License-Identifier: BSD-3-Clause
from amaranth import *
from amaranth.build import *
from .common import PS8Resource, MIOSet
__all__ = (
'PMUResource',
)
class PMUResource(PS8Resource):
name = 'pmu'
claimable_mio = [ ]
def __init__(self):
super().__init__(0, 0, None, False)
def used_mio(self, **kwargs):
raise NotImplementedError # :nocov:
def generate_mapping(self, **kwargs):
raise NotImplementedError # :nocov:
| 1.835938 | 2 |
backend/Washlist/tests.py | henrikhorluck/tdt4140-washlists | 0 | 3789 | <filename>backend/Washlist/tests.py
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from Dormroom.models import Dormroom
from SIFUser.mixins import AuthTestMixin
from StudentVillage.models import StudentVillage
from Washlist.jobs import reset_washlists
from Washlist.models.Templates import TemplateListItem, TemplateWashList
from Washlist.models.WashLists import ListItem
from Washlist.serializer import TemplateWashListSerializer
class WashListTemplateTest(TestCase):
room = None
def setUp(self):
village = StudentVillage.objects.create(name="Moholt")
self.room = Dormroom.objects.create(number=1, village=village)
temp_list = TemplateWashList.objects.create(title="Moholt")
village.templateWashList = temp_list
village.save()
def test_add_to_template_adds_to_each_list(self):
desc = "Vask badet"
temp_list = TemplateWashList.objects.get(title="Moholt")
TemplateListItem.objects.create(description=desc, washlist=temp_list).save()
self.assertEqual(desc, ListItem.objects.get(dormroom=self.room).description)
class WeeklyResetOfWashlistsTest(TestCase):
def setUp(self):
"""
        Create a Washlist item that is completed.
        The method also sets up a village and a room to relate the Washlist item to,
        in order to satisfy the db constraints.
"""
village = StudentVillage.objects.create(name="Moholt")
self.room = Dormroom.objects.create(number=1, village=village)
temp_list = TemplateWashList.objects.create(title="Moholt")
village.templateWashList = temp_list
village.save()
self.item = ListItem.objects.create(
pk=1, dormroom=self.room, desc="Vask badet", completed=True
)
self.item.save()
def test_job_resets_items(self):
"""
        Test that the job to reset Washlist items, when run manually, actually resets
        the Washlist items in the database.
"""
reset_washlists()
self.assertEqual(False, ListItem.objects.get(pk=1).completed)
class WashlistTemplateAPITest(AuthTestMixin):
def setUp(self):
super().setUp()
self.temp_list = TemplateWashList.objects.create(title="Moholt")
village = StudentVillage.objects.create(
name="Moholt", templateWashList=self.temp_list
)
self.room = Dormroom.objects.create(number=1, village=village)
self.item = ListItem.objects.create(
pk=1, dormroom=self.room, desc="Vask badet", completed=True
)
def test_get_template_list(self):
url = reverse("templatewashlist-list")
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data[0],
TemplateWashListSerializer(
TemplateWashList.objects.get(title="Moholt")
).data,
)
def test_get_detail_template_list(self):
url = reverse("templatewashlist-detail", args=[1])
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data,
TemplateWashListSerializer(
TemplateWashList.objects.get(title="Moholt")
).data,
)
def test_add_template_washlist(self):
url = reverse("templatewashlist-list")
response = self.client.post(
url, {"title": "Tyholt", "village": 1}, HTTP_AUTHORIZATION=self.auth
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
response.data,
TemplateWashListSerializer(
TemplateWashList.objects.get(title="Tyholt")
).data,
)
def test_partial_update(self):
url = reverse("templatewashlist-detail", args=[1])
response = self.client.patch(
url, {"title": "Berg"}, HTTP_AUTHORIZATION=self.auth
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data,
TemplateWashListSerializer(TemplateWashList.objects.get(title="Berg")).data,
)
def test_destroy(self):
url = reverse("templatewashlist-detail", args=[1])
response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(TemplateWashList.objects.count(), 0)
| 2.515625 | 3 |
torchvision/prototype/models/mobilenetv3.py | piyush01123/vision | 0 | 3790 | from functools import partial
from typing import Any, Optional, List
from torchvision.prototype.transforms import ImageNetEval
from torchvision.transforms.functional import InterpolationMode
from ...models.mobilenetv3 import MobileNetV3, _mobilenet_v3_conf, InvertedResidualConfig
from ._api import WeightsEnum, Weights
from ._meta import _IMAGENET_CATEGORIES
from ._utils import handle_legacy_interface, _ovewrite_named_param
__all__ = [
"MobileNetV3",
"MobileNet_V3_Large_Weights",
"MobileNet_V3_Small_Weights",
"mobilenet_v3_large",
"mobilenet_v3_small",
]
def _mobilenet_v3(
inverted_residual_setting: List[InvertedResidualConfig],
last_channel: int,
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> MobileNetV3:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
return model
_COMMON_META = {
"task": "image_classification",
"architecture": "MobileNetV3",
"publication_year": 2019,
"size": (224, 224),
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
}
class MobileNet_V3_Large_Weights(WeightsEnum):
ImageNet1K_V1 = Weights(
url="https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth",
transforms=partial(ImageNetEval, crop_size=224),
meta={
**_COMMON_META,
"num_params": 5483032,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
"acc@1": 74.042,
"acc@5": 91.340,
},
)
ImageNet1K_V2 = Weights(
url="https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth",
transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 5483032,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning",
"acc@1": 75.274,
"acc@5": 92.566,
},
)
default = ImageNet1K_V2
class MobileNet_V3_Small_Weights(WeightsEnum):
ImageNet1K_V1 = Weights(
url="https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth",
transforms=partial(ImageNetEval, crop_size=224),
meta={
**_COMMON_META,
"num_params": 2542856,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
"acc@1": 67.668,
"acc@5": 87.402,
},
)
default = ImageNet1K_V1
@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Large_Weights.ImageNet1K_V1))
def mobilenet_v3_large(
*, weights: Optional[MobileNet_V3_Large_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV3:
weights = MobileNet_V3_Large_Weights.verify(weights)
inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_large", **kwargs)
return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)
@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Small_Weights.ImageNet1K_V1))
def mobilenet_v3_small(
*, weights: Optional[MobileNet_V3_Small_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV3:
weights = MobileNet_V3_Small_Weights.verify(weights)
inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_small", **kwargs)
return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)
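# A minimal usage sketch (illustrative only, assuming the prototype weights API
# defined above; `img` stands for any CHW float image tensor and is not taken
# from this file):
#
#   import torch
#   weights = MobileNet_V3_Large_Weights.ImageNet1K_V2
#   model = mobilenet_v3_large(weights=weights).eval()
#   preprocess = weights.transforms()   # ImageNetEval(crop_size=224, resize_size=232)
#   img = torch.rand(3, 300, 300)
#   batch = preprocess(img).unsqueeze(0)
#   with torch.no_grad():
#       scores = model(batch).squeeze(0).softmax(0)
#   category = weights.meta["categories"][scores.argmax().item()]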
| 2.125 | 2 |
rest_auth/registration/urls.py | soul4code/django-rest-auth | 0 | 3791 | from django.urls import re_path
from django.views.generic import TemplateView
from .views import RegisterView, VerifyEmailView
urlpatterns = [
re_path(r'^$', RegisterView.as_view(), name='rest_register'),
re_path(r'^verify-email/$', VerifyEmailView.as_view(), name='rest_verify_email'),
# This url is used by django-allauth, and an empty TemplateView is defined
# just to allow reverse() calls inside the app, for example when the email
# with the verification link is sent and its content has to be rendered.
# account_confirm_email - you should override this view, handle the link in
# your API client, and then POST the confirmation key to the /verify-email/
# endpoint.
# If you don't want to use the API for that step, just use the ConfirmEmailView
# from django-allauth:
# https://github.com/pennersr/django-allauth/blob/master/allauth/account/views.py
re_path(r'^account-confirm-email/(?P<key>[-:\w]+)/$', TemplateView.as_view(),
name='account_confirm_email'),
]
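# A hedged sketch of the client-side step described in the comments above
# (illustrative, not executed here): the API client extracts the key from the
# confirmation link and POSTs it to the verify-email endpoint. The host name,
# URL prefix and use of `requests` are assumptions; the `key` field is what
# VerifyEmailView's serializer expects.
#
#   import requests
#   key = "<key-from-account-confirm-email-link>"
#   resp = requests.post(
#       "https://api.example.com/rest-auth/registration/verify-email/",
#       data={"key": key},
#   )
#   resp.raise_for_status()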
| 1.851563 | 2 |
crawler/tests.py | mental689/paddict | 1 | 3792 | from django.test import TestCase
# Create your tests here.
from crawler.download import *
from crawler.models import *
class AnimalDownloadTestCase(TestCase):
def setUp(self):
self.stopWords = ["CVPR 2019", "Computer Vision Foundation."]
self.url = "/Users/tuannguyenanh/Desktop/cvpr2019.html"#"http://openaccess.thecvf.com/CVPR2019.py"
self.root = "http://openaccess.thecvf.com/"
self.event = Event.objects.filter(shortname='CVPR2019').first()
if self.event is None:
self.event = Event(shortname='CVPR2019')
self.event.save()
def test_animal_can_download(self):
#print(get_html(self.url))
f = open(self.url)
soup = parse_html(f.read())
f.close()
f = open('cvpr2019.bib', 'w')
print(soup.title)
bibtexs = soup.find_all("div", attrs={"class": "bibref"})
#print(bibtexs)
for bib in bibtexs:
print(bib.text)
f.write(bib.get_text(separator='\n'))  # .text drops <br> tags entirely, so use get_text with a newline separator to keep line breaks
f.close()
| 2.4375 | 2 |
test_scripts/xml_example.py | petervdb/testrep1 | 1 | 3793 | #!/usr/bin/python3
from urllib.request import urlopen
from xml.etree.ElementTree import parse
# Download the RSS feed and parse it
u = urlopen('http://planet.python.org/rss20.xml')
doc = parse(u)
# Extract and output tags of interest
for item in doc.iterfind('channel/item'):
title = item.findtext('title')
date = item.findtext('pubDate')
link = item.findtext('link')
print(title)
print(date)
print(link)
print()
print("Program executed.")
| 3.171875 | 3 |
contacts/urls.py | anthowen/duplify | 1 | 3794 | """dedupper_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from contacts import views
admin.autodiscover()
urlpatterns = [
path('', views.index, name='contact_index'),
path('', views.index, name='lead_index'),
path('contacts/', views.contacts, name='contacts'),
path('leads/', views.leads, name='leads'),
path('table/', views.table, name='table'),
path('plotly/', views.plotly, name='plotly'),
# url(r'^keys', views.upload, name='keys'),
# path('key-gen/', views.key_gen, name='key-gen'),
# path('heroku/', generic.ListView.as_view(model=models.Contact), name='heroku'),
# path('run/', views.run, name='run'),
# path('sorted/<id>', views.merge, name='merge'),
# path('sorted/export/<type>', views.download, name='export'),
# path('sorted/report/<type>', views.download_times, name='report'),
]
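# Illustrative example of the "including another URLconf" pattern described in
# the docstring above (assumes a hypothetical `blog` app with its own urls.py):
#
#   from django.urls import include, path
#   urlpatterns += [
#       path('blog/', include('blog.urls')),
#   ]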
| 2.59375 | 3 |
pydm/PyQt/uic.py | klauer/pydm | 0 | 3795 | from . import qtlib
QT_LIB = qtlib.QT_LIB
if QT_LIB == 'PyQt5':
from PyQt5.uic import *
| 1.242188 | 1 |
CPB100/lab2b/scheduled/ingestapp.py | pranaynanda/training-data-analyst | 0 | 3796 | #!/usr/bin/env python
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START app]
import os
import logging
import transform
import flask
import google.cloud.storage as gcs
# [start config]
app = flask.Flask(__name__)
# Configure this environment variable via app.yaml
CLOUD_STORAGE_BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
#
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
# [end config]
@app.route('/')
def welcome():
return '<html><a href="ingest">ingest last week</a> earthquake data</html>'
@app.route('/ingest')
def ingest_last_week():
try:
# verify that this is a cron job request
is_cron = flask.request.headers['X-Appengine-Cron']
logging.info('Received cron request {}'.format(is_cron))
# create png
url = 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.csv'
outfile = 'earthquakes.png'
status = 'scheduled ingest of {} to {}'.format(url, outfile)
logging.info(status)
transform.create_png(url, outfile)
# upload to cloud storage
client = gcs.Client()
bucket = client.get_bucket(CLOUD_STORAGE_BUCKET)
blob = gcs.Blob('earthquakes/earthquakes.png', bucket)
blob.upload_from_filename(outfile)
# change permissions
blob.make_public()
status = 'uploaded {} to {}'.format(outfile, blob.name)
logging.info(status)
except KeyError as e:
status = '<html>Sorry, this capability is accessible only by the Cron service, but I got a KeyError for {} -- try invoking it from <a href="{}"> the GCP console / AppEngine / taskqueues </a></html>'.format(
e, 'http://console.cloud.google.com/appengine/taskqueues?tab=CRON')
logging.info('Rejected non-Cron request')
return status
@app.errorhandler(500)
def server_error(e):
logging.exception('An error occurred during a request.')
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
# [END app]
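# The /ingest handler only accepts requests carrying the X-Appengine-Cron
# header, i.e. requests issued by App Engine's cron service. A minimal
# cron.yaml that would schedule it looks roughly like this (illustrative;
# the schedule itself is an assumption, not taken from this repository):
#
#   cron:
#   - description: weekly earthquake ingest
#     url: /ingest
#     schedule: every monday 09:00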
| 2.390625 | 2 |
index.py | StarSky1/microsoft-python-study | 0 | 3797 | name=input('input your name:');
print('hello');
print(name.capitalize()); | 3.640625 | 4 |
credentials.py | Machel54/-pass-locker- | 0 | 3798 | import pyperclip
import random
import string
class Credential:
'''
Class that models a saved credential and provides helpers to save, look up, generate and delete credentials
'''
credential_list = []
def __init__(self,username,sitename,password):
self.username = username
self.password = password
self.sitename = sitename
def save_credential(self):
'''
save_credential saves this credential object into credential_list
'''
Credential.credential_list.append(self)
@classmethod
def display_credential(cls, user_name):
'''
Class method that returns the list of credentials saved for the given user_name
'''
users_credential_list = []
for credential in cls.credential_list:
if credential.username == user_name:
users_credential_list.append(credential)
return users_credential_list
def delete_credential(self):
'''
delete_credential removes this saved credential from credential_list
'''
Credential.credential_list.remove(self)
def generate_password(self):
'''
Generate a random password whose length the user chooses at the prompt
'''
chars = "abcdefghijklmnopqrstuvwxyziABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890^?!?$%&/()=?`'+#*'~';:_,.-<>|"
password = ""
print("Use Char list = %s \n" % chars)
length = int(input("[*] Input Password Length: "))
while len(password) != length:
password = password + random.choice(chars)
if len(password) == length:
print("Password: %s" % password)
return password
@classmethod
def find_by_sitename(cls, sitename):
'''
Class method that takes a site name and returns the credential that matches that site
'''
for credential in cls.credential_list:
if credential.sitename == sitename:
return credential
@classmethod
def copy_credential(cls, sitename):
'''
Class method that copies a credential's password to the clipboard, given the credential's sitename
'''
find_credential = Credential.find_by_sitename(sitename)
return pyperclip.copy(find_credential.password)
@classmethod
def credential_exist(cls, sitename):
'''
Check whether a credential with the given sitename exists in the credential list.
Returns:
    The matching sitename if the credential exists, otherwise an empty string (falsy).
'''
the_credential = ""
for credential in Credential.credential_list:
if (credential.sitename == sitename):
the_credential = sitename
return the_credential
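# A minimal usage sketch (illustrative only; the account details below are made up):
if __name__ == "__main__":
    # Create and save a credential, then look it up, copy it and delete it.
    twitter = Credential("machel", "twitter", "pass123")
    twitter.save_credential()
    print(Credential.display_credential("machel"))  # all credentials saved for this user
    found = Credential.find_by_sitename("twitter")
    Credential.copy_credential("twitter")  # puts the password on the clipboard via pyperclip
    found.delete_credential()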
| 3.71875 | 4 |
tests/test_dice.py | mehulsatardekar/dice-on-demand | 1 | 3799 | import unittest
import app
def test_test():
assert app.test() == "Works!"
| 1.882813 | 2 |