repo_name (7-94 chars) | repo_path (4-237 chars) | repo_head_hexsha (40 chars) | content (10-680k chars) | apis (2-840k chars) |
---|---|---|---|---|
Miillky/automate_the_boring_stuff_with_python | Chapter 10/trackbackLog.py | 284b074b0738c66f38b54fe0fc5f69b3446e7e43 | import traceback
try:
raise Exception('This is the error message.')
except:
errorFile = open('./Chapter 10/errorInfo.txt', 'w')
errorFile.write(traceback.format_exc())
errorFile.close()
print('The traceback info was written to errorInfo.txt') | [((6, 20, 6, 42), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n')] |
wuchiehhan/KDD2019-HandsOn-Tutorial | Module_III/PySparkNetworkSimilarityClass.py | 0377ae4b2a74e9cc08b15c983e4e0f59ab02debe | # Databricks notebook source
from pyspark.sql.types import *
from pyspark.sql import functions as F
import base64
import array
# COMMAND ----------
# s is a base64 encoded float[] with first element being the magnitude
def Base64ToFloatArray(s):
arr = array.array('f', base64.b64decode(s))
return (arr[0], arr[1:])
def cosineSimilarity(s1, s2):
(m1, v1) = Base64ToFloatArray(s1)
(m2, v2) = Base64ToFloatArray(s2)
if (m1 == 0) or (m2 == 0):
return 0
else :
return sum(x*y for x,y in zip(v1, v2))/(m1 * m2)
# Register the UDF so that it can be used in DataFrame operations
#
# Perform same computation as cosineSimilarity()
#
@F.udf("float")
def udfCosineSimilarity(s1, s2):
return cosineSimilarity(s1, s2)
# COMMAND ----------
# MAGIC %md **NetworkSimilarity** class to compute Network Similarity
# COMMAND ----------
# Parameters:
# resource: resource stream path
# container: container name in Azure Storage (AS) account
# account: Azure Storage (AS) account
# sas: complete 'Blob service SAS URL' of the shared access signature (sas) for the container
# key: access key for the container, if sas is specified, key is ignored
#
# Note:
# resource does not have header
# you need to provide value for either sas or key
#
class NetworkSimilarity(AzureStorageAccess):
# constructor
def __init__(self, resource, container, account, sas='', key=''):
AzureStorageAccess.__init__(self, container, account, sas, key)
schema = StructType()
schema.add(StructField('EntityId', LongType(), False))
schema.add(StructField('EntityType', StringType(), False))
schema.add(StructField('Data', StringType(), False))
self.df = spark.read.format('csv').options(header='false', delimiter='\t').schema(schema).load(self.getFullpath(resource))
def getDataframe(self):
return self.df
def raiseErrorIfNotFound(self, row, e):
if row is None:
raise KeyError('entity ' + str(e) + ' not found')
def getSimilarity(self, e1, e2):
df = self.df
row1 = df.where(df.EntityId == e1).first()
self.raiseErrorIfNotFound(row1, e1)
row2 = df.where(df.EntityId == e2).first()
self.raiseErrorIfNotFound(row2, e2)
return cosineSimilarity(row1.Data, row2.Data)
def getTopEntities(self, e, targetType = '', maxCount = 20, minScore = 0.0):
df1 = self.df
row1 = df1.where(df1.EntityId == e).first()
self.raiseErrorIfNotFound(row1, e)
if targetType == '':
df2 = df1.where(df1.EntityId != e)
else :
df2 = df1.where((df1.EntityId != e) & (df1.EntityType == targetType))
df3 = df2.select(df2.EntityId, df2.EntityType, udfCosineSimilarity(F.lit(row1.Data), df2.Data).alias('Score'))
return df3.where(df3.Score >= minScore).orderBy(df3.Score.desc()).limit(maxCount)
| [((26, 1, 26, 15), 'pyspark.sql.functions.udf', 'F.udf', ({(26, 7, 26, 14): '"""float"""'}, {}), "('float')", True, 'from pyspark.sql import functions as F\n'), ((11, 25, 11, 44), 'base64.b64decode', 'base64.b64decode', ({(11, 42, 11, 43): 's'}, {}), '(s)', False, 'import base64\n'), ((82, 71, 82, 87), 'pyspark.sql.functions.lit', 'F.lit', ({(82, 77, 82, 86): 'row1.Data'}, {}), '(row1.Data)', True, 'from pyspark.sql import functions as F\n')] |
vagnes/fizzbuzzgame | fizzbuzz.py | de72ffc5a21fbb3b1cfd930ef632b75697fa830f | print("Press q to quit")
quit = False
while quit is False:
in_val = input("Please enter a positive integer.\n > ")
if in_val == 'q':
quit = True
elif int(in_val) % 3 == 0 and int(in_val) % 5 == 0:
print("FizzBuzz")
elif int(in_val) % 5 == 0:
print("Buzz")
elif int(in_val) % 3 == 0:
print("Fizz")
else:
pass
| [] |
muzudho/py-state-machine-practice | lesson10019_projects/pen/data/transition.py | e31c066f4cf142b6b6c5ff273b56a0f89428c59e | from lesson14_projects.pen.data.const import (
A,
E_A,
E_AN,
E_IS,
E_OVER,
E_PEN,
E_PIN,
E_THAT,
E_THIS,
E_WAS,
INIT,
IS,
PEN,
THIS,
)
pen_transition_doc_v19 = {
"title": "This is a pen",
"entry_state": INIT,
"data": {
INIT: {
E_OVER: [INIT],
E_THAT: [INIT],
E_THIS: [INIT, THIS],
THIS: {
E_OVER: [INIT],
E_WAS: [INIT],
E_IS: [INIT, THIS, IS],
IS: {
E_OVER: [INIT],
E_AN: [INIT],
E_A: [INIT, THIS, IS, A],
A: {
E_OVER: [INIT],
E_PIN: [INIT],
E_PEN: [PEN],
},
},
},
},
PEN: {
E_OVER: None,
},
},
}
| [] |
olesmith/SmtC | Animation/Main.py | dfae5097f02192b60aae05b9d02404fcfe893be3 | import gd,os,time
from Html import Animation_Html
from Iteration import Animation_Iteration
from Write import Animation_Write
from Base import *
from Canvas2 import *
from Canvas2 import Canvas2
from Image import Image
from HTML import HTML
__Canvas__=None
class Animation(
Animation_Html,
Animation_Iteration,
Animation_Write,
Base,HTML
):
Convert_Bin="/usr/bin/convert"
HTML_Root="http://127.0.0.1/Graphics"
CGI_Root="http://127.0.0.1/cgi-bin/Graphics/Display.py"
__Switches__={
"v": {
"Attr": "Verbose",
"Text": "Verbosity level. Augment to see more numbers...",
"Type": None,
},
"-clean": {
"Attr": "Clean",
"Text": "Remove PNGs generated",
"Type": "int",
},
"-rewrite": {
"Attr": "Images_Rewrite",
"Text": "Rewrite image file between iterations",
"Type": None,
},
"l": {
"Attr": "Loop",
"Text": "Animated GIF no of loops (passed to convert)",
"Type": None,
},
"d": {
"Attr": "Delay",
"Text": "Animated GIF delay (passed to convert)",
"Type": None,
},
"W": {
"Attr": "W",
"Text": "White background",
"Type": "bool",
},
}
__Args__=[]
Indent=" "
W=False
Verbose=1
Delay="5"
Loop="0"
Path="curves"
Curve_Parms_Path=""
FileName="Curve"
Name="Curve"
Parameters=["a","b","c"]
Parameter_Names=["a","b","c"]
Clean=0 # Clean up afterwards
Iteration_Files=[]
Images_Rewrite=1
def __init__(self,pmin,pmax,vals={}):
self.Hash2Obj(vals)
self.__Canvas__=Canvas2(vals,[ pmin,pmax ])
self.Canvas([ pmin,pmax ]).CLI2Obj()
##!
##! Override __str__ to print some useful info.
##!
def __str__(self):
text="Animation, Path: "+self.Path
text+="\n\tFileName: "+self.FileName
text+="\n\tParms: "+self.Curve_Parms_Path
text+="\n\tLoop: "+self.Loop
text+="\n\tDelay: "+self.Delay
text+="\n\tClean: "+str(self.Clean)
text+="\n"+str(self.Canvas())
return text
##!
##! Returns Canvas object, stored in self.__Canvas__
##!
def Canvas(self,pexts=[]):
global __Canvas__ # Needed to modify global copy of __Canvas__
if (not __Canvas__):
parms={
}
__Canvas__=Canvas2(parms,pexts)
return __Canvas__
def BackGround_Color(self):
if (self.W):
return "White"
else:
return "Black"
def Initialize(self):
self.Canvas().Resolution=self.Resolution
self.Canvas().Image_Rewrite()
| [((81, 24, 81, 51), 'Canvas2.Canvas2', 'Canvas2', ({(81, 32, 81, 36): 'vals', (81, 37, 81, 50): '[pmin, pmax]'}, {}), '(vals, [pmin, pmax])', False, 'from Canvas2 import Canvas2\n'), ((109, 23, 109, 43), 'Canvas2.Canvas2', 'Canvas2', ({(109, 31, 109, 36): 'parms', (109, 37, 109, 42): 'pexts'}, {}), '(parms, pexts)', False, 'from Canvas2 import Canvas2\n')] |
junjungoal/pytorch_metric_learning | pytorch_metric_learning/miners/distance_weighted_miner.py | e56bb440d1ec63e13622025209135a788c6f51c1 | #! /usr/bin/env python3
from .base_miner import BasePostGradientMiner
import torch
from ..utils import loss_and_miner_utils as lmu
# adapted from
# https://github.com/chaoyuaw/incubator-mxnet/blob/master/example/gluon/
# /embedding_learning/model.py
class DistanceWeightedMiner(BasePostGradientMiner):
def __init__(self, cutoff, nonzero_loss_cutoff, **kwargs):
super().__init__(**kwargs)
self.cutoff = cutoff
self.nonzero_loss_cutoff = nonzero_loss_cutoff
def mine(self, embeddings, labels):
label_set = torch.unique(labels)
n, d = embeddings.size()
dist_mat = lmu.dist_mat(embeddings)
dist_mat = dist_mat + torch.eye(dist_mat.size(0)).to(embeddings.device)
# so that we don't get log(0). We mask the diagonal out later anyway
# Cut off to avoid high variance.
dist_mat = torch.max(dist_mat, torch.tensor(self.cutoff).to(dist_mat.device))
# Subtract max(log(distance)) for stability.
# See the first equation from Section 4 of the paper
log_weights = (2.0 - float(d)) * torch.log(dist_mat) - (
float(d - 3) / 2
) * torch.log(1.0 - 0.25 * (dist_mat ** 2.0))
weights = torch.exp(log_weights - torch.max(log_weights))
# Sample only negative examples by setting weights of
# the same-class examples to 0.
mask = torch.ones(weights.size()).to(embeddings.device)
for i in label_set:
idx = (labels == i).nonzero()
mask[torch.meshgrid(idx.squeeze(1), idx.squeeze(1))] = 0
weights = weights * mask * ((dist_mat < self.nonzero_loss_cutoff).float())
weights = weights / torch.sum(weights, dim=1, keepdim=True)
np_weights = weights.cpu().numpy()
return lmu.get_random_triplet_indices(labels, weights=np_weights)
| [((18, 20, 18, 40), 'torch.unique', 'torch.unique', ({(18, 33, 18, 39): 'labels'}, {}), '(labels)', False, 'import torch\n'), ((42, 28, 42, 67), 'torch.sum', 'torch.sum', (), '', False, 'import torch\n'), ((29, 41, 29, 60), 'torch.log', 'torch.log', ({(29, 51, 29, 59): 'dist_mat'}, {}), '(dist_mat)', False, 'import torch\n'), ((31, 12, 31, 53), 'torch.log', 'torch.log', ({(31, 22, 31, 52): '(1.0 - 0.25 * dist_mat ** 2.0)'}, {}), '(1.0 - 0.25 * dist_mat ** 2.0)', False, 'import torch\n'), ((32, 42, 32, 64), 'torch.max', 'torch.max', ({(32, 52, 32, 63): 'log_weights'}, {}), '(log_weights)', False, 'import torch\n'), ((25, 39, 25, 64), 'torch.tensor', 'torch.tensor', ({(25, 52, 25, 63): 'self.cutoff'}, {}), '(self.cutoff)', False, 'import torch\n')] |
cassie01/PumpLibrary | Keywords/__init__.py | c2a4884a36f4c6c6552fa942143ae5d21c120b41 | # -*- coding: utf-8 -*-
from .Alarm.alarm import Alarm
from .DeliveryView.bolus import Bolus
from .DeliveryView.info import Info
from .DeliveryView.infusion import Infusion
from .DeliveryView.infusion_parameter import InfusionParameter
from .DeliveryView.priming import Priming
from .HardwareControl.motor import Motor
from .MenuSettings.device_report import DeviceReport
from .MenuSettings.history_log import HistoryLog
from .MenuSettings.infusion_setting import InfusionSetting
from .MenuSettings.maintenance import Maintenance
from .MenuSettings.safety_setting import SafetySetting
from .MenuSettings.system_setting import SystemSetting
from .SensorControl.sensor import Sensor
__all__ = ["Alarm",
"Bolus",
"Info",
"Infusion",
"InfusionParameter",
"Priming",
"Motor",
"DeviceReport",
"HistoryLog",
"InfusionSetting",
"Maintenance",
"SafetySetting",
"SystemSetting",
"Sensor",
]
| [] |
Azure/automl-devplat2-preview | src/responsibleai/rai_analyse/constants.py | 05f327fe4c2504e9d49001ce26d8b49627214138 | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
class DashboardInfo:
MODEL_ID_KEY = "id" # To match Model schema
MODEL_INFO_FILENAME = "model_info.json"
RAI_INSIGHTS_MODEL_ID_KEY = "model_id"
RAI_INSIGHTS_RUN_ID_KEY = "rai_insights_parent_run_id"
RAI_INSIGHTS_PARENT_FILENAME = "rai_insights.json"
class PropertyKeyValues:
# The property to indicate the type of Run
RAI_INSIGHTS_TYPE_KEY = "_azureml.responsibleai.rai_insights.type"
RAI_INSIGHTS_TYPE_CONSTRUCT = "construction"
RAI_INSIGHTS_TYPE_CAUSAL = "causal"
RAI_INSIGHTS_TYPE_COUNTERFACTUAL = "counterfactual"
RAI_INSIGHTS_TYPE_EXPLANATION = "explanation"
RAI_INSIGHTS_TYPE_ERROR_ANALYSIS = "error_analysis"
RAI_INSIGHTS_TYPE_GATHER = "gather"
# Property to point at the model under examination
RAI_INSIGHTS_MODEL_ID_KEY = "_azureml.responsibleai.rai_insights.model_id"
# Property for tool runs to point at their constructor run
RAI_INSIGHTS_CONSTRUCTOR_RUN_ID_KEY = (
"_azureml.responsibleai.rai_insights.constructor_run"
)
# Property to record responsibleai version
RAI_INSIGHTS_RESPONSIBLEAI_VERSION_KEY = (
"_azureml.responsibleai.rai_insights.responsibleai_version"
)
# Property format to indicate presence of a tool
RAI_INSIGHTS_TOOL_KEY_FORMAT = "_azureml.responsibleai.rai_insights.has_{0}"
class RAIToolType:
CAUSAL = "causal"
COUNTERFACTUAL = "counterfactual"
ERROR_ANALYSIS = "error_analysis"
EXPLANATION = "explanation"
| [] |
goodboy/pulsar | pulsar/apps/data/redis/store.py | e4b42d94b7e262a165782747d65f8b39fb8d3ba9 | from functools import partial
from pulsar import Connection, Pool, get_actor
from pulsar.utils.pep import to_string
from pulsar.apps.data import RemoteStore
from pulsar.apps.ds import redis_parser
from .client import RedisClient, Pipeline, Consumer, ResponseError
from .pubsub import RedisPubSub, RedisChannels
class RedisStoreConnection(Connection):
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.parser = self._producer._parser_class()
async def execute(self, *args, **options):
consumer = self.current_consumer()
await consumer.start((args, options))
result = await consumer.on_finished
if isinstance(result, ResponseError):
raise result.exception
return result
async def execute_pipeline(self, commands, raise_on_error=True):
consumer = self.current_consumer()
consumer.start((commands, raise_on_error, []))
result = await consumer.on_finished
if isinstance(result, ResponseError):
raise result.exception
return result
class RedisStore(RemoteStore):
'''Redis :class:`.Store` implementation.
'''
protocol_factory = partial(RedisStoreConnection, Consumer)
supported_queries = frozenset(('filter', 'exclude'))
def _init(self, namespace=None, parser_class=None, pool_size=50,
decode_responses=False, **kwargs):
self._decode_responses = decode_responses
if not parser_class:
actor = get_actor()
pyparser = actor.cfg.redis_py_parser if actor else False
parser_class = redis_parser(pyparser)
self._parser_class = parser_class
if namespace:
self._urlparams['namespace'] = namespace
self._pool = Pool(self.connect, pool_size=pool_size, loop=self._loop)
if self._database is None:
self._database = 0
self._database = int(self._database)
self.loaded_scripts = set()
@property
def pool(self):
return self._pool
@property
def namespace(self):
'''The prefix namespace to prepend to all transaction keys
'''
n = self._urlparams.get('namespace')
return '%s:' % n if n else ''
def key(self):
return (self._dns, self._encoding)
def client(self):
'''Get a :class:`.RedisClient` for the Store'''
return RedisClient(self)
def pipeline(self):
'''Get a :class:`.Pipeline` for the Store'''
return Pipeline(self)
def pubsub(self, protocol=None):
return RedisPubSub(self, protocol=protocol)
def channels(self, protocol=None, **kw):
return RedisChannels(self.pubsub(protocol=protocol), **kw)
def ping(self):
return self.client().ping()
async def execute(self, *args, **options):
connection = await self._pool.connect()
with connection:
result = await connection.execute(*args, **options)
return result
async def execute_pipeline(self, commands, raise_on_error=True):
conn = await self._pool.connect()
with conn:
result = await conn.execute_pipeline(commands, raise_on_error)
return result
async def connect(self, protocol_factory=None):
protocol_factory = protocol_factory or self.create_protocol
if isinstance(self._host, tuple):
host, port = self._host
transport, connection = await self._loop.create_connection(
protocol_factory, host, port)
else:
raise NotImplementedError('Could not connect to %s' %
str(self._host))
if self._password:
await connection.execute('AUTH', self._password)
if self._database:
await connection.execute('SELECT', self._database)
return connection
def flush(self):
return self.execute('flushdb')
def close(self):
'''Close all open connections.'''
return self._pool.close()
def has_query(self, query_type):
return query_type in self.supported_queries
def basekey(self, meta, *args):
key = '%s%s' % (self.namespace, meta.table_name)
postfix = ':'.join((to_string(p) for p in args if p is not None))
return '%s:%s' % (key, postfix) if postfix else key
def meta(self, meta):
'''Extract model metadata for lua script stdnet/lib/lua/odm.lua'''
# indices = dict(((idx.attname, idx.unique) for idx in meta.indices))
data = meta.as_dict()
data['namespace'] = self.basekey(meta)
return data
class CompiledQuery:
def __init__(self, pipe, query):
self.pipe = pipe
| [((38, 23, 38, 62), 'functools.partial', 'partial', ({(38, 31, 38, 51): 'RedisStoreConnection', (38, 53, 38, 61): 'Consumer'}, {}), '(RedisStoreConnection, Consumer)', False, 'from functools import partial\n'), ((51, 21, 51, 77), 'pulsar.Pool', 'Pool', (), '', False, 'from pulsar import Connection, Pool, get_actor\n'), ((45, 20, 45, 31), 'pulsar.get_actor', 'get_actor', ({}, {}), '()', False, 'from pulsar import Connection, Pool, get_actor\n'), ((47, 27, 47, 49), 'pulsar.apps.ds.redis_parser', 'redis_parser', ({(47, 40, 47, 48): 'pyparser'}, {}), '(pyparser)', False, 'from pulsar.apps.ds import redis_parser\n'), ((127, 28, 127, 40), 'pulsar.utils.pep.to_string', 'to_string', ({(127, 38, 127, 39): 'p'}, {}), '(p)', False, 'from pulsar.utils.pep import to_string\n')] |
tschelbs18/fruitful | tasks/migrations/0005_auto_20200616_0123.py | 66635cd521ffc0990275e32298419bfc2167b90b | # Generated by Django 3.0.7 on 2020-06-16 05:23
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('tasks', '0004_auto_20200616_0116'),
]
operations = [
migrations.AddField(
model_name='userreward',
name='created_dt',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='userreward',
name='last_updated_dt',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='usertask',
name='created_dt',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='usertask',
name='last_updated_dt',
field=models.DateTimeField(auto_now=True),
),
]
| [((17, 18, 17, 92), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((23, 18, 23, 53), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((28, 18, 28, 92), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((34, 18, 34, 53), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n')] |
boschresearch/pcg_gazebo_pkgs | pcg_libraries/src/pcg_gazebo/parsers/types/vector.py | 1c112d01847ca4f8da61ce9b273e13d13bc7eb73 | # Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import XMLBase
import collections
class XMLVector(XMLBase):
_NAME = ''
def __init__(self, size=None):
XMLBase.__init__(self)
assert size is not None, 'Vector size cannot be None'
assert isinstance(size, int), \
'[{}] Vector size input must be an integer, received={}'.format(
self.xml_element_name, size)
assert size > 0, '[{}] Size must be greater than zero'.format(
self.xml_element_name)
self._size = size
self._value = [0 for _ in range(self._size)]
def _set_value(self, value):
assert isinstance(value, collections.Iterable), \
'Input must be iterable, element={}, received={}, type={}'.format(
self._NAME, value, type(value))
assert len(list(value)) == self._size, \
'Input vector has the wrong size, element={}, received={}, ' \
'size of received={}, expected length={}'.format(
self._NAME, value, len(list(value)), self._size)
for item in value:
assert isinstance(item, float) or isinstance(item, int)
self._value = list(value)
def reset(self):
self._value = [0 for _ in range(self._size)]
XMLBase.reset(self)
def is_valid(self):
if not isinstance(self._value, list):
print('Vector object must have a list as value')
return False
if len(self._value) != self._size:
print('Normal value must be a list with 3 elements')
return False
for item in self._value:
if not isinstance(item, float) and not isinstance(item, int):
print('Each vector element must be a float or integer')
return False
return True
def get_formatted_value_as_str(self):
assert self.is_valid(), 'Invalid vector'
output_str = ' '.join(['{}'] * self._size)
return output_str.format(*[format(x, 'n') for x in self._value])
| [] |
uk-gov-mirror/alphagov.digitalmarketplace-briefs-frontend | tests/main/helpers/test_buyers_helpers.py | 2325f01b1bdb13fb5b0afe7fe110c0be0c031da6 | import mock
import pytest
from werkzeug.exceptions import NotFound
import app.main.helpers as helpers
from dmcontent.content_loader import ContentLoader
from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub
content_loader = ContentLoader('tests/fixtures/content')
content_loader.load_manifest('dos', 'data', 'edit_brief')
questions_builder = content_loader.get_manifest('dos', 'edit_brief')
class TestBuyersHelpers(object):
def test_get_framework_and_lot(self):
provided_lot = LotStub(slug='digital-specialists', allows_brief=True).response()
data_api_client = mock.Mock()
data_api_client.get_framework.return_value = FrameworkStub(
slug='digital-outcomes-and-specialists-4',
status='live',
lots=[provided_lot],
).single_result_response()
framework, lot = helpers.buyers_helpers.get_framework_and_lot('digital-outcomes-and-specialists-4',
'digital-specialists',
data_api_client)
assert framework['status'] == "live"
assert framework['name'] == 'Digital Outcomes and Specialists 4'
assert framework['slug'] == 'digital-outcomes-and-specialists-4'
assert framework['clarificationQuestionsOpen'] is True
assert lot == provided_lot
def test_get_framework_and_lot_404s_for_wrong_framework_status(self):
data_api_client = mock.Mock()
data_api_client.get_framework.return_value = FrameworkStub(
slug='digital-outcomes-and-specialists-4',
status='open',
lots=[
LotStub(slug='digital-specialists', allows_brief=True).response()
]
).single_result_response()
with pytest.raises(NotFound):
helpers.buyers_helpers.get_framework_and_lot(
'digital-outcomes-and-specialists-4',
'digital-specialists',
data_api_client,
allowed_statuses=['live'],
)
def test_get_framework_and_lot_404s_if_allows_brief_required(self):
data_api_client = mock.Mock()
data_api_client.get_framework.return_value = FrameworkStub(
slug='digital-outcomes-and-specialists-4',
status='live',
lots=[
LotStub(slug='digital-specialists', allows_brief=False).response()
]
).single_result_response()
with pytest.raises(NotFound):
helpers.buyers_helpers.get_framework_and_lot(
'digital-outcomes-and-specialists-4',
'digital-specialists',
data_api_client,
must_allow_brief=True,
)
@pytest.mark.parametrize(
['framework', 'lot', 'user', 'result'],
[
('digital-outcomes-and-specialists-4', 'digital-specialists', 123, True),
('not-digital-outcomes-and-specialists', 'digital-specialists', 123, False),
('digital-outcomes-and-specialists-4', 'not-digital-specialists', 123, False),
('digital-outcomes-and-specialists-4', 'digital-specialists', 124, False),
]
)
def test_is_brief_correct(self, framework, lot, user, result):
brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live').response()
assert helpers.buyers_helpers.is_brief_correct(brief, framework, lot, user) is result
@pytest.mark.parametrize(
['status', 'allow_withdrawn', 'result'],
[
('withdrawn', True, True),
('withdrawn', False, False),
('live', True, True),
('live', False, True),
]
)
def test_if_brief_correct_allow_withdrawn(self, status, allow_withdrawn, result):
brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status=status).response()
assert helpers.buyers_helpers.is_brief_correct(
brief, 'digital-outcomes-and-specialists-4', 'digital-specialists', 123, allow_withdrawn=allow_withdrawn
) is result
@pytest.mark.parametrize(
'allowed_statuses, result', [
(['live', 'closed'], True),
(['closed'], False)
]
)
def test_is_brief_correct_allowed_statuses(self, allowed_statuses, result):
brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live').response()
assert helpers.buyers_helpers.is_brief_correct(
brief, 'digital-outcomes-and-specialists-4', 'digital-specialists', 123, allowed_statuses=allowed_statuses
) is result
def test_is_brief_associated_with_user(self):
brief = BriefStub(user_id=123).response()
assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 123) is True
assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 234) is False
def test_brief_can_be_edited(self):
assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='draft').response()) is True
assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='live').response()) is False
def test_brief_is_withdrawn(self):
assert helpers.buyers_helpers.brief_is_withdrawn(BriefStub(status='withdrawn').response()) is True
assert helpers.buyers_helpers.brief_is_withdrawn(BriefStub(status='live').response()) is False
def test_section_has_at_least_one_required_question(self):
content = content_loader.get_manifest('dos', 'edit_brief').filter(
{'lot': 'digital-specialists'}
)
sections_with_required_questions = {
'section-1': True,
'section-2': True,
'section-4': False,
'section-5': True
}
for section in content.sections:
assert helpers.buyers_helpers.section_has_at_least_one_required_question(section) \
== sections_with_required_questions[section.slug]
def test_count_unanswered_questions(self):
brief = {
'status': 'draft',
'frameworkSlug': 'dos',
'lotSlug': 'digital-specialists',
'required1': True
}
content = content_loader.get_manifest('dos', 'edit_brief').filter(
{'lot': 'digital-specialists'}
)
sections = content.summary(brief)
unanswered_required, unanswered_optional = helpers.buyers_helpers.count_unanswered_questions(sections)
assert unanswered_required == 2
assert unanswered_optional == 2
def test_add_unanswered_counts_to_briefs(self):
briefs = [{
'status': 'draft',
'frameworkSlug': 'dos',
'lotSlug': 'digital-specialists',
'required1': True
}]
assert helpers.buyers_helpers.add_unanswered_counts_to_briefs(briefs, content_loader) == [{
'status': 'draft',
'frameworkSlug': 'dos',
'lotSlug': 'digital-specialists',
'required1': True,
'unanswered_required': 2,
'unanswered_optional': 2
}]
def test_get_sorted_responses_for_brief(self):
data_api_client = mock.Mock()
data_api_client.find_brief_responses.return_value = {
"briefResponses": [
{"id": "five", "niceToHaveRequirements": [True, True, True, True, True]},
{"id": "zero", "niceToHaveRequirements": [False, False, False, False, False]},
{"id": "three", "niceToHaveRequirements": [True, True, False, False, True]},
{"id": "five", "niceToHaveRequirements": [True, True, True, True, True]},
{"id": "four", "niceToHaveRequirements": [True, True, True, True, False]},
{"id": "one", "niceToHaveRequirements": [False, False, False, True, False]},
{"id": "four", "niceToHaveRequirements": [True, True, True, True, False]},
]
}
brief = {"id": 1, "niceToHaveRequirements": ["Nice", "to", "have", "yes", "please"]}
assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [
{'id': 'five', 'niceToHaveRequirements': [True, True, True, True, True]},
{'id': 'five', 'niceToHaveRequirements': [True, True, True, True, True]},
{'id': 'four', 'niceToHaveRequirements': [True, True, True, True, False]},
{'id': 'four', 'niceToHaveRequirements': [True, True, True, True, False]},
{'id': 'three', 'niceToHaveRequirements': [True, True, False, False, True]},
{"id": "one", "niceToHaveRequirements": [False, False, False, True, False]},
{'id': 'zero', 'niceToHaveRequirements': [False, False, False, False, False]}
]
def test_get_sorted_responses_does_not_sort_if_no_nice_to_haves(self):
data_api_client = mock.Mock()
data_api_client.find_brief_responses.return_value = {
"briefResponses": [
{"id": "five"},
{"id": "zero"},
{"id": "three"},
{"id": "five"}
]
}
brief = {"id": 1, "niceToHaveRequirements": []}
assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [
{"id": "five"},
{"id": "zero"},
{"id": "three"},
{"id": "five"}
]
| [((10, 17, 10, 56), 'dmcontent.content_loader.ContentLoader', 'ContentLoader', ({(10, 31, 10, 55): '"""tests/fixtures/content"""'}, {}), "('tests/fixtures/content')", False, 'from dmcontent.content_loader import ContentLoader\n'), ((71, 5, 79, 5), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(72, 8, 72, 46): "['framework', 'lot', 'user', 'result']", (73, 8, 78, 9): "[('digital-outcomes-and-specialists-4', 'digital-specialists', 123, True),\n ('not-digital-outcomes-and-specialists', 'digital-specialists', 123, \n False), ('digital-outcomes-and-specialists-4',\n 'not-digital-specialists', 123, False), (\n 'digital-outcomes-and-specialists-4', 'digital-specialists', 124, False)]"}, {}), "(['framework', 'lot', 'user', 'result'], [(\n 'digital-outcomes-and-specialists-4', 'digital-specialists', 123, True),\n ('not-digital-outcomes-and-specialists', 'digital-specialists', 123, \n False), ('digital-outcomes-and-specialists-4',\n 'not-digital-specialists', 123, False), (\n 'digital-outcomes-and-specialists-4', 'digital-specialists', 124, False)])", False, 'import pytest\n'), ((85, 5, 93, 5), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(86, 8, 86, 47): "['status', 'allow_withdrawn', 'result']", (87, 8, 92, 9): "[('withdrawn', True, True), ('withdrawn', False, False), ('live', True, \n True), ('live', False, True)]"}, {}), "(['status', 'allow_withdrawn', 'result'], [(\n 'withdrawn', True, True), ('withdrawn', False, False), ('live', True, \n True), ('live', False, True)])", False, 'import pytest\n'), ((100, 5, 105, 5), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(101, 8, 101, 34): '"""allowed_statuses, result"""', (101, 36, 104, 9): "[(['live', 'closed'], True), (['closed'], False)]"}, {}), "('allowed_statuses, result', [(['live', 'closed'], \n True), (['closed'], False)])", False, 'import pytest\n'), ((18, 26, 18, 37), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((25, 25, 27, 86), 'app.main.helpers.buyers_helpers.get_framework_and_lot', 'helpers.buyers_helpers.get_framework_and_lot', ({(25, 70, 25, 106): '"""digital-outcomes-and-specialists-4"""', (26, 70, 26, 91): '"""digital-specialists"""', (27, 70, 27, 85): 'data_api_client'}, {}), "(\n 'digital-outcomes-and-specialists-4', 'digital-specialists',\n data_api_client)", True, 'import app.main.helpers as helpers\n'), ((36, 26, 36, 37), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((54, 26, 54, 37), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((153, 51, 153, 110), 'app.main.helpers.buyers_helpers.count_unanswered_questions', 'helpers.buyers_helpers.count_unanswered_questions', ({(153, 101, 153, 109): 'sections'}, {}), '(sections)', True, 'import app.main.helpers as helpers\n'), ((175, 26, 175, 37), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((200, 26, 200, 37), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((45, 13, 45, 36), 'pytest.raises', 'pytest.raises', ({(45, 27, 45, 35): 'NotFound'}, {}), '(NotFound)', False, 'import pytest\n'), ((46, 12, 51, 13), 'app.main.helpers.buyers_helpers.get_framework_and_lot', 'helpers.buyers_helpers.get_framework_and_lot', (), '', True, 'import app.main.helpers as helpers\n'), ((63, 13, 63, 36), 'pytest.raises', 'pytest.raises', ({(63, 27, 63, 35): 'NotFound'}, {}), '(NotFound)', False, 'import pytest\n'), ((64, 12, 69, 13), 'app.main.helpers.buyers_helpers.get_framework_and_lot', 'helpers.buyers_helpers.get_framework_and_lot', (), '', True, 'import app.main.helpers 
as helpers\n'), ((83, 15, 83, 83), 'app.main.helpers.buyers_helpers.is_brief_correct', 'helpers.buyers_helpers.is_brief_correct', ({(83, 55, 83, 60): 'brief', (83, 62, 83, 71): 'framework', (83, 73, 83, 76): 'lot', (83, 78, 83, 82): 'user'}, {}), '(brief, framework, lot, user)', True, 'import app.main.helpers as helpers\n'), ((96, 15, 98, 9), 'app.main.helpers.buyers_helpers.is_brief_correct', 'helpers.buyers_helpers.is_brief_correct', (), '', True, 'import app.main.helpers as helpers\n'), ((108, 15, 110, 9), 'app.main.helpers.buyers_helpers.is_brief_correct', 'helpers.buyers_helpers.is_brief_correct', (), '', True, 'import app.main.helpers as helpers\n'), ((114, 15, 114, 79), 'app.main.helpers.buyers_helpers.is_brief_associated_with_user', 'helpers.buyers_helpers.is_brief_associated_with_user', ({(114, 68, 114, 73): 'brief', (114, 75, 114, 78): '(123)'}, {}), '(brief, 123)', True, 'import app.main.helpers as helpers\n'), ((115, 15, 115, 79), 'app.main.helpers.buyers_helpers.is_brief_associated_with_user', 'helpers.buyers_helpers.is_brief_associated_with_user', ({(115, 68, 115, 73): 'brief', (115, 75, 115, 78): '(234)'}, {}), '(brief, 234)', True, 'import app.main.helpers as helpers\n'), ((165, 15, 165, 93), 'app.main.helpers.buyers_helpers.add_unanswered_counts_to_briefs', 'helpers.buyers_helpers.add_unanswered_counts_to_briefs', ({(165, 70, 165, 76): 'briefs', (165, 78, 165, 92): 'content_loader'}, {}), '(briefs, content_loader)', True, 'import app.main.helpers as helpers\n'), ((189, 15, 189, 92), 'app.main.helpers.buyers_helpers.get_sorted_responses_for_brief', 'helpers.buyers_helpers.get_sorted_responses_for_brief', ({(189, 69, 189, 74): 'brief', (189, 76, 189, 91): 'data_api_client'}, {}), '(brief, data_api_client)', True, 'import app.main.helpers as helpers\n'), ((210, 15, 210, 92), 'app.main.helpers.buyers_helpers.get_sorted_responses_for_brief', 'helpers.buyers_helpers.get_sorted_responses_for_brief', ({(210, 69, 210, 74): 'brief', (210, 76, 210, 91): 'data_api_client'}, {}), '(brief, data_api_client)', True, 'import app.main.helpers as helpers\n'), ((17, 23, 17, 77), 'dmtestutils.api_model_stubs.LotStub', 'LotStub', (), '', False, 'from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub\n'), ((19, 53, 23, 9), 'dmtestutils.api_model_stubs.FrameworkStub', 'FrameworkStub', (), '', False, 'from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub\n'), ((81, 16, 81, 106), 'dmtestutils.api_model_stubs.BriefStub', 'BriefStub', (), '', False, 'from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub\n'), ((95, 16, 95, 106), 'dmtestutils.api_model_stubs.BriefStub', 'BriefStub', (), '', False, 'from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub\n'), ((107, 16, 107, 106), 'dmtestutils.api_model_stubs.BriefStub', 'BriefStub', (), '', False, 'from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub\n'), ((113, 16, 113, 38), 'dmtestutils.api_model_stubs.BriefStub', 'BriefStub', (), '', False, 'from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub\n'), ((138, 19, 138, 93), 'app.main.helpers.buyers_helpers.section_has_at_least_one_required_question', 'helpers.buyers_helpers.section_has_at_least_one_required_question', ({(138, 85, 138, 92): 'section'}, {}), '(section)', True, 'import app.main.helpers as helpers\n'), ((118, 58, 118, 83), 'dmtestutils.api_model_stubs.BriefStub', 'BriefStub', (), '', False, 'from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub\n'), 
((119, 58, 119, 82), 'dmtestutils.api_model_stubs.BriefStub', 'BriefStub', (), '', False, 'from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub\n'), ((122, 57, 122, 86), 'dmtestutils.api_model_stubs.BriefStub', 'BriefStub', (), '', False, 'from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub\n'), ((123, 57, 123, 81), 'dmtestutils.api_model_stubs.BriefStub', 'BriefStub', (), '', False, 'from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub\n'), ((41, 16, 41, 70), 'dmtestutils.api_model_stubs.LotStub', 'LotStub', (), '', False, 'from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub\n'), ((59, 16, 59, 71), 'dmtestutils.api_model_stubs.LotStub', 'LotStub', (), '', False, 'from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub\n')] |
devinrsmith/deephaven-core | Plot/src/test/java/io/deephaven/db/plot/example_plots/PlottingPQ.py | 3a6930046faf1cd556f62a914ce1cfd7860147b9 | import deephaven.TableTools as tt
import deephaven.Plot as plt
t = tt.emptyTable(50)\
.update("X = i + 5", "XLow = X -1", "XHigh = X + 1", "Y = Math.random() * 5", "YLow = Y - 1", "YHigh = Y + 1", "USym = i % 2 == 0 ? `AAPL` : `MSFT`")
p = plt.plot("S1", t, "X", "Y").lineColor("black").show()
p2 = plt.plot("S1", t, "X", "Y").plotStyle("bar").gradientVisible(True).show()
p3 = plt.plot("S1", t, "X", "Y").plotStyle("scatter").pointColor("black").pointSize(2).show()
p4 = plt.plot("S1", t, "X", "Y").plotStyle("area").seriesColor("red").show()
p4 = plt.plot3d("S1", t, "X", "X", "Y").show()
pBy = plt.plotBy("S1", t, "X", "Y", "USym").show()
pBy = plt.plot3dBy("S1", t, "X", "X", "Y", "USym").show()
cp = plt.catPlot("S1", t, "X", "Y").lineColor("black").show()
cp2 = plt.catPlot("S1", t, "X", "Y").plotStyle("bar").gradientVisible(True).show()
cp3 = plt.catPlot("S1", t, "X", "Y").plotStyle("scatter").pointColor("black").pointSize(2).show()
cp4 = plt.catPlot("S1", t, "X", "Y").plotStyle("area").seriesColor("red").show()
cp = plt.catPlot3d("S1", t, "X", "X", "Y").show()
cpBy = plt.catPlotBy("S1", t, "X", "Y", "USym").show()
cpBy = plt.catPlot3dBy("S1", t, "X", "X", "Y", "USym").show()
pp = plt.piePlot("S1", t, "X", "Y")
chp = plt.catHistPlot("S1", t, "X").show()
hp = plt.histPlot("S1", t, "X", 5).show()
hp = plt.histPlot("S1", t, "X", 0, 10, 5).show()
ep = plt.errorBarXY("S1", t, "X", "XLow", "XHigh", "Y", "YLow", "YHigh").show()
epBy = plt.errorBarXYBy("S1", t, "X", "XLow", "XHigh", "Y", "YLow", "YHigh", "USym").show()
ep2 = plt.errorBarX("S1", t, "X", "XLow", "XHigh", "Y").show()
epBy2 = plt.errorBarXBy("S1", t, "X", "XLow", "XHigh", "Y", "USym").show()
ep3 = plt.errorBarY("S1", t, "X", "Y", "YLow", "YHigh").show()
epBy3 = plt.errorBarYBy("S1", t, "X", "Y", "YLow", "YHigh", "USym").show()
doubles = [3, 4, 3, 5, 4, 5]
time = 1491946585000000000
t = tt.newTable(tt.col("USym", ["A", "B", "A", "B", "A", "B"]),
tt.doubleCol("Open", doubles), tt.doubleCol("High", doubles),
tt.doubleCol("Low", doubles), tt.doubleCol("Close", doubles))
t = t.updateView("Time = new DBDateTime(time + (MINUTE * i))")
ohlc = plt.ohlcPlot("Test1", t, "Time", "Open", "High", "Low", "Close")
ohlcPlotBy = plt.figure().newChart(0)\
.chartTitle("Chart Title")\
.newAxes()\
.xLabel("X")\
.yLabel("Y")\
.ohlcPlotBy("Test1", t, "Time", "Open", "High", "Low", "Close", "USym")
categories = ["Samsung", "Others", "Nokia", "Apple", "MSFT"]
valuesD = [27.8, 55.3, 16.8, 17.1, 23.1]
valuesI = [27, 55, 16, 17, 15]
ap = plt.plot("S1", valuesD, valuesI).show()
ap = plt.plot3d("S1", valuesI, valuesI, valuesI).show()
acp = plt.catPlot("S1", categories, valuesI).show()
acp2 = plt.catPlot3d("S1", categories, categories, valuesD).show()
achp = plt.catHistPlot("S1", categories).show()
app = plt.figure().xLabel("X").yLabel("Y").piePlot("S1", categories, valuesI).pointLabelFormat("{0}").show()
aep = plt.errorBarXY("S1", valuesD, valuesD, valuesD, valuesD, valuesD, valuesD).show()
aep2 = plt.errorBarX("S1", valuesD, valuesD, valuesD, valuesD).show()
aep3 = plt.errorBarY("S1", valuesD, valuesD, valuesD, valuesD).show()
hp = plt.histPlot("S1", valuesD, 5).show()
hp = plt.histPlot("S1", valuesD, 0, 10, 5).show()
hp = plt.histPlot("S1", valuesI, 5).show()
| [((32, 5, 32, 35), 'deephaven.Plot.piePlot', 'plt.piePlot', ({(32, 17, 32, 21): '"""S1"""', (32, 23, 32, 24): 't', (32, 26, 32, 29): '"""X"""', (32, 31, 32, 34): '"""Y"""'}, {}), "('S1', t, 'X', 'Y')", True, 'import deephaven.Plot as plt\n'), ((56, 7, 56, 71), 'deephaven.Plot.ohlcPlot', 'plt.ohlcPlot', ({(56, 20, 56, 27): '"""Test1"""', (56, 29, 56, 30): 't', (56, 32, 56, 38): '"""Time"""', (56, 40, 56, 46): '"""Open"""', (56, 48, 56, 54): '"""High"""', (56, 56, 56, 61): '"""Low"""', (56, 63, 56, 70): '"""Close"""'}, {}), "('Test1', t, 'Time', 'Open', 'High', 'Low', 'Close')", True, 'import deephaven.Plot as plt\n'), ((50, 16, 50, 62), 'deephaven.TableTools.col', 'tt.col', ({(50, 23, 50, 29): '"""USym"""', (50, 31, 50, 61): "['A', 'B', 'A', 'B', 'A', 'B']"}, {}), "('USym', ['A', 'B', 'A', 'B', 'A', 'B'])", True, 'import deephaven.TableTools as tt\n'), ((51, 16, 51, 45), 'deephaven.TableTools.doubleCol', 'tt.doubleCol', ({(51, 29, 51, 35): '"""Open"""', (51, 37, 51, 44): 'doubles'}, {}), "('Open', doubles)", True, 'import deephaven.TableTools as tt\n'), ((51, 47, 51, 76), 'deephaven.TableTools.doubleCol', 'tt.doubleCol', ({(51, 60, 51, 66): '"""High"""', (51, 68, 51, 75): 'doubles'}, {}), "('High', doubles)", True, 'import deephaven.TableTools as tt\n'), ((52, 16, 52, 44), 'deephaven.TableTools.doubleCol', 'tt.doubleCol', ({(52, 29, 52, 34): '"""Low"""', (52, 36, 52, 43): 'doubles'}, {}), "('Low', doubles)", True, 'import deephaven.TableTools as tt\n'), ((52, 46, 52, 76), 'deephaven.TableTools.doubleCol', 'tt.doubleCol', ({(52, 59, 52, 66): '"""Close"""', (52, 68, 52, 75): 'doubles'}, {}), "('Close', doubles)", True, 'import deephaven.TableTools as tt\n'), ((5, 4, 5, 21), 'deephaven.TableTools.emptyTable', 'tt.emptyTable', ({(5, 18, 5, 20): '50'}, {}), '(50)', True, 'import deephaven.TableTools as tt\n'), ((15, 5, 15, 39), 'deephaven.Plot.plot3d', 'plt.plot3d', ({(15, 16, 15, 20): '"""S1"""', (15, 22, 15, 23): 't', (15, 25, 15, 28): '"""X"""', (15, 30, 15, 33): '"""X"""', (15, 35, 15, 38): '"""Y"""'}, {}), "('S1', t, 'X', 'X', 'Y')", True, 'import deephaven.Plot as plt\n'), ((18, 6, 18, 43), 'deephaven.Plot.plotBy', 'plt.plotBy', ({(18, 17, 18, 21): '"""S1"""', (18, 23, 18, 24): 't', (18, 26, 18, 29): '"""X"""', (18, 31, 18, 34): '"""Y"""', (18, 36, 18, 42): '"""USym"""'}, {}), "('S1', t, 'X', 'Y', 'USym')", True, 'import deephaven.Plot as plt\n'), ((19, 6, 19, 51), 'deephaven.Plot.plot3dBy', 'plt.plot3dBy', ({(19, 19, 19, 23): '"""S1"""', (19, 25, 19, 26): 't', (19, 28, 19, 31): '"""X"""', (19, 34, 19, 37): '"""X"""', (19, 39, 19, 42): '"""Y"""', (19, 44, 19, 50): '"""USym"""'}, {}), "('S1', t, 'X', 'X', 'Y', 'USym')", True, 'import deephaven.Plot as plt\n'), ((26, 5, 26, 42), 'deephaven.Plot.catPlot3d', 'plt.catPlot3d', ({(26, 19, 26, 23): '"""S1"""', (26, 25, 26, 26): 't', (26, 28, 26, 31): '"""X"""', (26, 33, 26, 36): '"""X"""', (26, 38, 26, 41): '"""Y"""'}, {}), "('S1', t, 'X', 'X', 'Y')", True, 'import deephaven.Plot as plt\n'), ((28, 7, 28, 47), 'deephaven.Plot.catPlotBy', 'plt.catPlotBy', ({(28, 21, 28, 25): '"""S1"""', (28, 27, 28, 28): 't', (28, 30, 28, 33): '"""X"""', (28, 35, 28, 38): '"""Y"""', (28, 40, 28, 46): '"""USym"""'}, {}), "('S1', t, 'X', 'Y', 'USym')", True, 'import deephaven.Plot as plt\n'), ((30, 7, 30, 54), 'deephaven.Plot.catPlot3dBy', 'plt.catPlot3dBy', ({(30, 23, 30, 27): '"""S1"""', (30, 29, 30, 30): 't', (30, 32, 30, 35): '"""X"""', (30, 37, 30, 40): '"""X"""', (30, 42, 30, 45): '"""Y"""', (30, 47, 30, 53): '"""USym"""'}, {}), "('S1', t, 'X', 'X', 'Y', 
'USym')", True, 'import deephaven.Plot as plt\n'), ((35, 6, 35, 35), 'deephaven.Plot.catHistPlot', 'plt.catHistPlot', ({(35, 22, 35, 26): '"""S1"""', (35, 28, 35, 29): 't', (35, 31, 35, 34): '"""X"""'}, {}), "('S1', t, 'X')", True, 'import deephaven.Plot as plt\n'), ((37, 5, 37, 34), 'deephaven.Plot.histPlot', 'plt.histPlot', ({(37, 18, 37, 22): '"""S1"""', (37, 24, 37, 25): 't', (37, 27, 37, 30): '"""X"""', (37, 32, 37, 33): '5'}, {}), "('S1', t, 'X', 5)", True, 'import deephaven.Plot as plt\n'), ((38, 5, 38, 41), 'deephaven.Plot.histPlot', 'plt.histPlot', ({(38, 18, 38, 22): '"""S1"""', (38, 24, 38, 25): 't', (38, 27, 38, 30): '"""X"""', (38, 32, 38, 33): '0', (38, 35, 38, 37): '10', (38, 39, 38, 40): '5'}, {}), "('S1', t, 'X', 0, 10, 5)", True, 'import deephaven.Plot as plt\n'), ((40, 5, 40, 72), 'deephaven.Plot.errorBarXY', 'plt.errorBarXY', ({(40, 20, 40, 24): '"""S1"""', (40, 26, 40, 27): 't', (40, 29, 40, 32): '"""X"""', (40, 34, 40, 40): '"""XLow"""', (40, 42, 40, 49): '"""XHigh"""', (40, 51, 40, 54): '"""Y"""', (40, 56, 40, 62): '"""YLow"""', (40, 64, 40, 71): '"""YHigh"""'}, {}), "('S1', t, 'X', 'XLow', 'XHigh', 'Y', 'YLow', 'YHigh')", True, 'import deephaven.Plot as plt\n'), ((41, 7, 41, 84), 'deephaven.Plot.errorBarXYBy', 'plt.errorBarXYBy', ({(41, 24, 41, 28): '"""S1"""', (41, 30, 41, 31): 't', (41, 33, 41, 36): '"""X"""', (41, 38, 41, 44): '"""XLow"""', (41, 46, 41, 53): '"""XHigh"""', (41, 55, 41, 58): '"""Y"""', (41, 60, 41, 66): '"""YLow"""', (41, 68, 41, 75): '"""YHigh"""', (41, 77, 41, 83): '"""USym"""'}, {}), "('S1', t, 'X', 'XLow', 'XHigh', 'Y', 'YLow', 'YHigh', 'USym')", True, 'import deephaven.Plot as plt\n'), ((42, 6, 42, 55), 'deephaven.Plot.errorBarX', 'plt.errorBarX', ({(42, 20, 42, 24): '"""S1"""', (42, 26, 42, 27): 't', (42, 29, 42, 32): '"""X"""', (42, 34, 42, 40): '"""XLow"""', (42, 42, 42, 49): '"""XHigh"""', (42, 51, 42, 54): '"""Y"""'}, {}), "('S1', t, 'X', 'XLow', 'XHigh', 'Y')", True, 'import deephaven.Plot as plt\n'), ((43, 8, 43, 67), 'deephaven.Plot.errorBarXBy', 'plt.errorBarXBy', ({(43, 24, 43, 28): '"""S1"""', (43, 30, 43, 31): 't', (43, 33, 43, 36): '"""X"""', (43, 38, 43, 44): '"""XLow"""', (43, 46, 43, 53): '"""XHigh"""', (43, 55, 43, 58): '"""Y"""', (43, 60, 43, 66): '"""USym"""'}, {}), "('S1', t, 'X', 'XLow', 'XHigh', 'Y', 'USym')", True, 'import deephaven.Plot as plt\n'), ((44, 6, 44, 55), 'deephaven.Plot.errorBarY', 'plt.errorBarY', ({(44, 20, 44, 24): '"""S1"""', (44, 26, 44, 27): 't', (44, 29, 44, 32): '"""X"""', (44, 34, 44, 37): '"""Y"""', (44, 39, 44, 45): '"""YLow"""', (44, 47, 44, 54): '"""YHigh"""'}, {}), "('S1', t, 'X', 'Y', 'YLow', 'YHigh')", True, 'import deephaven.Plot as plt\n'), ((45, 8, 45, 67), 'deephaven.Plot.errorBarYBy', 'plt.errorBarYBy', ({(45, 24, 45, 28): '"""S1"""', (45, 30, 45, 31): 't', (45, 33, 45, 36): '"""X"""', (45, 38, 45, 41): '"""Y"""', (45, 43, 45, 49): '"""YLow"""', (45, 51, 45, 58): '"""YHigh"""', (45, 60, 45, 66): '"""USym"""'}, {}), "('S1', t, 'X', 'Y', 'YLow', 'YHigh', 'USym')", True, 'import deephaven.Plot as plt\n'), ((70, 5, 70, 37), 'deephaven.Plot.plot', 'plt.plot', ({(70, 14, 70, 18): '"""S1"""', (70, 20, 70, 27): 'valuesD', (70, 29, 70, 36): 'valuesI'}, {}), "('S1', valuesD, valuesI)", True, 'import deephaven.Plot as plt\n'), ((71, 5, 71, 48), 'deephaven.Plot.plot3d', 'plt.plot3d', ({(71, 16, 71, 20): '"""S1"""', (71, 22, 71, 29): 'valuesI', (71, 31, 71, 38): 'valuesI', (71, 40, 71, 47): 'valuesI'}, {}), "('S1', valuesI, valuesI, valuesI)", True, 'import deephaven.Plot as plt\n'), ((73, 6, 73, 
44), 'deephaven.Plot.catPlot', 'plt.catPlot', ({(73, 18, 73, 22): '"""S1"""', (73, 24, 73, 34): 'categories', (73, 36, 73, 43): 'valuesI'}, {}), "('S1', categories, valuesI)", True, 'import deephaven.Plot as plt\n'), ((74, 7, 74, 59), 'deephaven.Plot.catPlot3d', 'plt.catPlot3d', ({(74, 21, 74, 25): '"""S1"""', (74, 27, 74, 37): 'categories', (74, 39, 74, 49): 'categories', (74, 51, 74, 58): 'valuesD'}, {}), "('S1', categories, categories, valuesD)", True, 'import deephaven.Plot as plt\n'), ((76, 7, 76, 40), 'deephaven.Plot.catHistPlot', 'plt.catHistPlot', ({(76, 23, 76, 27): '"""S1"""', (76, 29, 76, 39): 'categories'}, {}), "('S1', categories)", True, 'import deephaven.Plot as plt\n'), ((80, 6, 80, 80), 'deephaven.Plot.errorBarXY', 'plt.errorBarXY', ({(80, 21, 80, 25): '"""S1"""', (80, 27, 80, 34): 'valuesD', (80, 36, 80, 43): 'valuesD', (80, 45, 80, 52): 'valuesD', (80, 54, 80, 61): 'valuesD', (80, 63, 80, 70): 'valuesD', (80, 72, 80, 79): 'valuesD'}, {}), "('S1', valuesD, valuesD, valuesD, valuesD, valuesD, valuesD)", True, 'import deephaven.Plot as plt\n'), ((81, 7, 81, 62), 'deephaven.Plot.errorBarX', 'plt.errorBarX', ({(81, 21, 81, 25): '"""S1"""', (81, 27, 81, 34): 'valuesD', (81, 36, 81, 43): 'valuesD', (81, 45, 81, 52): 'valuesD', (81, 54, 81, 61): 'valuesD'}, {}), "('S1', valuesD, valuesD, valuesD, valuesD)", True, 'import deephaven.Plot as plt\n'), ((82, 7, 82, 62), 'deephaven.Plot.errorBarY', 'plt.errorBarY', ({(82, 21, 82, 25): '"""S1"""', (82, 27, 82, 34): 'valuesD', (82, 36, 82, 43): 'valuesD', (82, 45, 82, 52): 'valuesD', (82, 54, 82, 61): 'valuesD'}, {}), "('S1', valuesD, valuesD, valuesD, valuesD)", True, 'import deephaven.Plot as plt\n'), ((85, 5, 85, 35), 'deephaven.Plot.histPlot', 'plt.histPlot', ({(85, 18, 85, 22): '"""S1"""', (85, 24, 85, 31): 'valuesD', (85, 33, 85, 34): '5'}, {}), "('S1', valuesD, 5)", True, 'import deephaven.Plot as plt\n'), ((86, 5, 86, 42), 'deephaven.Plot.histPlot', 'plt.histPlot', ({(86, 18, 86, 22): '"""S1"""', (86, 24, 86, 31): 'valuesD', (86, 33, 86, 34): '0', (86, 36, 86, 38): '10', (86, 40, 86, 41): '5'}, {}), "('S1', valuesD, 0, 10, 5)", True, 'import deephaven.Plot as plt\n'), ((87, 5, 87, 35), 'deephaven.Plot.histPlot', 'plt.histPlot', ({(87, 18, 87, 22): '"""S1"""', (87, 24, 87, 31): 'valuesI', (87, 33, 87, 34): '5'}, {}), "('S1', valuesI, 5)", True, 'import deephaven.Plot as plt\n'), ((9, 4, 9, 31), 'deephaven.Plot.plot', 'plt.plot', ({(9, 13, 9, 17): '"""S1"""', (9, 19, 9, 20): 't', (9, 22, 9, 25): '"""X"""', (9, 27, 9, 30): '"""Y"""'}, {}), "('S1', t, 'X', 'Y')", True, 'import deephaven.Plot as plt\n'), ((21, 5, 21, 35), 'deephaven.Plot.catPlot', 'plt.catPlot', ({(21, 17, 21, 21): '"""S1"""', (21, 23, 21, 24): 't', (21, 26, 21, 29): '"""X"""', (21, 31, 21, 34): '"""Y"""'}, {}), "('S1', t, 'X', 'Y')", True, 'import deephaven.Plot as plt\n'), ((10, 5, 10, 32), 'deephaven.Plot.plot', 'plt.plot', ({(10, 14, 10, 18): '"""S1"""', (10, 20, 10, 21): 't', (10, 23, 10, 26): '"""X"""', (10, 28, 10, 31): '"""Y"""'}, {}), "('S1', t, 'X', 'Y')", True, 'import deephaven.Plot as plt\n'), ((12, 5, 12, 32), 'deephaven.Plot.plot', 'plt.plot', ({(12, 14, 12, 18): '"""S1"""', (12, 20, 12, 21): 't', (12, 23, 12, 26): '"""X"""', (12, 28, 12, 31): '"""Y"""'}, {}), "('S1', t, 'X', 'Y')", True, 'import deephaven.Plot as plt\n'), ((22, 6, 22, 36), 'deephaven.Plot.catPlot', 'plt.catPlot', ({(22, 18, 22, 22): '"""S1"""', (22, 24, 22, 25): 't', (22, 27, 22, 30): '"""X"""', (22, 32, 22, 35): '"""Y"""'}, {}), "('S1', t, 'X', 'Y')", True, 'import deephaven.Plot as 
plt\n'), ((24, 6, 24, 36), 'deephaven.Plot.catPlot', 'plt.catPlot', ({(24, 18, 24, 22): '"""S1"""', (24, 24, 24, 25): 't', (24, 27, 24, 30): '"""X"""', (24, 32, 24, 35): '"""Y"""'}, {}), "('S1', t, 'X', 'Y')", True, 'import deephaven.Plot as plt\n'), ((11, 5, 11, 32), 'deephaven.Plot.plot', 'plt.plot', ({(11, 14, 11, 18): '"""S1"""', (11, 20, 11, 21): 't', (11, 23, 11, 26): '"""X"""', (11, 28, 11, 31): '"""Y"""'}, {}), "('S1', t, 'X', 'Y')", True, 'import deephaven.Plot as plt\n'), ((23, 6, 23, 36), 'deephaven.Plot.catPlot', 'plt.catPlot', ({(23, 18, 23, 22): '"""S1"""', (23, 24, 23, 25): 't', (23, 27, 23, 30): '"""X"""', (23, 32, 23, 35): '"""Y"""'}, {}), "('S1', t, 'X', 'Y')", True, 'import deephaven.Plot as plt\n'), ((78, 6, 78, 18), 'deephaven.Plot.figure', 'plt.figure', ({}, {}), '()', True, 'import deephaven.Plot as plt\n'), ((58, 13, 58, 25), 'deephaven.Plot.figure', 'plt.figure', ({}, {}), '()', True, 'import deephaven.Plot as plt\n')] |
ahmedmagdyawaad/redhat-ci-dashboard | rhoci/test/routes.py | a9c0445add4e99bb44a8075752a62176968278df | # Copyright 2019 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from flask import current_app as app
from flask import render_template
from flask import url_for
import logging
LOG = logging.getLogger(__name__)
from rhoci.test import bp # noqa
@bp.route('/index')
@bp.route('/')
def index():
"""All tests."""
jenkins_url = app.config['custom']['jenkins']['url']
uf = url_for('api.all_tests')
return render_template('tests/index.html',
jenkins_url=jenkins_url,
uf=uf)
@bp.route('/class/<class_name>/name/<name>')
def test(class_name, name):
"""Specific test summary."""
uf = url_for('api.test_to_jobs', class_name=class_name, test_name=name)
return render_template('tests/test_to_jobs.html', uf=uf)
| [((21, 6, 21, 33), 'logging.getLogger', 'logging.getLogger', ({(21, 24, 21, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((26, 1, 26, 19), 'rhoci.test.bp.route', 'bp.route', ({(26, 10, 26, 18): '"""/index"""'}, {}), "('/index')", False, 'from rhoci.test import bp\n'), ((27, 1, 27, 14), 'rhoci.test.bp.route', 'bp.route', ({(27, 10, 27, 13): '"""/"""'}, {}), "('/')", False, 'from rhoci.test import bp\n'), ((37, 1, 37, 44), 'rhoci.test.bp.route', 'bp.route', ({(37, 10, 37, 43): '"""/class/<class_name>/name/<name>"""'}, {}), "('/class/<class_name>/name/<name>')", False, 'from rhoci.test import bp\n'), ((31, 9, 31, 33), 'flask.url_for', 'url_for', ({(31, 17, 31, 32): '"""api.all_tests"""'}, {}), "('api.all_tests')", False, 'from flask import url_for\n'), ((32, 11, 34, 33), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((40, 9, 40, 75), 'flask.url_for', 'url_for', (), '', False, 'from flask import url_for\n'), ((41, 11, 41, 60), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n')] |
aarnaut/mitmproxy | mitmproxy/net/http/http1/__init__.py | a8b6f48374b28954f9d8fb5cabbc4fdcaebe9e3a | from .read import (
read_request_head,
read_response_head,
connection_close,
expected_http_body_size,
validate_headers,
)
from .assemble import (
assemble_request, assemble_request_head,
assemble_response, assemble_response_head,
assemble_body,
)
__all__ = [
"read_request_head",
"read_response_head",
"connection_close",
"expected_http_body_size",
"validate_headers",
"assemble_request", "assemble_request_head",
"assemble_response", "assemble_response_head",
"assemble_body",
]
| [] |
alex-hutton/django-request-token | request_token/migrations/0009_requesttokenerror.py | 299c4cb22ce3012c7ef995a648e5b1ea6b8a84d7 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-21 19:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('request_token', '0008_convert_token_data_to_jsonfield'),
]
operations = [
migrations.CreateModel(
name='RequestTokenErrorLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('error_type', models.CharField(help_text='The underlying type of error raised.', max_length=50)),
('error_message', models.CharField(help_text='The error message supplied.', max_length=200)),
('log', models.OneToOneField(help_text='The token use against which the error occurred.', on_delete=django.db.models.deletion.CASCADE, related_name='error', to='request_token.RequestTokenLog')),
('token', models.ForeignKey(help_text='The RequestToken that was used.', on_delete=django.db.models.deletion.CASCADE, related_name='errors', to='request_token.RequestToken')),
],
),
]
| [((19, 23, 19, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((20, 31, 20, 112), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((21, 34, 21, 107), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((22, 24, 22, 208), 'django.db.models.OneToOneField', 'models.OneToOneField', (), '', False, 'from django.db import migrations, models\n'), ((23, 26, 23, 189), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')] |
ncodeitgithub1/python-get-hands-dirty-programs | 01-basic-programs/04-lines.py | c9edb9e0bc9b2580737ca185935427343c550f01 | #4 lines: Fibonacci, tuple assignment
parents, babies = (1, 1)
while babies < 100:
print ('This generation has {0} babies'.format(babies))
parents, babies = (babies, parents + babies) | [] |
EvgenySmekalin/winter | winter/controller.py | 24b6a02f958478547a4a120324823743a1f7e1a1 | import typing
from .core import Component
_Controller = typing.TypeVar('_Controller')
_ControllerType = typing.Type[_Controller]
ControllerFactory = typing.NewType('ControllerFactory', typing.Callable[[typing.Type], object])
_controller_factory: typing.Optional[ControllerFactory] = None
def controller(controller_class: _ControllerType) -> _ControllerType:
Component.register(controller_class)
return controller_class
def set_controller_factory(controller_factory: ControllerFactory) -> None:
global _controller_factory
_controller_factory = controller_factory
def build_controller(controller_class: _ControllerType) -> _Controller:
if _controller_factory is None:
return controller_class()
return _controller_factory(controller_class)
def get_component(controller_class: _ControllerType) -> Component:
return Component.get_by_cls(controller_class)
| [((5, 14, 5, 43), 'typing.TypeVar', 'typing.TypeVar', ({(5, 29, 5, 42): '"""_Controller"""'}, {}), "('_Controller')", False, 'import typing\n'), ((7, 20, 7, 95), 'typing.NewType', 'typing.NewType', ({(7, 35, 7, 54): '"""ControllerFactory"""', (7, 56, 7, 94): 'typing.Callable[[typing.Type], object]'}, {}), "('ControllerFactory', typing.Callable[[typing.Type], object])", False, 'import typing\n')] |
bobg/rules_go | go/def.bzl | fd11dd2768669dc2cc1f3a11f2b0b81d84e81c32 | # Copyright 2014 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public definitions for Go rules.
All public Go rules, providers, and other definitions are imported and
re-exported in this file. This allows the real location of definitions
to change for easier maintenance.
Definitions outside this file are private unless otherwise noted, and
may change without notice.
"""
load(
"//go/private:context.bzl",
_go_context = "go_context",
)
load(
"//go/private:providers.bzl",
_GoArchive = "GoArchive",
_GoArchiveData = "GoArchiveData",
_GoLibrary = "GoLibrary",
_GoPath = "GoPath",
_GoSDK = "GoSDK",
_GoSource = "GoSource",
)
load(
"//go/private/rules:sdk.bzl",
_go_sdk = "go_sdk",
)
load(
"//go/private:go_toolchain.bzl",
_declare_toolchains = "declare_toolchains",
_go_toolchain = "go_toolchain",
)
load(
"//go/private/rules:wrappers.bzl",
_go_binary_macro = "go_binary_macro",
_go_library_macro = "go_library_macro",
_go_test_macro = "go_test_macro",
)
load(
"//go/private/rules:source.bzl",
_go_source = "go_source",
)
load(
"//extras:embed_data.bzl",
_go_embed_data = "go_embed_data",
)
load(
"//go/private/tools:path.bzl",
_go_path = "go_path",
)
load(
"//go/private/rules:library.bzl",
_go_tool_library = "go_tool_library",
)
load(
"//go/private/rules:nogo.bzl",
_nogo = "nogo_wrapper",
)
# TOOLS_NOGO is a list of all analysis passes in
# golang.org/x/tools/go/analysis/passes.
# This is not backward compatible, so use caution when depending on this --
# new analyses may discover issues in existing builds.
TOOLS_NOGO = [
"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library",
"@org_golang_x_tools//go/analysis/passes/assign:go_default_library",
"@org_golang_x_tools//go/analysis/passes/atomic:go_default_library",
"@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library",
"@org_golang_x_tools//go/analysis/passes/bools:go_default_library",
"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library",
"@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library",
# TODO(#2396): pass raw cgo sources to cgocall and re-enable.
# "@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library",
"@org_golang_x_tools//go/analysis/passes/composite:go_default_library",
"@org_golang_x_tools//go/analysis/passes/copylock:go_default_library",
"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library",
"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library",
"@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library",
"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library",
"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library",
"@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library",
"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library",
"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library",
"@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library",
"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library",
"@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library",
"@org_golang_x_tools//go/analysis/passes/printf:go_default_library",
"@org_golang_x_tools//go/analysis/passes/shadow:go_default_library",
"@org_golang_x_tools//go/analysis/passes/shift:go_default_library",
"@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library",
"@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library",
"@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library",
"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library",
"@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library",
"@org_golang_x_tools//go/analysis/passes/tests:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library",
]
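# Illustrative usage sketch (not part of the original file): TOOLS_NOGO is
# typically consumed from a BUILD file to define a nogo target; the target
# name "my_nogo" below is a placeholder.
#
#     load("@io_bazel_rules_go//go:def.bzl", "TOOLS_NOGO", "nogo")
#
#     nogo(
#         name = "my_nogo",
#         deps = TOOLS_NOGO,
#         visibility = ["//visibility:public"],
#     )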
# Current version or next version to be tagged. Gazelle and other tools may
# check this to determine compatibility.
RULES_GO_VERSION = "0.30.0"
declare_toolchains = _declare_toolchains
go_context = _go_context
go_embed_data = _go_embed_data
go_sdk = _go_sdk
go_tool_library = _go_tool_library
go_toolchain = _go_toolchain
nogo = _nogo
# See go/providers.rst#GoLibrary for full documentation.
GoLibrary = _GoLibrary
# See go/providers.rst#GoSource for full documentation.
GoSource = _GoSource
# See go/providers.rst#GoPath for full documentation.
GoPath = _GoPath
# See go/providers.rst#GoArchive for full documentation.
GoArchive = _GoArchive
# See go/providers.rst#GoArchiveData for full documentation.
GoArchiveData = _GoArchiveData
# See go/providers.rst#GoSDK for full documentation.
GoSDK = _GoSDK
# See docs/go/core/rules.md#go_library for full documentation.
go_library = _go_library_macro
# See docs/go/core/rules.md#go_binary for full documentation.
go_binary = _go_binary_macro
# See docs/go/core/rules.md#go_test for full documentation.
go_test = _go_test_macro
# See docs/go/core/rules.md#go_test for full documentation.
go_source = _go_source
# See docs/go/core/rules.md#go_path for full documentation.
go_path = _go_path
def go_vet_test(*args, **kwargs):
fail("The go_vet_test rule has been removed. Please migrate to nogo instead, which supports vet tests.")
def go_rule(**kwargs):
fail("The go_rule function has been removed. Use rule directly instead. See https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst#writing-new-go-rules")
def go_rules_dependencies():
_moved("go_rules_dependencies")
def go_register_toolchains(**kwargs):
_moved("go_register_toolchains")
def go_download_sdk(**kwargs):
_moved("go_download_sdk")
def go_host_sdk(**kwargs):
_moved("go_host_sdk")
def go_local_sdk(**kwargs):
_moved("go_local_sdk")
def go_wrap_sdk(**kwargs):
_moved("go_wrap_sdK")
def _moved(name):
fail(name + " has moved. Please load from " +
" @io_bazel_rules_go//go:deps.bzl instead of def.bzl.")
| [] |
ayalapol/anyway | anyway/parsers/united.py | ebf2436a8f9b152ae8f4d051c129bac754cb8cc1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import calendar
import csv
from datetime import datetime
import os
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import and_
from ..constants import CONST
from ..models import AccidentMarker
from ..utilities import init_flask, decode_hebrew, open_utf8
from ..import importmail
from xml.dom import minidom
import math
import requests
import logging
############################################################################################
# United.py is responsible for the parsing and deployment of "united hatzala" data to the DB
############################################################################################
PROVIDER_CODE = CONST.UNITED_HATZALA_CODE
TIME_ZONE = 2
# convert IMS hours code to hours
RAIN_DURATION_CODE_TO_HOURS = {"1": 6, "2": 12, "3": 18, "4": 24, "/": 24, "5": 1, "6": 2, "7": 3, "8": 9, "9": 15}
WEATHER = {"0": 1, "1": 2, "3": 3, "4": 4, "5": 5, "7": 6, "8": 6, "9": 7, "10": 8, "11": 9,
"12": 10, "17": 11, "18": 12, "19": 13, "20": 14, "21": 15, "22": 16, "23": 17, "24": 18,
"25": 19, "26": 20, "27": 21, "28": 22, "29": 23, "30": 24, "31": 24, "32": 24, "33": 7,
"34": 7, "35": 7, "36": 25, "37": 25, "38": 25, "39": 25, "40": 26, "41": 27, "42": 28,
"43": 29, "44": 9, "45": 30, "46": 30, "47": 30, "48": 31, "49": 32, "50": 33, "51": 34,
"52": 33, "53": 35, "54": 36, "55": 37, "56": 38, "57": 39, "58": 37, "59": 37, "61": 37, "60": 36,
"62": 40, "63": 15, "64": 41, "65": 19, "66": 42, "67": 43, "68": 44, "69": 45, "70": 46, "71": 47,
"72": 48, "73": 16, "74": 50, "75": 51, "76": 52, "77": 53, "78": 54, "79": 55, "80": 56, "81": 57,
"82": 58, "83": 59, "84": 60, "85": 61, "86": 62, "87": 63, "88": 64, "89": 65, "90": 66, "91": 67,
"92": 68, "93": 69, "94": 70, "95": 71, "96": 72, "97": 73, "98": 74, "99": 75}
def retrieve_ims_xml():  # get an xml document from the IMS (Israel Meteorological Service) website
logging.basicConfig(level=logging.DEBUG)
s = requests.session()
r = s.get('http://www.ims.gov.il/ims/PublicXML/observ.xml')
xml_doc = minidom.parseString(r.text)
collection = xml_doc.documentElement
return collection
def parse_date(created):
"""
:param created: Date & Time string from csv
:return: Python datetime object
"""
global time
global hour
DATE_FORMATS = ['%m/%d/%Y %I:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y/%m/%d %I:%M:%S', '%d/%m/%Y %I:%M', '%Y/%m/%d %I:%M', '%m/%d/%Y %I:%M']
for date_format in DATE_FORMATS:
try:
if date_format == '%Y-%m-%d %H:%M:%S':
time = datetime.strptime(str(created)[:-4], date_format)
hour = time.strftime('%H')
hour = int(hour)
else:
time = datetime.strptime(str(created)[:-3], date_format)
hour = time.strftime('%H')
hour = int(hour) if str(created).endswith('AM') else int(hour) + 12
break
except ValueError:
pass
return datetime(time.year, time.month, time.day, hour, time.minute, 0)
def is_nth_weekday(nth, daynum, year,
                   month):  # find if date is the nth occurrence of the daynum day of the week (ex: the fourth Sunday of April 2016)
# start counting the daynum from monday = 0
return calendar.Calendar(nth).monthdatescalendar(
year,
month
)[nth][daynum]
def get_parent_object_node(node):
while node.parentNode:
node = node.parentNode
if node.nodeName == "Object":
return node
def accident_time_zone_adjustment(created): # return accident time in UTC time
# pylint: disable=unexpected-keyword-arg
accident_date = parse_date(created)
daylight_saving_time = is_nth_weekday(4, 4, accident_date.year, 3)
winter_clock = is_nth_weekday(4, 6, accident_date.year, 10)
# weather is given in UTC time
# therefore in daylight_saving_time we deduct 3 hours from the local time and in winter clock 2 hours
# [
accident_date = accident_date.replace(hour=accident_date.hour - TIME_ZONE)
    # if the accident happened between April and September
    if 3 < accident_date.month < 10:
        accident_date = accident_date.replace(hour=accident_date.hour - 1)
    # if the accident happened before the last Sunday of October at 2:00 o'clock
    elif accident_date.month == 10 and (
            winter_clock.day > accident_date.day or (
                winter_clock.day == accident_date.day and accident_date.hour < 2)):
        accident_date = accident_date.replace(hour=accident_date.hour - 1)
    # if the accident happened after the last Friday of March at 2:00 o'clock
    elif accident_date.month == 3 and (daylight_saving_time.day < accident_date.day or (
            daylight_saving_time.day == accident_date.day and accident_date.hour >= 2)):
        accident_date = accident_date.replace(hour=accident_date.hour - 1)
# ]
adate = ''.join(
(str(accident_date.year), str(accident_date.month), str(accident_date.day), str(accident_date.hour)))
return adate
def all_station_in_date_frame(collection, created): # return the stations data in the time of the accident
doc = minidom.Document()
base = doc.createElement('accident_date')
doc.appendChild(base)
station_data_in_date = collection.getElementsByTagName('date_selected')
station_data_in_date.sort()
accident_date = accident_time_zone_adjustment(created)
    for station in station_data_in_date:
if accident_date in str(station.childNodes[0].nodeValue):
base.appendChild(get_parent_object_node(station))
return base
def find_station_by_coordinate(collection, latitude, longitude):
station_place_in_xml = -1
min_distance = float("inf") # initialize big starting value so the distance will always be smaller than the initial
station_data = collection.getElementsByTagName('surface_station')
for i, station in enumerate(station_data):
station_lon = station.getElementsByTagName('station_lon')
assert len(station_lon) == 1
lon = float(station_lon[0].childNodes[0].nodeValue)
lon_difference = (lon - float(longitude)) ** 2
station_lat = station.getElementsByTagName('station_lat')
assert len(station_lat) == 1
lat = float(station_lat[0].childNodes[0].nodeValue)
lat_difference = (lat - float(latitude)) ** 2
temp_dis = math.sqrt(lat_difference + lon_difference)
if temp_dis < min_distance:
min_distance = temp_dis
station_place_in_xml = i
return station_place_in_xml
def convert_xml_values_to_numbers(rain):
num_conv = rain[:2] # variable to help convert from string to number
    for char in num_conv:  # in the xml, numbers use a three-digit format (4 -> 004); we drop the leading zeroes before converting
if char == '0':
rain.replace(char, '')
else:
break
rain_in_millimeters = float(rain)
if rain_in_millimeters >= 990:
        # values of 990 and above encode tenths of a millimeter: 99X means 0.X, for example 991 = 0.1
        rain_in_millimeters = (rain_in_millimeters - 990) * 0.1
return rain_in_millimeters
def get_weather_element(station, weather_data, tag):
element = weather_data[station].getElementsByTagName(tag)
if element:
weather_element = element[0].childNodes[0].nodeValue
else:
weather_element = None
return weather_element
def process_weather_data(collection, latitude, longitude):
weather = 1 # default weather is clear sky
station = find_station_by_coordinate(collection, latitude, longitude)
weather_data = collection.getElementsByTagName('surface_observation')
wind_force = get_weather_element(station, weather_data, 'FF')
rain = get_weather_element(station, weather_data, 'RRR')
rain_duration = get_weather_element(station, weather_data,
'TR') # the duration of time in which the rain amount was measured
weather_code = get_weather_element(station, weather_data, 'WW')
if weather_code is not None:
return WEATHER[weather_code.strip()]
if wind_force is not None:
if int(wind_force) > 8:
            weather = 76  # windstorm
elif int(wind_force) > 5:
            weather = 77  # strong winds
if rain is not None and rain_duration is not None:
rain_in_millimeters = convert_xml_values_to_numbers(rain)
rain_hours = RAIN_DURATION_CODE_TO_HOURS[str(rain_duration).strip()]
# rain amount is between 0.1 and 0.5 millimeter
if 0.0 < rain_in_millimeters <= 0.5 or (
0.0 < rain_in_millimeters / rain_hours <= 0.5):
if weather == 76:
                weather = 80  # windstorm, light rain
elif weather == 77:
                weather = 84  # strong winds, light rain
else:
                weather = 37  # light rain
# average rain amount per hour is between 0.5 and 4.0 millimeters
if 0.5 < rain_in_millimeters / rain_hours <= 4:
if weather == 76:
                weather = 81  # rain and windstorm
elif weather == 77:
                weather = 85  # rain and strong winds
else:
                weather = 15  # rain
# average rain amount per hour is between 4.0 and 8.0 millimeters
elif 4 < rain_in_millimeters / rain_hours <= 8:
            if weather == 76:
                weather = 82  # windstorm, heavy rain
            elif weather == 77:
                weather = 86  # strong winds, heavy rain
            else:
                weather = 78  # heavy rain
# average rain amount per hour is more than 8.0 millimeters
elif rain_in_millimeters / rain_hours > 8:
            if weather == 76:
                weather = 83  # windstorm, torrential rain
            elif weather == 77:
                weather = 87  # strong winds, torrential rain
            else:
                weather = 79  # torrential rain
return weather
CSVMAP = [
{"id": 0, "time": 1, "lat": 2, "long": 3, "street": 4, "city": 6, "comment": 7, "type": 8, "casualties": 9},
{"id": 0, "time": 1, "type": 2, "long": 3, "lat": 4, "city": 5, "street": 6, "comment": 7, "casualties": 8},
]
def create_accidents(collection, file_location):
"""
:param file_location: local location of .csv
:return: Yields a marker object with every iteration
"""
logging.info("\tReading accidents data from '%s'..." % file_location)
with open_utf8(file_location, 'rU') as f:
reader = csv.reader(f, delimiter=',', dialect=csv.excel_tab)
for line, accident in enumerate(reader):
if line == 0: # header
format_version = 0 if "MissionID" in accident[0] else 1
continue
if not accident: # empty line
continue
if line == 1 and accident[0] == "":
logging.warn("\t\tEmpty File!")
continue
csvmap = CSVMAP[format_version]
if accident[csvmap["lat"]] == "" or accident[csvmap["long"]] == "" or \
accident[csvmap["lat"]] is None or accident[csvmap["long"]] is None or \
accident[csvmap["lat"]] == "NULL" or accident[csvmap["long"]] == "NULL":
logging.warn("\t\tMissing coordinates in line {0}. Moving on...".format(line + 1))
continue
created = parse_date(accident[csvmap["time"]])
marker = {'id': accident[csvmap["id"]], 'latitude': accident[csvmap["lat"]],
'longitude': accident[csvmap["long"]], 'created': created, 'provider_code': PROVIDER_CODE,
'title': decode_hebrew(accident[csvmap["type"]], encoding="utf-8")[:100],
'address': decode_hebrew((accident[csvmap["street"]] + ' ' + accident[csvmap["city"]]), encoding="utf-8"),
'accident_severity': 2 if u"קשה" in decode_hebrew(accident[csvmap["type"]], encoding="utf-8") else 3,
'location_accuracy': 1, 'accident_type': 21, 'type': CONST.MARKER_TYPE_ACCIDENT,
'description': decode_hebrew(accident[csvmap["comment"]], encoding="utf-8"),
'weather': process_weather_data(collection, accident[csvmap["lat"]],
accident[csvmap["long"]])}
if format_version == 0:
casualties = accident[csvmap["casualties"]]
marker['road_intactness'] = casualties if casualties.isdigit() else 0
yield marker
def import_to_db(collection, path):
"""
:param path: Local files directory ('united_path' on main() below)
:return: length of DB entries after execution
"""
app = init_flask()
db = SQLAlchemy(app)
accidents = list(create_accidents(collection, path))
if not accidents:
return 0
new_ids = [m["id"] for m in accidents
if 0 == db.session.query(AccidentMarker).filter(and_(AccidentMarker.id == m["id"],
AccidentMarker.provider_code == m["provider_code"])).count()]
if not new_ids:
logging.info("\t\tNothing loaded, all accidents already in DB")
return 0
db.session.execute(AccidentMarker.__table__.insert(), [m for m in accidents if m["id"] in new_ids])
db.session.commit()
return len(new_ids)
def update_db(collection):
"""
:return: length of DB entries after execution
"""
app = init_flask()
db = SQLAlchemy(app)
united = db.session.query(AccidentMarker).filter(AccidentMarker.provider_code == 2)
for accident in united:
if not accident.weather:
accident.weather = process_weather_data(collection, accident.latitude, accident.longitude)
db.session.commit()
logging.info("\tFinished commiting the changes")
def main(light=True, username='', password='', lastmail=False):
"""
Calls importmail.py prior to importing to DB
"""
collection = retrieve_ims_xml()
if not light:
logging.info("Importing data from mail...")
importmail.main(username, password, lastmail)
united_path = "static/data/united/"
total = 0
logging.info("Loading United accidents...")
for united_file in os.listdir(united_path):
if united_file.endswith(".csv"):
total += import_to_db(collection, united_path + united_file)
logging.info("\tImported {0} items".format(total))
update_db(collection)
| [((45, 4, 45, 44), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((46, 8, 46, 26), 'requests.session', 'requests.session', ({}, {}), '()', False, 'import requests\n'), ((48, 14, 48, 41), 'xml.dom.minidom.parseString', 'minidom.parseString', ({(48, 34, 48, 40): 'r.text'}, {}), '(r.text)', False, 'from xml.dom import minidom\n'), ((75, 11, 75, 74), 'datetime.datetime', 'datetime', ({(75, 20, 75, 29): 'time.year', (75, 31, 75, 41): 'time.month', (75, 43, 75, 51): 'time.day', (75, 53, 75, 57): 'hour', (75, 59, 75, 70): 'time.minute', (75, 72, 75, 73): '(0)'}, {}), '(time.year, time.month, time.day, hour, time.minute, 0)', False, 'from datetime import datetime\n'), ((128, 10, 128, 28), 'xml.dom.minidom.Document', 'minidom.Document', ({}, {}), '()', False, 'from xml.dom import minidom\n'), ((266, 4, 266, 73), 'logging.info', 'logging.info', ({(266, 17, 266, 72): '("\\tReading accidents data from \'%s\'..." % file_location)'}, {}), '("\\tReading accidents data from \'%s\'..." % file_location)', False, 'import logging\n'), ((310, 9, 310, 24), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ({(310, 20, 310, 23): 'app'}, {}), '(app)', False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((332, 9, 332, 24), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ({(332, 20, 332, 23): 'app'}, {}), '(app)', False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((338, 4, 338, 52), 'logging.info', 'logging.info', ({(338, 17, 338, 51): '"""\tFinished commiting the changes"""'}, {}), "('\\tFinished commiting the changes')", False, 'import logging\n'), ((352, 4, 352, 47), 'logging.info', 'logging.info', ({(352, 17, 352, 46): '"""Loading United accidents..."""'}, {}), "('Loading United accidents...')", False, 'import logging\n'), ((353, 23, 353, 46), 'os.listdir', 'os.listdir', ({(353, 34, 353, 45): 'united_path'}, {}), '(united_path)', False, 'import os\n'), ((158, 19, 158, 61), 'math.sqrt', 'math.sqrt', ({(158, 29, 158, 60): 'lat_difference + lon_difference'}, {}), '(lat_difference + lon_difference)', False, 'import math\n'), ((269, 17, 269, 68), 'csv.reader', 'csv.reader', (), '', False, 'import csv\n'), ((319, 8, 319, 71), 'logging.info', 'logging.info', ({(319, 21, 319, 70): '"""\t\tNothing loaded, all accidents already in DB"""'}, {}), "('\\t\\tNothing loaded, all accidents already in DB')", False, 'import logging\n'), ((348, 8, 348, 51), 'logging.info', 'logging.info', ({(348, 21, 348, 50): '"""Importing data from mail..."""'}, {}), "('Importing data from mail...')", False, 'import logging\n'), ((278, 16, 278, 47), 'logging.warn', 'logging.warn', ({(278, 29, 278, 46): '"""\t\tEmpty File!"""'}, {}), "('\\t\\tEmpty File!')", False, 'import logging\n'), ((81, 11, 81, 33), 'calendar.Calendar', 'calendar.Calendar', ({(81, 29, 81, 32): 'nth'}, {}), '(nth)', False, 'import calendar\n'), ((316, 63, 317, 99), 'sqlalchemy.and_', 'and_', ({(316, 68, 316, 96): "(AccidentMarker.id == m['id'])", (317, 48, 317, 98): "(AccidentMarker.provider_code == m['provider_code'])"}, {}), "(AccidentMarker.id == m['id'], AccidentMarker.provider_code == m[\n 'provider_code'])", False, 'from sqlalchemy import and_\n')] |
joequant/libact | libact/query_strategies/tests/test_variance_reduction.py | 4fbf4d59fd0d4e23858b264de2f35f674c50445b | import unittest
from numpy.testing import assert_array_equal
import numpy as np
from libact.base.dataset import Dataset
from libact.models import LogisticRegression
from libact.query_strategies import VarianceReduction
from .utils import run_qs
class VarianceReductionTestCase(unittest.TestCase):
"""Variance reduction test case using artifitial dataset"""
def setUp(self):
self.X = [[-2, -1], [1, 1], [-1, -2], [-1, -1], [1, 2], [2, 1]]
self.y = [0, 1, 0, 1, 0, 1]
self.quota = 4
def test_variance_reduction(self):
trn_ds = Dataset(self.X,
np.concatenate([self.y[:2],
[None] * (len(self.y) - 2)]))
qs = VarianceReduction(trn_ds, model=LogisticRegression(), sigma=0.1)
qseq = run_qs(trn_ds, qs, self.y, self.quota)
assert_array_equal(qseq, np.array([4, 5, 2, 3]))
if __name__ == '__main__':
unittest.main()
| [((29, 4, 29, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((25, 33, 25, 55), 'numpy.array', 'np.array', ({(25, 42, 25, 54): '[4, 5, 2, 3]'}, {}), '([4, 5, 2, 3])', True, 'import numpy as np\n'), ((23, 45, 23, 65), 'libact.models.LogisticRegression', 'LogisticRegression', ({}, {}), '()', False, 'from libact.models import LogisticRegression\n')] |
fgreg/hysds | hysds/log_utils.py | 74a1019665b02f0f475cc4e7fc0a993dd71d7a53 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import open
from builtins import str
from future import standard_library
standard_library.install_aliases()
import os
import re
import json
import copy
import socket
import msgpack
import traceback
import types
import backoff
from datetime import datetime
from uuid import uuid4
from redis import BlockingConnectionPool, StrictRedis, RedisError
from celery.utils.log import get_task_logger
import hysds
from hysds.celery import app
from prov_es.model import get_uuid, ProvEsDocument
# logger
logger = get_task_logger(__name__)
# redis connection pools
JOB_STATUS_POOL = None
JOB_INFO_POOL = None
WORKER_STATUS_POOL = None
EVENT_STATUS_POOL = None
# job status key template
JOB_STATUS_KEY_TMPL = "hysds-job-status-%s"
# worker status key template
WORKER_STATUS_KEY_TMPL = "hysds-worker-status-%s"
# task worker key template
TASK_WORKER_KEY_TMPL = "hysds-task-worker-%s"
def backoff_max_value():
"""Return max value for backoff."""
return app.conf.BACKOFF_MAX_VALUE
def backoff_max_tries():
"""Return max tries for backoff."""
return app.conf.BACKOFF_MAX_TRIES
def hard_time_limit_gap():
"""Return minimum gap time after soft time limit."""
return app.conf.HARD_TIME_LIMIT_GAP
def ensure_hard_time_limit_gap(soft_time_limit, time_limit):
"""Ensure hard time limit gap."""
gap = hard_time_limit_gap()
if soft_time_limit is not None and (time_limit is None or
time_limit <= soft_time_limit+gap):
time_limit = soft_time_limit + gap
return soft_time_limit, time_limit
def set_redis_job_status_pool():
"""Set redis connection pool for job status."""
global JOB_STATUS_POOL
if JOB_STATUS_POOL is None:
JOB_STATUS_POOL = BlockingConnectionPool.from_url(
app.conf.REDIS_JOB_STATUS_URL)
def set_redis_job_info_pool():
"""Set redis connection pool for job info metrics."""
global JOB_INFO_POOL
if JOB_INFO_POOL is None:
JOB_INFO_POOL = BlockingConnectionPool.from_url(
app.conf.REDIS_JOB_INFO_URL)
def set_redis_worker_status_pool():
"""Set redis connection pool for worker status."""
global WORKER_STATUS_POOL
if WORKER_STATUS_POOL is None:
WORKER_STATUS_POOL = BlockingConnectionPool.from_url(
app.conf.REDIS_JOB_STATUS_URL)
def set_redis_event_status_pool():
"""Set redis connection pool for event status."""
global EVENT_STATUS_POOL
if EVENT_STATUS_POOL is None:
EVENT_STATUS_POOL = BlockingConnectionPool.from_url(
app.conf.REDIS_JOB_STATUS_URL)
@backoff.on_exception(backoff.expo,
RedisError,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def log_task_worker(task_id, worker):
"""Log task worker for task ID in redis."""
set_redis_worker_status_pool()
global WORKER_STATUS_POOL
# set task worker for task ID
r = StrictRedis(connection_pool=WORKER_STATUS_POOL)
r.setex(TASK_WORKER_KEY_TMPL % task_id,
app.conf.HYSDS_JOB_STATUS_EXPIRES,
worker)
@backoff.on_exception(backoff.expo,
RedisError,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def get_task_worker(task_id):
"""Retrieve task worker by task ID from redis."""
set_redis_worker_status_pool()
global WORKER_STATUS_POOL
# retrieve task worker
r = StrictRedis(connection_pool=WORKER_STATUS_POOL)
return r.get(TASK_WORKER_KEY_TMPL % task_id)
@backoff.on_exception(backoff.expo,
RedisError,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def get_worker_status(worker):
"""Retrieve worker status by worker ID from redis."""
set_redis_worker_status_pool()
global WORKER_STATUS_POOL
# retrieve worker status
r = StrictRedis(connection_pool=WORKER_STATUS_POOL)
return r.get(WORKER_STATUS_KEY_TMPL % worker)
@backoff.on_exception(backoff.expo,
RedisError,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def get_job_status(task_id):
"""Retrieve job status by task ID from redis."""
set_redis_job_status_pool()
global JOB_STATUS_POOL
# retrieve job status
r = StrictRedis(connection_pool=JOB_STATUS_POOL)
return r.get(JOB_STATUS_KEY_TMPL % task_id)
@backoff.on_exception(backoff.expo,
RedisError,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def log_job_status(job):
"""Print job status."""
set_redis_job_status_pool()
global JOB_STATUS_POOL
job['resource'] = 'job'
job['type'] = job.get('job', {}).get('type', 'unknown')
job['@version'] = '1'
job['@timestamp'] = "%sZ" % datetime.utcnow().isoformat()
if 'tag' in job.get('job', {}):
tags = job.setdefault('tags', [])
if isinstance(tags, str):
tags = [tags]
tags.append(job['job']['tag'])
job['tags'] = tags
# send update to redis
r = StrictRedis(connection_pool=JOB_STATUS_POOL)
r.setex(JOB_STATUS_KEY_TMPL % job['uuid'],
app.conf.HYSDS_JOB_STATUS_EXPIRES,
job['status']) # for dedup
r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(job)) # for ES
logger.info("job_status_json:%s" % json.dumps(job))
@backoff.on_exception(backoff.expo,
RedisError,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def log_job_info(job):
"""Print job info."""
set_redis_job_info_pool()
global JOB_INFO_POOL
filtered_info = {}
for info in ('job_info', 'job_id', 'task_id', 'delivery_info', 'tag',
'priority', 'container_image_name', 'container_image_url',
'name'):
if info in job:
filtered_info[info] = job[info]
job_info = {'type': 'job_info',
'@version': '1',
'@timestamp': "%sZ" % datetime.utcnow().isoformat(),
'job': filtered_info,
'job_type': job['type']}
# send update to redis
r = StrictRedis(connection_pool=JOB_INFO_POOL)
r.rpush(app.conf.REDIS_JOB_INFO_KEY, msgpack.dumps(job_info))
logger.info("job_info_json:%s" % json.dumps(job_info))
@backoff.on_exception(backoff.expo,
RedisError,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def log_custom_event(event_type, event_status, event, tags=[], hostname=None):
"""Log custom event."""
set_redis_event_status_pool()
global EVENT_STATUS_POOL
uuid = str(uuid4())
if hostname is None:
try:
hostname = socket.getfqdn()
except:
try:
hostname = socket.gethostbyname(socket.gethostname())
except:
hostname = ''
info = {'resource': 'event',
'type': event_type,
'status': event_status,
'@timestamp': "%sZ" % datetime.utcnow().isoformat(),
'hostname': hostname,
'uuid': uuid,
'tags': tags,
'@version': '1',
'event': event}
# send update to redis
r = StrictRedis(connection_pool=EVENT_STATUS_POOL)
r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(info))
logger.info("hysds.custom_event:%s" % json.dumps(info))
return uuid
def log_prov_es(job, prov_es_info, prov_es_file):
"""Log PROV-ES document. Create temp PROV-ES document to populate
attributes that only the worker has access to (e.g. PID)."""
    # create PROV-ES doc to generate attributes that only verdi knows
ps_id = "hysds:%s" % get_uuid(job['job_id'])
bundle_id = "hysds:%s" % get_uuid('bundle-%s' % job['job_id'])
doc = ProvEsDocument()
# get bundle
#bndl = doc.bundle(bundle_id)
bndl = None
    # create software agent
sa_label = "hysds:pge_wrapper/%s/%d/%s" % (job['job_info']['execute_node'],
job['job_info']['pid'],
datetime.utcnow().isoformat())
sa_id = "hysds:%s" % get_uuid(sa_label)
doc.softwareAgent(sa_id, str(job['job_info']['pid']),
job['job_info']['execute_node'],
role=job.get('username', None),
label=sa_label, bundle=bndl)
# create processStep
doc.processStep(ps_id, job['job_info']['cmd_start'],
job['job_info']['cmd_end'], [], sa_id,
None, [], [], bundle=bndl,
prov_type="hysds:%s" % job['type'])
# get json
pd = json.loads(doc.serialize())
# update software agent and process step
if 'bundle' in prov_es_info:
if len(prov_es_info['bundle']) == 1:
bundle_id_orig = list(prov_es_info['bundle'].keys())[0]
# update software agent
prov_es_info['bundle'][bundle_id_orig].setdefault(
'agent', {}).update(pd['bundle'][bundle_id]['agent'])
# update wasAssociatedWith
prov_es_info['bundle'][bundle_id_orig].setdefault(
'wasAssociatedWith', {}).update(pd['bundle'][bundle_id]['wasAssociatedWith'])
# update activity
if 'activity' in prov_es_info['bundle'][bundle_id_orig]:
if len(prov_es_info['bundle'][bundle_id_orig]['activity']) == 1:
ps_id_orig = list(
prov_es_info['bundle'][bundle_id_orig]['activity'].keys())[0]
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][
'prov:startTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:startTime']
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][
'prov:endTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:endTime']
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_id'] = job['job_id']
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_type'] = job['type']
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url']
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL
if 'prov:type' not in prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]:
prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][
'prov:type'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:type']
# update wasAssociatedWith activity ids
for waw_id in prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith']:
if prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id:
prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig
else:
prov_es_info['bundle'][bundle_id_orig]['activity'].update(
pd['bundle'][bundle_id]['activity'])
else:
prov_es_info['bundle'][bundle_id_orig]['activity'] = pd['bundle'][bundle_id]['activity']
else:
# update software agent
prov_es_info.setdefault('agent', {}).update(pd['agent'])
# update wasAssociatedWith
prov_es_info.setdefault('wasAssociatedWith', {}).update(
pd['wasAssociatedWith'])
# update process step
if 'activity' in prov_es_info:
if len(prov_es_info['activity']) == 1:
ps_id_orig = list(prov_es_info['activity'].keys())[0]
prov_es_info['activity'][ps_id_orig]['prov:startTime'] = pd['activity'][ps_id]['prov:startTime']
prov_es_info['activity'][ps_id_orig]['prov:endTime'] = pd['activity'][ps_id]['prov:endTime']
prov_es_info['activity'][ps_id_orig]['hysds:job_id'] = job['job_id']
prov_es_info['activity'][ps_id_orig]['hysds:job_type'] = job['type']
prov_es_info['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url']
prov_es_info['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL
if 'prov:type' not in prov_es_info['activity'][ps_id_orig]:
prov_es_info['activity'][ps_id_orig]['prov:type'] = pd['activity'][ps_id]['prov:type']
# update wasAssociatedWith activity ids
for waw_id in prov_es_info['wasAssociatedWith']:
if prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id:
prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig
else:
prov_es_info['activity'].update(pd['activity'])
else:
prov_es_info['activity'] = pd['activity']
# write prov
with open(prov_es_file, 'w') as f:
json.dump(prov_es_info, f, indent=2)
def log_publish_prov_es(prov_es_info, prov_es_file, prod_path, pub_urls,
prod_metrics, objectid):
"""Log publish step in PROV-ES document."""
# create PROV-ES doc
doc = ProvEsDocument(namespaces=prov_es_info['prefix'])
# get bundle
#bndl = doc.bundle(bundle_id)
bndl = None
# add input entity
execute_node = socket.getfqdn()
prod_url = "file://%s%s" % (execute_node, prod_path)
input_id = "hysds:%s" % get_uuid(prod_url)
input_ent = doc.granule(input_id, None, [prod_url], [], None, None, None,
label=os.path.basename(prod_url), bundle=bndl)
# add output entity
output_id = "hysds:%s" % get_uuid(pub_urls[0])
output_ent = doc.product(output_id, None, [pub_urls[0]], [], None, None,
None, label=objectid, bundle=bndl)
# software and algorithm
algorithm = "eos:product_publishing"
software_version = hysds.__version__
software_title = "%s v%s" % (hysds.__description__, software_version)
software = "eos:HySDS-%s" % software_version
software_location = hysds.__url__
doc.software(software, [algorithm], software_version, label=software_title,
location=software_location, bundle=bndl)
    # create software agent
pid = os.getpid()
sa_label = "hysds:publish_dataset/%s/%d/%s" % (execute_node, pid,
prod_metrics['time_start'])
sa_id = "hysds:%s" % get_uuid(sa_label)
doc.softwareAgent(sa_id, str(pid), execute_node, role="invoked",
label=sa_label, bundle=bndl)
# create processStep
job_id = "publish_dataset-%s" % os.path.basename(prod_path)
doc.processStep("hysds:%s" % get_uuid(job_id), prod_metrics['time_start'],
prod_metrics['time_end'], [software], sa_id, None,
[input_id], [output_id], label=job_id, bundle=bndl,
prov_type="hysds:publish_dataset")
# get json
pd = json.loads(doc.serialize())
# update input entity
orig_ent = prov_es_info.get('entity', {}).get(input_id, {})
pd['entity'][input_id].update(orig_ent)
# update output entity
for attr in orig_ent:
if attr in ('prov:location', 'prov:label', 'prov:type'):
continue
pd['entity'][output_id][attr] = orig_ent[attr]
# write prov
with open(prov_es_file, 'w') as f:
json.dump(pd, f, indent=2)
| [((10, 0, 10, 34), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ({}, {}), '()', False, 'from future import standard_library\n'), ((33, 9, 33, 34), 'celery.utils.log.get_task_logger', 'get_task_logger', ({(33, 25, 33, 33): '__name__'}, {}), '(__name__)', False, 'from celery.utils.log import get_task_logger\n'), ((112, 1, 115, 50), 'backoff.on_exception', 'backoff.on_exception', (), '', False, 'import backoff\n'), ((129, 1, 132, 50), 'backoff.on_exception', 'backoff.on_exception', (), '', False, 'import backoff\n'), ((144, 1, 147, 50), 'backoff.on_exception', 'backoff.on_exception', (), '', False, 'import backoff\n'), ((159, 1, 162, 50), 'backoff.on_exception', 'backoff.on_exception', (), '', False, 'import backoff\n'), ((174, 1, 177, 50), 'backoff.on_exception', 'backoff.on_exception', (), '', False, 'import backoff\n'), ((203, 1, 206, 50), 'backoff.on_exception', 'backoff.on_exception', (), '', False, 'import backoff\n'), ((230, 1, 233, 50), 'backoff.on_exception', 'backoff.on_exception', (), '', False, 'import backoff\n'), ((123, 8, 123, 55), 'redis.StrictRedis', 'StrictRedis', (), '', False, 'from redis import BlockingConnectionPool, StrictRedis, RedisError\n'), ((140, 8, 140, 55), 'redis.StrictRedis', 'StrictRedis', (), '', False, 'from redis import BlockingConnectionPool, StrictRedis, RedisError\n'), ((155, 8, 155, 55), 'redis.StrictRedis', 'StrictRedis', (), '', False, 'from redis import BlockingConnectionPool, StrictRedis, RedisError\n'), ((170, 8, 170, 52), 'redis.StrictRedis', 'StrictRedis', (), '', False, 'from redis import BlockingConnectionPool, StrictRedis, RedisError\n'), ((195, 8, 195, 52), 'redis.StrictRedis', 'StrictRedis', (), '', False, 'from redis import BlockingConnectionPool, StrictRedis, RedisError\n'), ((225, 8, 225, 50), 'redis.StrictRedis', 'StrictRedis', (), '', False, 'from redis import BlockingConnectionPool, StrictRedis, RedisError\n'), ((260, 8, 260, 54), 'redis.StrictRedis', 'StrictRedis', (), '', False, 'from redis import BlockingConnectionPool, StrictRedis, RedisError\n'), ((273, 10, 273, 26), 'prov_es.model.ProvEsDocument', 'ProvEsDocument', ({}, {}), '()', False, 'from prov_es.model import get_uuid, ProvEsDocument\n'), ((377, 10, 377, 59), 'prov_es.model.ProvEsDocument', 'ProvEsDocument', (), '', False, 'from prov_es.model import get_uuid, ProvEsDocument\n'), ((384, 19, 384, 35), 'socket.getfqdn', 'socket.getfqdn', ({}, {}), '()', False, 'import socket\n'), ((405, 10, 405, 21), 'os.getpid', 'os.getpid', ({}, {}), '()', False, 'import os\n'), ((81, 26, 82, 42), 'redis.BlockingConnectionPool.from_url', 'BlockingConnectionPool.from_url', ({(82, 12, 82, 41): 'app.conf.REDIS_JOB_STATUS_URL'}, {}), '(app.conf.REDIS_JOB_STATUS_URL)', False, 'from redis import BlockingConnectionPool, StrictRedis, RedisError\n'), ((90, 24, 91, 40), 'redis.BlockingConnectionPool.from_url', 'BlockingConnectionPool.from_url', ({(91, 12, 91, 39): 'app.conf.REDIS_JOB_INFO_URL'}, {}), '(app.conf.REDIS_JOB_INFO_URL)', False, 'from redis import BlockingConnectionPool, StrictRedis, RedisError\n'), ((99, 29, 100, 42), 'redis.BlockingConnectionPool.from_url', 'BlockingConnectionPool.from_url', ({(100, 12, 100, 41): 'app.conf.REDIS_JOB_STATUS_URL'}, {}), '(app.conf.REDIS_JOB_STATUS_URL)', False, 'from redis import BlockingConnectionPool, StrictRedis, RedisError\n'), ((108, 28, 109, 42), 'redis.BlockingConnectionPool.from_url', 'BlockingConnectionPool.from_url', ({(109, 12, 109, 41): 'app.conf.REDIS_JOB_STATUS_URL'}, {}), '(app.conf.REDIS_JOB_STATUS_URL)', 
False, 'from redis import BlockingConnectionPool, StrictRedis, RedisError\n'), ((199, 43, 199, 61), 'msgpack.dumps', 'msgpack.dumps', ({(199, 57, 199, 60): 'job'}, {}), '(job)', False, 'import msgpack\n'), ((226, 41, 226, 64), 'msgpack.dumps', 'msgpack.dumps', ({(226, 55, 226, 63): 'job_info'}, {}), '(job_info)', False, 'import msgpack\n'), ((240, 15, 240, 22), 'uuid.uuid4', 'uuid4', ({}, {}), '()', False, 'from uuid import uuid4\n'), ((261, 43, 261, 62), 'msgpack.dumps', 'msgpack.dumps', ({(261, 57, 261, 61): 'info'}, {}), '(info)', False, 'import msgpack\n'), ((271, 25, 271, 48), 'prov_es.model.get_uuid', 'get_uuid', ({(271, 34, 271, 47): "job['job_id']"}, {}), "(job['job_id'])", False, 'from prov_es.model import get_uuid, ProvEsDocument\n'), ((272, 29, 272, 66), 'prov_es.model.get_uuid', 'get_uuid', ({(272, 38, 272, 65): "('bundle-%s' % job['job_id'])"}, {}), "('bundle-%s' % job['job_id'])", False, 'from prov_es.model import get_uuid, ProvEsDocument\n'), ((283, 25, 283, 43), 'prov_es.model.get_uuid', 'get_uuid', ({(283, 34, 283, 42): 'sa_label'}, {}), '(sa_label)', False, 'from prov_es.model import get_uuid, ProvEsDocument\n'), ((284, 29, 284, 56), 'builtins.str', 'str', ({(284, 33, 284, 55): "job['job_info']['pid']"}, {}), "(job['job_info']['pid'])", False, 'from builtins import str\n'), ((368, 9, 368, 32), 'builtins.open', 'open', ({(368, 14, 368, 26): 'prov_es_file', (368, 28, 368, 31): '"""w"""'}, {}), "(prov_es_file, 'w')", False, 'from builtins import open\n'), ((369, 8, 369, 44), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((386, 28, 386, 46), 'prov_es.model.get_uuid', 'get_uuid', ({(386, 37, 386, 45): 'prod_url'}, {}), '(prod_url)', False, 'from prov_es.model import get_uuid, ProvEsDocument\n'), ((391, 29, 391, 50), 'prov_es.model.get_uuid', 'get_uuid', ({(391, 38, 391, 49): 'pub_urls[0]'}, {}), '(pub_urls[0])', False, 'from prov_es.model import get_uuid, ProvEsDocument\n'), ((408, 25, 408, 43), 'prov_es.model.get_uuid', 'get_uuid', ({(408, 34, 408, 42): 'sa_label'}, {}), '(sa_label)', False, 'from prov_es.model import get_uuid, ProvEsDocument\n'), ((409, 29, 409, 37), 'builtins.str', 'str', ({(409, 33, 409, 36): 'pid'}, {}), '(pid)', False, 'from builtins import str\n'), ((413, 36, 413, 63), 'os.path.basename', 'os.path.basename', ({(413, 53, 413, 62): 'prod_path'}, {}), '(prod_path)', False, 'import os\n'), ((433, 9, 433, 32), 'builtins.open', 'open', ({(433, 14, 433, 26): 'prov_es_file', (433, 28, 433, 31): '"""w"""'}, {}), "(prov_es_file, 'w')", False, 'from builtins import open\n'), ((434, 8, 434, 34), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((200, 39, 200, 54), 'json.dumps', 'json.dumps', ({(200, 50, 200, 53): 'job'}, {}), '(job)', False, 'import json\n'), ((227, 37, 227, 57), 'json.dumps', 'json.dumps', ({(227, 48, 227, 56): 'job_info'}, {}), '(job_info)', False, 'import json\n'), ((243, 23, 243, 39), 'socket.getfqdn', 'socket.getfqdn', ({}, {}), '()', False, 'import socket\n'), ((262, 42, 262, 58), 'json.dumps', 'json.dumps', ({(262, 53, 262, 57): 'info'}, {}), '(info)', False, 'import json\n'), ((388, 34, 388, 60), 'os.path.basename', 'os.path.basename', ({(388, 51, 388, 59): 'prod_url'}, {}), '(prod_url)', False, 'import os\n'), ((414, 33, 414, 49), 'prov_es.model.get_uuid', 'get_uuid', ({(414, 42, 414, 48): 'job_id'}, {}), '(job_id)', False, 'from prov_es.model import get_uuid, ProvEsDocument\n'), ((186, 32, 186, 49), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime\n'), ((220, 38, 
220, 55), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime\n'), ((252, 34, 252, 51), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime\n'), ((282, 47, 282, 64), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime\n'), ((246, 48, 246, 68), 'socket.gethostname', 'socket.gethostname', ({}, {}), '()', False, 'import socket\n')] |
CplusShen/aurora-horizon | openstack_dashboard/api/rest/swift.py | 8df16b3b87097d5a19bae3752d4b341ac64bda75 | # Copyright 2015, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for the swift service.
"""
import os
from django import forms
from django.http import StreamingHttpResponse
from django.utils.http import urlunquote
from django.views.decorators.csrf import csrf_exempt
from django.views import generic
import six
from horizon import exceptions
from openstack_dashboard import api
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
from openstack_dashboard.api import swift
@urls.register
class Info(generic.View):
"""API for information about the Swift installation.
"""
url_regex = r'swift/info/$'
@rest_utils.ajax()
def get(self, request):
"""Get information about the Swift installation.
"""
capabilities = api.swift.swift_get_capabilities(request)
return {'info': capabilities}
@urls.register
class Containers(generic.View):
"""API for swift container listing for an account
"""
url_regex = r'swift/containers/$'
@rest_utils.ajax()
def get(self, request):
"""Get the list of containers for this account
TODO(neillc): Add pagination
"""
containers, has_more = api.swift.swift_get_containers(request)
containers = [container.to_dict() for container in containers]
return {'items': containers, 'has_more': has_more}
@urls.register
class Container(generic.View):
"""API for swift container level information
"""
url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/$'
@rest_utils.ajax()
def get(self, request, container):
"""Get the container details
"""
return api.swift.swift_get_container(request, container).to_dict()
@rest_utils.ajax()
def post(self, request, container):
metadata = {}
if 'is_public' in request.DATA:
metadata['is_public'] = request.DATA['is_public']
# This will raise an exception if the container already exists
try:
api.swift.swift_create_container(request, container,
metadata=metadata)
except exceptions.AlreadyExists as e:
# 409 Conflict
return rest_utils.JSONResponse(str(e), 409)
return rest_utils.CreatedResponse(
u'/api/swift/containers/%s' % container,
)
@rest_utils.ajax()
def delete(self, request, container):
try:
api.swift.swift_delete_container(request, container)
except exceptions.Conflict as e:
# It cannot be deleted if it's not empty.
return rest_utils.JSONResponse(str(e), 409)
@rest_utils.ajax(data_required=True)
def put(self, request, container):
metadata = {'is_public': request.DATA['is_public']}
api.swift.swift_update_container(request, container, metadata=metadata)
@urls.register
class Objects(generic.View):
"""API for a list of swift objects
"""
url_regex = r'swift/containers/(?P<container>[^/]+)/objects/$'
@rest_utils.ajax()
def get(self, request, container):
"""Get object information.
:param request:
:param container:
:return:
"""
path = request.GET.get('path')
if path is not None:
path = urlunquote(path)
objects = api.swift.swift_get_objects(
request,
container,
prefix=path
)
# filter out the folder from the listing if we're filtering for
# contents of a (pseudo) folder
contents = [{
'path': o.subdir if isinstance(o, swift.PseudoFolder) else o.name,
'name': o.name.split('/')[-1],
'bytes': o.bytes,
'is_subdir': isinstance(o, swift.PseudoFolder),
'is_object': not isinstance(o, swift.PseudoFolder),
'content_type': getattr(o, 'content_type', None)
} for o in objects[0] if o.name != path]
return {'items': contents}
class UploadObjectForm(forms.Form):
file = forms.FileField(required=False)
@urls.register
class Object(generic.View):
"""API for a single swift object or pseudo-folder
"""
url_regex = r'swift/containers/(?P<container>[^/]+)/object/' \
'(?P<object_name>.+)$'
# note: not an AJAX request - the body will be raw file content
@csrf_exempt
def post(self, request, container, object_name):
"""Create or replace an object or pseudo-folder
:param request:
:param container:
:param object_name:
If the object_name (ie. POST path) ends in a '/' then a folder is
created, rather than an object. Any file content passed along with
the request will be ignored in that case.
POST parameter:
:param file: the file data for the upload.
:return:
"""
form = UploadObjectForm(request.POST, request.FILES)
if not form.is_valid():
raise rest_utils.AjaxError(500, 'Invalid request')
data = form.clean()
if object_name[-1] == '/':
result = api.swift.swift_create_pseudo_folder(
request,
container,
object_name
)
else:
result = api.swift.swift_upload_object(
request,
container,
object_name,
data['file']
)
return rest_utils.CreatedResponse(
u'/api/swift/containers/%s/object/%s' % (container, result.name)
)
@rest_utils.ajax()
def delete(self, request, container, object_name):
if object_name[-1] == '/':
try:
api.swift.swift_delete_folder(request, container, object_name)
except exceptions.Conflict as e:
# In case the given object is pseudo folder
# It cannot be deleted if it's not empty.
return rest_utils.JSONResponse(str(e), 409)
else:
api.swift.swift_delete_object(request, container, object_name)
def get(self, request, container, object_name):
"""Get the object contents.
"""
obj = api.swift.swift_get_object(
request,
container,
object_name
)
# Add the original file extension back on if it wasn't preserved in the
# name given to the object.
filename = object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1]
if not os.path.splitext(obj.name)[1] and obj.orig_name:
name, ext = os.path.splitext(obj.orig_name)
filename = "%s%s" % (filename, ext)
response = StreamingHttpResponse(obj.data)
safe = filename.replace(",", "")
if six.PY2:
safe = safe.encode('utf-8')
response['Content-Disposition'] = 'attachment; filename="%s"' % safe
response['Content-Type'] = 'application/octet-stream'
response['Content-Length'] = obj.bytes
return response
@urls.register
class ObjectMetadata(generic.View):
"""API for a single swift object
"""
url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/' \
'(?P<object_name>.+)$'
@rest_utils.ajax()
def get(self, request, container, object_name):
return api.swift.swift_get_object(
request,
container_name=container,
object_name=object_name,
with_data=False
).to_dict()
@urls.register
class ObjectCopy(generic.View):
"""API to copy a swift object
"""
url_regex = r'swift/containers/(?P<container>[^/]+)/copy/' \
'(?P<object_name>.+)$'
@rest_utils.ajax()
def post(self, request, container, object_name):
dest_container = request.DATA['dest_container']
dest_name = request.DATA['dest_name']
try:
result = api.swift.swift_copy_object(
request,
container,
object_name,
dest_container,
dest_name
)
except exceptions.AlreadyExists as e:
return rest_utils.JSONResponse(str(e), 409)
return rest_utils.CreatedResponse(
u'/api/swift/containers/%s/object/%s' % (dest_container,
result.name)
)
| [((38, 5, 38, 22), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ({}, {}), '()', True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((52, 5, 52, 22), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ({}, {}), '()', True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((70, 5, 70, 22), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ({}, {}), '()', True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((76, 5, 76, 22), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ({}, {}), '()', True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((95, 5, 95, 22), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ({}, {}), '()', True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((103, 5, 103, 40), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', (), '', True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((115, 5, 115, 22), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ({}, {}), '()', True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((147, 11, 147, 42), 'django.forms.FileField', 'forms.FileField', (), '', False, 'from django import forms\n'), ((200, 5, 200, 22), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ({}, {}), '()', True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((244, 5, 244, 22), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ({}, {}), '()', True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((261, 5, 261, 22), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ({}, {}), '()', True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((42, 23, 42, 64), 'openstack_dashboard.api.swift.swift_get_capabilities', 'api.swift.swift_get_capabilities', ({(42, 56, 42, 63): 'request'}, {}), '(request)', False, 'from openstack_dashboard import api\n'), ((58, 31, 58, 70), 'openstack_dashboard.api.swift.swift_get_containers', 'api.swift.swift_get_containers', ({(58, 62, 58, 69): 'request'}, {}), '(request)', False, 'from openstack_dashboard import api\n'), ((91, 15, 93, 9), 'openstack_dashboard.api.rest.utils.CreatedResponse', 'rest_utils.CreatedResponse', ({(92, 12, 92, 51): "(u'/api/swift/containers/%s' % container)"}, {}), "(u'/api/swift/containers/%s' % container)", True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((106, 8, 106, 79), 'openstack_dashboard.api.swift.swift_update_container', 'api.swift.swift_update_container', (), '', False, 'from openstack_dashboard import api\n'), ((127, 18, 131, 9), 'openstack_dashboard.api.swift.swift_get_objects', 'api.swift.swift_get_objects', (), '', False, 'from openstack_dashboard import api\n'), ((196, 15, 198, 9), 'openstack_dashboard.api.rest.utils.CreatedResponse', 'rest_utils.CreatedResponse', ({(197, 12, 197, 76): "(u'/api/swift/containers/%s/object/%s' % (container, result.name))"}, {}), "(u'/api/swift/containers/%s/object/%s' % (\n container, result.name))", True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((215, 14, 219, 9), 'openstack_dashboard.api.swift.swift_get_object', 'api.swift.swift_get_object', ({(216, 12, 216, 19): 'request', (217, 12, 217, 21): 'container', (218, 12, 218, 23): 'object_name'}, {}), '(request, container, object_name)', False, 'from openstack_dashboard import api\n'), ((227, 19, 227, 50), 'django.http.StreamingHttpResponse', 
'StreamingHttpResponse', ({(227, 41, 227, 49): 'obj.data'}, {}), '(obj.data)', False, 'from django.http import StreamingHttpResponse\n'), ((275, 15, 278, 9), 'openstack_dashboard.api.rest.utils.CreatedResponse', 'rest_utils.CreatedResponse', ({(276, 12, 277, 65): "(u'/api/swift/containers/%s/object/%s' % (dest_container, result.name))"}, {}), "(u'/api/swift/containers/%s/object/%s' % (\n dest_container, result.name))", True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((85, 12, 86, 63), 'openstack_dashboard.api.swift.swift_create_container', 'api.swift.swift_create_container', (), '', False, 'from openstack_dashboard import api\n'), ((98, 12, 98, 64), 'openstack_dashboard.api.swift.swift_delete_container', 'api.swift.swift_delete_container', ({(98, 45, 98, 52): 'request', (98, 54, 98, 63): 'container'}, {}), '(request, container)', False, 'from openstack_dashboard import api\n'), ((125, 19, 125, 35), 'django.utils.http.urlunquote', 'urlunquote', ({(125, 30, 125, 34): 'path'}, {}), '(path)', False, 'from django.utils.http import urlunquote\n'), ((178, 18, 178, 62), 'openstack_dashboard.api.rest.utils.AjaxError', 'rest_utils.AjaxError', ({(178, 39, 178, 42): '(500)', (178, 44, 178, 61): '"""Invalid request"""'}, {}), "(500, 'Invalid request')", True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((183, 21, 187, 13), 'openstack_dashboard.api.swift.swift_create_pseudo_folder', 'api.swift.swift_create_pseudo_folder', ({(184, 16, 184, 23): 'request', (185, 16, 185, 25): 'container', (186, 16, 186, 27): 'object_name'}, {}), '(request, container, object_name)', False, 'from openstack_dashboard import api\n'), ((189, 21, 194, 13), 'openstack_dashboard.api.swift.swift_upload_object', 'api.swift.swift_upload_object', ({(190, 16, 190, 23): 'request', (191, 16, 191, 25): 'container', (192, 16, 192, 27): 'object_name', (193, 16, 193, 28): "data['file']"}, {}), "(request, container, object_name, data['file'])", False, 'from openstack_dashboard import api\n'), ((210, 12, 210, 74), 'openstack_dashboard.api.swift.swift_delete_object', 'api.swift.swift_delete_object', ({(210, 42, 210, 49): 'request', (210, 51, 210, 60): 'container', (210, 62, 210, 73): 'object_name'}, {}), '(request, container, object_name)', False, 'from openstack_dashboard import api\n'), ((225, 24, 225, 55), 'os.path.splitext', 'os.path.splitext', ({(225, 41, 225, 54): 'obj.orig_name'}, {}), '(obj.orig_name)', False, 'import os\n'), ((266, 21, 272, 13), 'openstack_dashboard.api.swift.swift_copy_object', 'api.swift.swift_copy_object', ({(267, 16, 267, 23): 'request', (268, 16, 268, 25): 'container', (269, 16, 269, 27): 'object_name', (270, 16, 270, 30): 'dest_container', (271, 16, 271, 25): 'dest_name'}, {}), '(request, container, object_name, dest_container,\n dest_name)', False, 'from openstack_dashboard import api\n'), ((74, 15, 74, 64), 'openstack_dashboard.api.swift.swift_get_container', 'api.swift.swift_get_container', ({(74, 45, 74, 52): 'request', (74, 54, 74, 63): 'container'}, {}), '(request, container)', False, 'from openstack_dashboard import api\n'), ((204, 16, 204, 78), 'openstack_dashboard.api.swift.swift_delete_folder', 'api.swift.swift_delete_folder', ({(204, 46, 204, 53): 'request', (204, 55, 204, 64): 'container', (204, 66, 204, 77): 'object_name'}, {}), '(request, container, object_name)', False, 'from openstack_dashboard import api\n'), ((246, 15, 251, 9), 'openstack_dashboard.api.swift.swift_get_object', 'api.swift.swift_get_object', (), '', False, 'from openstack_dashboard 
import api\n'), ((224, 15, 224, 41), 'os.path.splitext', 'os.path.splitext', ({(224, 32, 224, 40): 'obj.name'}, {}), '(obj.name)', False, 'import os\n')] |
kuangliu/pytorch-ssd | datagen.py | 02ed1cbe6962e791895ab1c455dc5ddfb87291b9 | '''Load image/class/box from an annotation file.
The annotation file is organized as:
image_name #obj xmin ymin xmax ymax class_index ..
'''
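# Illustrative annotation line (hypothetical file name and numbers):
#   img_00001.jpg 2 48 240 195 371 11 8 12 352 498 14
# i.e. image name, number of objects, then (xmin ymin xmax ymax class_index)
# once per object, matching the 5-values-per-object parsing in __init__ below.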
from __future__ import print_function
import os
import sys
import os.path
import random
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from encoder import DataEncoder
from PIL import Image, ImageOps
class ListDataset(data.Dataset):
img_size = 300
def __init__(self, root, list_file, train, transform):
'''
Args:
          root: (str) directory to images.
list_file: (str) path to index file.
train: (boolean) train or test.
transform: ([transforms]) image transforms.
'''
self.root = root
self.train = train
self.transform = transform
self.fnames = []
self.boxes = []
self.labels = []
self.data_encoder = DataEncoder()
with open(list_file) as f:
lines = f.readlines()
self.num_samples = len(lines)
for line in lines:
splited = line.strip().split()
self.fnames.append(splited[0])
num_objs = int(splited[1])
box = []
label = []
for i in range(num_objs):
xmin = splited[2+5*i]
ymin = splited[3+5*i]
xmax = splited[4+5*i]
ymax = splited[5+5*i]
c = splited[6+5*i]
box.append([float(xmin),float(ymin),float(xmax),float(ymax)])
label.append(int(c))
self.boxes.append(torch.Tensor(box))
self.labels.append(torch.LongTensor(label))
def __getitem__(self, idx):
        '''Load an image and encode its bbox locations and class labels.
Args:
idx: (int) image index.
Returns:
img: (tensor) image tensor.
loc_target: (tensor) location targets, sized [8732,4].
conf_target: (tensor) label targets, sized [8732,].
'''
# Load image and bbox locations.
fname = self.fnames[idx]
img = Image.open(os.path.join(self.root, fname))
boxes = self.boxes[idx].clone()
labels = self.labels[idx]
# Data augmentation while training.
if self.train:
img, boxes = self.random_flip(img, boxes)
img, boxes, labels = self.random_crop(img, boxes, labels)
        # Scale bbox locations to [0,1].
w,h = img.size
boxes /= torch.Tensor([w,h,w,h]).expand_as(boxes)
img = img.resize((self.img_size,self.img_size))
img = self.transform(img)
# Encode loc & conf targets.
loc_target, conf_target = self.data_encoder.encode(boxes, labels)
return img, loc_target, conf_target
def random_flip(self, img, boxes):
'''Randomly flip the image and adjust the bbox locations.
For bbox (xmin, ymin, xmax, ymax), the flipped bbox is:
(w-xmax, ymin, w-xmin, ymax).
Args:
img: (PIL.Image) image.
boxes: (tensor) bbox locations, sized [#obj, 4].
Returns:
img: (PIL.Image) randomly flipped image.
boxes: (tensor) randomly flipped bbox locations, sized [#obj, 4].
'''
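        # Worked example (illustrative): for an image of width w=300, a box with
        # xmin=10, xmax=50 flips to xmin=300-50=250, xmax=300-10=290.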
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
w = img.width
xmin = w - boxes[:,2]
xmax = w - boxes[:,0]
boxes[:,0] = xmin
boxes[:,2] = xmax
return img, boxes
def random_crop(self, img, boxes, labels):
'''Randomly crop the image and adjust the bbox locations.
        For more details, see 'Chapter 2.2: Data augmentation' of the paper.
Args:
img: (PIL.Image) image.
boxes: (tensor) bbox locations, sized [#obj, 4].
labels: (tensor) bbox labels, sized [#obj,].
Returns:
img: (PIL.Image) cropped image.
selected_boxes: (tensor) selected bbox locations.
labels: (tensor) selected bbox labels.
'''
imw, imh = img.size
while True:
min_iou = random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9])
if min_iou is None:
return img, boxes, labels
for _ in range(100):
w = random.randrange(int(0.1*imw), imw)
h = random.randrange(int(0.1*imh), imh)
if h > 2*w or w > 2*h:
continue
x = random.randrange(imw - w)
y = random.randrange(imh - h)
roi = torch.Tensor([[x, y, x+w, y+h]])
center = (boxes[:,:2] + boxes[:,2:]) / 2 # [N,2]
roi2 = roi.expand(len(center), 4) # [N,4]
mask = (center > roi2[:,:2]) & (center < roi2[:,2:]) # [N,2]
mask = mask[:,0] & mask[:,1] #[N,]
if not mask.any():
continue
selected_boxes = boxes.index_select(0, mask.nonzero().squeeze(1))
iou = self.data_encoder.iou(selected_boxes, roi)
if iou.min() < min_iou:
continue
img = img.crop((x, y, x+w, y+h))
selected_boxes[:,0].add_(-x).clamp_(min=0, max=w)
selected_boxes[:,1].add_(-y).clamp_(min=0, max=h)
selected_boxes[:,2].add_(-x).clamp_(min=0, max=w)
selected_boxes[:,3].add_(-y).clamp_(min=0, max=h)
return img, selected_boxes, labels[mask]
def __len__(self):
return self.num_samples
| [((42, 28, 42, 41), 'encoder.DataEncoder', 'DataEncoder', ({}, {}), '()', False, 'from encoder import DataEncoder\n'), ((79, 25, 79, 55), 'os.path.join', 'os.path.join', ({(79, 38, 79, 47): 'self.root', (79, 49, 79, 54): 'fname'}, {}), '(self.root, fname)', False, 'import os\n'), ((113, 11, 113, 26), 'random.random', 'random.random', ({}, {}), '()', False, 'import random\n'), ((139, 22, 139, 68), 'random.choice', 'random.choice', ({(139, 36, 139, 67): '[None, 0.1, 0.3, 0.5, 0.7, 0.9]'}, {}), '([None, 0.1, 0.3, 0.5, 0.7, 0.9])', False, 'import random\n'), ((63, 30, 63, 47), 'torch.Tensor', 'torch.Tensor', ({(63, 43, 63, 46): 'box'}, {}), '(box)', False, 'import torch\n'), ((64, 31, 64, 54), 'torch.LongTensor', 'torch.LongTensor', ({(64, 48, 64, 53): 'label'}, {}), '(label)', False, 'import torch\n'), ((90, 17, 90, 40), 'torch.Tensor', 'torch.Tensor', ({(90, 30, 90, 39): '[w, h, w, h]'}, {}), '([w, h, w, h])', False, 'import torch\n'), ((150, 20, 150, 45), 'random.randrange', 'random.randrange', ({(150, 37, 150, 44): 'imw - w'}, {}), '(imw - w)', False, 'import random\n'), ((151, 20, 151, 45), 'random.randrange', 'random.randrange', ({(151, 37, 151, 44): 'imh - h'}, {}), '(imh - h)', False, 'import random\n'), ((152, 22, 152, 54), 'torch.Tensor', 'torch.Tensor', ({(152, 35, 152, 53): '[[x, y, x + w, y + h]]'}, {}), '([[x, y, x + w, y + h]])', False, 'import torch\n')] |
allenwang28/lingvo | lingvo/core/builder.py | 26d3d6672d3f46d8f281c2aa9f57166ef6296738 | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to build composite layers.
WARNING:
The builder pattern is still experimental and we need to gain experience
on when to use it and when not to.
Please discuss w/ teammates before using it to build complicated
layers.
"""
import functools
from lingvo.core import activations
from lingvo.core import builder_layers
from lingvo.core import hyperparams
from lingvo.core import layers
from lingvo.core import py_utils
from lingvo.core import tshape
class Base:
"""Model builder with commonly used layers.
A method in a builder class constructs a layer param. FProp of a layer
constructed by a builder takes a tuple of tf.Tensor (one or more) and returns
a tuple of tf.Tensor (one or more). Even though certain layers support FProp
  argument being None (e.g., Conv2DLayer), a builder should not depend on such
support.
The constructed layer is often a composition of multiple sub-layers connected
in certain patterns. We expect to have a few methods to facilitate building
these patterns. For example, _Seq() helps to build a sequential layer that
calls its sub-layer one after another.
TODO(zhifengc): Adds a more concrete example.
"""
@classmethod
def Params(cls):
"""The params of this layer."""
p = hyperparams.InstantiableParams(cls)
p.Define('deterministic_dropout', False,
'Used deterministic dropout or not.')
p.Define(
'fprop_dtype', None,
'Activations datatype to use. To enable bfloat16 activations for '
'layers built using model builder, set fprop_dtype to '
'tf.bfloat16, which will be propagated to layers that support '
'bfloat16 activations. Default is None, which will use float32 '
'activations.')
# SPMD partition related params.
p.Define(
'device_mesh', None,
'A numpy.ndarray specifying the topology of a device mesh to place the '
'computations onto. If device_mesh is None, it is assumed to be a '
'single device. Here are some examples: '
'np.array([0, 1, 2, 3, 4, 5, 6, 7]) which is a 1d mesh with 8 devices, '
'np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) which is 2d matrix of 8 '
'devices.')
p.Define(
'weight_split_dims_mapping', None,
'Relevant only if device_mesh above is not None. If not None, it '
'specifies how weight of this layer or those of the sublayers should '
'be sharded over device mesh. ')
p.Define(
'activation_split_dims_mapping', None,
'Relevant only if device_mesh above is not None. If not None, it '
'specifies how activation of this layer or those of the sublayers '
'should be sharded over device mesh. ')
return p
@property
def params(self):
"""Returns the params upon which this layer is built."""
return self._params
def __init__(self, params):
# Sub-classes should put some options common to many layers in __init__.
self._params = params.Copy()
######################################################################
# Layers to compose multiple layers.
#
# Sub-classes are discouraged to override these composition method.
######################################################################
def _Rep(self, name, repeat, *subs):
r"""Connects sub-layers sequentially and repeat multiple times.
E.g., _Rep('foo', 2, sa, sb, sc) constructs a layer with 6 layers
sequentially connected: [sa1, sb1, sc1, sa2, sb2, sc2]. sa1 and sa2 have
the same structure as the given sa, but sa1 and sa2 do not share the same
weight.
Args:
name: The layer name.
repeat: Repeat \*subs this many times in the compose layer.
*subs: A list of sub-layers.
Returns:
The param for the composed layer.
"""
iterations = []
for i in range(repeat):
iterations.append(self._Seq('iter_%03d' % i, *[p.Copy() for p in subs]))
return self._Seq(name, *iterations)
def _Seq(self, name, *subs):
"""Connects sub-layers sequentially."""
return builder_layers.SequentialLayer.Params().Set(
name=name, sub=list(subs))
def _Graph(self, name, input_endpoints, output_endpoints,
*signature_sub_param_list):
"""Connects sub-layers into a data flow graph."""
return builder_layers.GraphLayer.Params().Set(
name=name,
input_endpoints=input_endpoints,
output_endpoints=output_endpoints,
sub=list(signature_sub_param_list))
def _Id(self, name):
"""Identity. (t_1, ..., t_n) -> (t1, ..., t_n)."""
return self._Seq(name)
def _Arg(self, name, index):
"""Picks index-th element. (t_1, ..., t_n) -> (t_{index},)."""
return builder_layers.ArgIndexLayer.Params().Set(name=name, idx=[index])
def _Par(self, name, *subs):
"""y = (f1, f2, ..., fn)(x).
We feed the input tuple to all sub-layers and concatenates their output
tuples into one tuple.
Args:
name: The layer name.
*subs: A list of sub-layers.
Returns:
The param for the composed layer.
"""
def ConcatTuples(tuples):
# tuples is a list of tuples.
return tuple(functools.reduce(lambda x, y: x + list(y), tuples, []))
def ConcatMeta(tuples):
return py_utils.NestedMap(
flops=0,
out_shapes=tuple(
functools.reduce(lambda x, y: x + list(y), tuples, [])))
return builder_layers.ParallelLayer.Params().Set(
name=name, sub=list(subs), merge=ConcatTuples, merge_meta=ConcatMeta)
def _Fn(self, name, fn, fn_out=None, fn_flops=None):
"""y = fn(x).
Applies a fn: tuple(Tensor) -> a single Tensor or tuple(Tensor) to the input
tuple. Typically, fn is a very simple python function. This layer can be
used for prototyping but we advice to implement the logic as a sub-class of
BaseLayer for all established layers as FnLayer can't be serialized.
Args:
name: The layer name.
fn: A lambda tuple(Tensor) -> tuple(Tensor).
fn_out: A lambda tuple(tshape.Shape) -> output tuple(tshape.Shape)
fn_flops: A lambda tuple(tshape.Shape) -> estimated flops of fn.
If None, we assume flops == sum of elements in the inputs.
Returns:
The param for the composed layer.
"""
def FnMeta(*shapes):
"""A lambda tuple(tshape.Shape) -> NestedMap{flops, out_shapes}."""
if fn_out:
out_shapes = fn_out(*shapes)
if isinstance(out_shapes, tshape.Shape):
out_shapes = (out_shapes,)
else:
out_shapes = shapes
if fn_flops:
flops = fn_flops(*shapes)
else:
flops = sum([s.size for s in shapes])
return py_utils.NestedMap(flops=flops, out_shapes=out_shapes)
return builder_layers.FnLayer.Params().Set(name=name, fn=fn, fn_meta=FnMeta)
def _Save(self, name):
"""Returns a layer from which the activation and gradient can be accessed."""
return layers.FetchLayer.Params().Set(name=name)
def _AddFetches(self, name, body, fetches):
"""Fetches saved activations in the body sub-layer.
E.g.:
_AddFetches('foo', _Seq( 'stack', _Layer('layer1', ...),
_Save('layer1_out', ...), _Layer('layer2', ...), _Save('layer2_out', ...),
_Output('output', ...)), ['layer1_out', 'layer2_out'])
The layer returns the stack's final output together with intermediate
activations from layer1_out and layer2_out.
Args:
name: This layer's name.
body: The sub-layer.
fetches: A list of fetch names inside the sub-layer body.
Returns:
A layer whose outputs correspond to the activations of fetch points
in the sub-layer body. [input1, input2, ..., inputN, fetch1, ..., fetchM].
"""
return builder_layers.BranchLayer.Params().Set(
name=name, body=body, fetches=fetches)
def _Rematerialize(self, name, body):
"""Forces rematerialization on FProp of the body layer."""
return builder_layers.RematerializationLayer.Params().Set(
name=name, body=body)
def _BatchParallel(self, name, sub):
"""Splits the batch and compute the forward pass on multiple devices.
Args:
name: This layer's name.
sub: The sub-layer.
Returns:
A BatchParallel layer which splits the batch and computes the forward pass
on multiple devices.
"""
return builder_layers.BatchParallelLayer.Params().Set(name=name, sub=sub)
def _PrintShape(self, name):
"""Print FProp input shape information."""
return builder_layers.PrintShapeLayer.Params().Set(name=name)
def _CreateNestedMap(self, name, keys):
"""Returns a NestedMap with keys from fprop args."""
return builder_layers.CreateNestedMapLayer.Params().Set(
name=name, keys=keys)
###########################################################################
# Basic nn layers.
#
# The following method returns a layer param, whose FProp takes a single
# Tensor and returns a single Tensor.
#
  # These methods are designed to have minimal knobs. Sub-classes that need to
# be flexible can override these methods with different options. E.g., a
# sub-class builder can override _BN() to tune the decay option.
###########################################################################
def _BN(self, name, dims):
"""Batch norm."""
return layers.BatchNormLayer.Params().Set(name=name, dim=dims, decay=0.99)
def _LN(self, name, dims, use_fused_layernorm=False):
"""Layer norm."""
return layers.LayerNorm.Params().Set(
name=name,
input_dim=dims,
use_fused_layernorm=use_fused_layernorm,
fprop_dtype=self.params.fprop_dtype)
def _Dropout(self, name, keep_prob, noise_shape_broadcast_dims=None):
"""Returns a DropoutLayer Params."""
if self.params.deterministic_dropout:
return layers.DeterministicDropoutLayer.Params().Set(
name=name,
keep_prob=keep_prob,
noise_shape_broadcast_dims=noise_shape_broadcast_dims)
return layers.DropoutLayer.Params().Set(
name=name,
keep_prob=keep_prob,
noise_shape_broadcast_dims=noise_shape_broadcast_dims,
fprop_dtype=self.params.fprop_dtype)
def _Linear(self,
name,
idims,
odims,
device_mesh=None,
weight_split_dims_mapping=None,
qdomain=None):
"""Linear layer. y = matmul([..., idims], [idims, odims])."""
p = builder_layers.LinearLayer.Params()
p.name = name
p.input_dims = idims
p.output_dims = odims
p.fprop_dtype = self.params.fprop_dtype
p.device_mesh = device_mesh
p.weight_split_dims_mapping = weight_split_dims_mapping
p.qdomain.default = qdomain
return p
def _Bias(self, name, dims, device_mesh=None, weight_split_dims_mapping=None):
"""Bias layer. The bias is added to the last dimension of the input."""
return builder_layers.BiasLayer.Params().Set(
name=name,
dims=dims,
fprop_dtype=self.params.fprop_dtype,
device_mesh=device_mesh,
weight_split_dims_mapping=weight_split_dims_mapping)
def _Activation(self, name, fn='RELU'):
"""Activation layer."""
return activations.ActivationLayer.Params().Set(activation=fn, name=name)
def _FC(self, name, idims, odims, act='RELU'):
"""Feed-forward fully connected. y = act(matmul(x, w) + b)."""
# pyformat: disable
return self._Seq(
name,
self._Linear('linear', idims, odims),
self._Bias('bias', odims),
self._Activation('act', fn=act))
def _MLP(self, name, dims, act='RELU'):
"""Multiple layers of feed-forward fully connected.
Args:
name: The layer name.
dims: A list of int. i-th layer has dims[i] as its input dimension, and
dims[i+1] as its output dimensions.
act: The activation function.
Returns:
The param for the composed layer.
"""
l = []
for n, (i, o) in enumerate(zip(dims[:-1], dims[1:])):
l += [self._FC('l%03d' % n, i, o, act)]
return self._Seq(name, *l)
def _Conv2D(self, name, filter_shape, filter_stride):
"""Conv2D layer."""
return layers.Conv2DLayerNoPadding.Params().Set(
name=name, filter_shape=filter_shape, filter_stride=filter_stride,
fprop_dtype=self.params.fprop_dtype)
def _Reshape(self, name, shape):
"""Reshape inputs to the shape provided."""
return builder_layers.ReshapeLayer.Params().Set(name=name,
shape=shape)
| [((54, 8, 54, 43), 'lingvo.core.hyperparams.InstantiableParams', 'hyperparams.InstantiableParams', ({(54, 39, 54, 42): 'cls'}, {}), '(cls)', False, 'from lingvo.core import hyperparams\n'), ((301, 8, 301, 43), 'lingvo.core.builder_layers.LinearLayer.Params', 'builder_layers.LinearLayer.Params', ({}, {}), '()', False, 'from lingvo.core import builder_layers\n'), ((200, 13, 200, 67), 'lingvo.core.py_utils.NestedMap', 'py_utils.NestedMap', (), '', False, 'from lingvo.core import py_utils\n'), ((122, 11, 122, 50), 'lingvo.core.builder_layers.SequentialLayer.Params', 'builder_layers.SequentialLayer.Params', ({}, {}), '()', False, 'from lingvo.core import builder_layers\n'), ((128, 11, 128, 45), 'lingvo.core.builder_layers.GraphLayer.Params', 'builder_layers.GraphLayer.Params', ({}, {}), '()', False, 'from lingvo.core import builder_layers\n'), ((140, 11, 140, 48), 'lingvo.core.builder_layers.ArgIndexLayer.Params', 'builder_layers.ArgIndexLayer.Params', ({}, {}), '()', False, 'from lingvo.core import builder_layers\n'), ((166, 11, 166, 48), 'lingvo.core.builder_layers.ParallelLayer.Params', 'builder_layers.ParallelLayer.Params', ({}, {}), '()', False, 'from lingvo.core import builder_layers\n'), ((202, 11, 202, 42), 'lingvo.core.builder_layers.FnLayer.Params', 'builder_layers.FnLayer.Params', ({}, {}), '()', False, 'from lingvo.core import builder_layers\n'), ((206, 11, 206, 37), 'lingvo.core.layers.FetchLayer.Params', 'layers.FetchLayer.Params', ({}, {}), '()', False, 'from lingvo.core import layers\n'), ((228, 11, 228, 46), 'lingvo.core.builder_layers.BranchLayer.Params', 'builder_layers.BranchLayer.Params', ({}, {}), '()', False, 'from lingvo.core import builder_layers\n'), ((233, 11, 233, 57), 'lingvo.core.builder_layers.RematerializationLayer.Params', 'builder_layers.RematerializationLayer.Params', ({}, {}), '()', False, 'from lingvo.core import builder_layers\n'), ((247, 11, 247, 53), 'lingvo.core.builder_layers.BatchParallelLayer.Params', 'builder_layers.BatchParallelLayer.Params', ({}, {}), '()', False, 'from lingvo.core import builder_layers\n'), ((251, 11, 251, 50), 'lingvo.core.builder_layers.PrintShapeLayer.Params', 'builder_layers.PrintShapeLayer.Params', ({}, {}), '()', False, 'from lingvo.core import builder_layers\n'), ((255, 11, 255, 55), 'lingvo.core.builder_layers.CreateNestedMapLayer.Params', 'builder_layers.CreateNestedMapLayer.Params', ({}, {}), '()', False, 'from lingvo.core import builder_layers\n'), ((270, 11, 270, 41), 'lingvo.core.layers.BatchNormLayer.Params', 'layers.BatchNormLayer.Params', ({}, {}), '()', False, 'from lingvo.core import layers\n'), ((274, 11, 274, 36), 'lingvo.core.layers.LayerNorm.Params', 'layers.LayerNorm.Params', ({}, {}), '()', False, 'from lingvo.core import layers\n'), ((287, 11, 287, 39), 'lingvo.core.layers.DropoutLayer.Params', 'layers.DropoutLayer.Params', ({}, {}), '()', False, 'from lingvo.core import layers\n'), ((313, 11, 313, 44), 'lingvo.core.builder_layers.BiasLayer.Params', 'builder_layers.BiasLayer.Params', ({}, {}), '()', False, 'from lingvo.core import builder_layers\n'), ((322, 11, 322, 47), 'lingvo.core.activations.ActivationLayer.Params', 'activations.ActivationLayer.Params', ({}, {}), '()', False, 'from lingvo.core import activations\n'), ((352, 11, 352, 47), 'lingvo.core.layers.Conv2DLayerNoPadding.Params', 'layers.Conv2DLayerNoPadding.Params', ({}, {}), '()', False, 'from lingvo.core import layers\n'), ((358, 11, 358, 47), 'lingvo.core.builder_layers.ReshapeLayer.Params', 'builder_layers.ReshapeLayer.Params', ({}, {}), 
'()', False, 'from lingvo.core import builder_layers\n'), ((283, 13, 283, 54), 'lingvo.core.layers.DeterministicDropoutLayer.Params', 'layers.DeterministicDropoutLayer.Params', ({}, {}), '()', False, 'from lingvo.core import layers\n')] |
gilramir/instmake | instmakelib/instmake_toolnames.py | 7b083a5061be43e9b92bdcf0f3badda7c4107eef | # Copyright (c) 2010 by Cisco Systems, Inc.
"""
Manage the tool plugins and use them appropriately.
"""
import os
TOOLNAME_PLUGIN_PREFIX = "toolname"
class ToolNameManager:
"""ToolName plugins have to register with this manager
the circumstances under which they wish to be called."""
def __init__(self, plugins):
toolname_plugins = plugins.LoadAllPlugins(TOOLNAME_PLUGIN_PREFIX)
self.first_arg_matches = []
self.first_arg_basename_matches = []
self.first_arg_regexes= []
self.first_arg_basename_regexes = []
self.command_line_regexes = []
for plugin in toolname_plugins:
plugin.register(self)
def RegisterFirstArgumentMatch(self, text, cb):
"""Call back parameters: first_arg, argv, cwd"""
self.first_arg_matches.append((text, cb))
def RegisterFirstArgumentRegex(self, regex, cb):
"""Call back parameters: first_arg, argv, cwd, regex_match"""
self.first_arg_regexes.append((regex, cb))
def RegisterFirstArgumentBasenameMatch(self, text, cb):
"""Call back parameters: basename, first_arg, argv, cwd"""
self.first_arg_basename_matches.append((text, cb))
def RegisterFirstArgumentBasenameRegex(self, regex, cb):
"""Call back parameters: basename, first_arg, argv, cw, regex_match"""
self.first_arg_basename_regexes.append((regex, cb))
def RegisterCommandLineRegex(self, regex, cb):
"""Call back parameters: argv, cwd, regex_match"""
self.command_line_regexes.append((regex, cb))
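    # Illustrative plugin sketch (hypothetical; real plugins are loaded via
    # LoadAllPlugins above):
    #   def register(manager):
    #       manager.RegisterFirstArgumentBasenameMatch(
    #           "ccache", lambda basename, first_arg, argv, cwd: argv[1:])
    # Returning a shortened argv lets GetTool() re-examine the wrapped tool;
    # returning None declines to handle the command.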
def GetTool(self, cmdline_args, cwd):
"""Returns a single string representing the tool in this
command-line. cmdline_args is an array of strings that will
be concatenated with spaces to form a single command-line."""
# It's done this way because of the way the command-line is
# stored in the instmake log. The top-most process (which is
# the first 'make' run, i.e., the last record in the instmake log)
# has a cmdline_args with one true argv-item per item. However,
# the instmakes that were called from 'make' have their entire
# command-line existing as a single string (the first and only
# item in cmdline_args).
argv_joined = ' '.join(cmdline_args)
argv = argv_joined.split()
# Call _GetTool as many times as necessary to find
# a non-changing answer.
seen = {}
max_iterations = 100
i = 0
while 1:
seen[argv_joined] = None
new_argv = self._GetTool(argv, cwd)
new_argv_joined = ' '.join(new_argv)
if new_argv_joined == argv_joined:
return new_argv[0]
elif seen.has_key(new_argv_joined):
return new_argv[0]
else:
i += 1
if i == max_iterations:
return new_argv[0]
argv = new_argv
argv_joined = new_argv_joined
def _GetTool(self, argv, cwd):
cmdline = ' '.join(argv)
# Check the command-line
for (regex, cb) in self.command_line_regexes:
m = regex.search(cmdline)
if m:
retval = cb(argv, cwd, m)
if retval != None:
return retval
# Get the first argument
if len(argv) >= 1:
first_arg = argv[0]
else:
return argv
# Check the first argument
for (text, cb) in self.first_arg_matches:
if first_arg == text:
retval = cb(first_arg, argv, cwd)
if retval != None:
return retval
for (regex, cb) in self.first_arg_regexes:
m = regex.search(first_arg)
if m:
retval = cb(first_arg, argv, cwd, m)
if retval != None:
return retval
# Check the basename of the first arg
basename = os.path.basename(first_arg)
for (text, cb) in self.first_arg_basename_matches:
if basename == text:
retval = cb(basename, first_arg, argv, cwd)
if retval != None:
return retval
for (regex, cb) in self.first_arg_basename_regexes:
m = regex.search(basename)
if m:
retval = cb(basename, first_arg, argv, cwd, m)
if retval != None:
return retval
# Nothing matched. Return the default value.
return argv
| [((113, 19, 113, 46), 'os.path.basename', 'os.path.basename', ({(113, 36, 113, 45): 'first_arg'}, {}), '(first_arg)', False, 'import os\n')] |
tirkarthi/raiden | raiden/tests/integration/long_running/test_stress.py | dbd03ddda039332b54ec0c02d81cbe1100bc8028 | import time
from http import HTTPStatus
from itertools import count
from typing import Sequence
import gevent
import grequests
import pytest
import structlog
from eth_utils import to_canonical_address
from flask import url_for
from raiden.api.python import RaidenAPI
from raiden.api.rest import APIServer, RestAPI
from raiden.constants import RoutingMode
from raiden.message_handler import MessageHandler
from raiden.network.transport import MatrixTransport
from raiden.raiden_event_handler import RaidenEventHandler
from raiden.raiden_service import RaidenService
from raiden.settings import RestApiConfig
from raiden.tests.integration.api.utils import wait_for_listening_port
from raiden.tests.integration.fixtures.raiden_network import RestartNode
from raiden.tests.utils.detect_failure import raise_on_failure
from raiden.tests.utils.protocol import HoldRaidenEventHandler
from raiden.tests.utils.transfer import (
assert_synced_channel_state,
wait_assert,
watch_for_unlock_failures,
)
from raiden.transfer import views
from raiden.ui.startup import RaidenBundle
from raiden.utils.formatting import to_checksum_address
from raiden.utils.typing import (
Address,
BlockNumber,
Host,
Iterator,
List,
Port,
TokenAddress,
TokenAmount,
TokenNetworkAddress,
Tuple,
)
log = structlog.get_logger(__name__)
def iwait_and_get(items: Sequence[gevent.Greenlet]) -> None:
"""Iteratively wait and get on passed greenlets.
This ensures exceptions in the greenlets are re-raised as soon as possible.
"""
for item in gevent.iwait(items):
item.get()
def _url_for(apiserver: APIServer, endpoint: str, **kwargs) -> str:
# url_for() expects binary address so we have to convert here
for key, val in kwargs.items():
if isinstance(val, str) and val.startswith("0x"):
kwargs[key] = to_canonical_address(val)
with apiserver.flask_app.app_context():
return url_for(f"v1_resources.{endpoint}", **kwargs)
def start_apiserver(raiden_app: RaidenService, rest_api_port_number: Port) -> APIServer:
raiden_api = RaidenAPI(raiden_app)
rest_api = RestAPI(raiden_api)
api_server = APIServer(
rest_api, config=RestApiConfig(host=Host("localhost"), port=rest_api_port_number)
)
# required for url_for
api_server.flask_app.config["SERVER_NAME"] = f"localhost:{rest_api_port_number}"
api_server.start()
wait_for_listening_port(rest_api_port_number)
return api_server
def start_apiserver_for_network(
raiden_network: List[RaidenService], port_generator: Iterator[Port]
) -> List[APIServer]:
return [start_apiserver(app, next(port_generator)) for app in raiden_network]
def restart_app(app: RaidenService, restart_node: RestartNode) -> RaidenService:
new_transport = MatrixTransport(
config=app.config.transport, environment=app.config.environment_type
)
raiden_event_handler = RaidenEventHandler()
hold_handler = HoldRaidenEventHandler(raiden_event_handler)
app = RaidenService(
config=app.config,
rpc_client=app.rpc_client,
proxy_manager=app.proxy_manager,
query_start_block=BlockNumber(0),
raiden_bundle=RaidenBundle(
app.default_registry,
app.default_secret_registry,
),
services_bundle=app.default_services_bundle,
transport=new_transport,
raiden_event_handler=hold_handler,
message_handler=MessageHandler(),
routing_mode=RoutingMode.PRIVATE,
)
restart_node(app)
return app
def restart_network(
raiden_network: List[RaidenService], restart_node: RestartNode
) -> List[RaidenService]:
for app in raiden_network:
app.stop()
wait_network = (gevent.spawn(restart_app, app, restart_node) for app in raiden_network)
gevent.joinall(set(wait_network), raise_error=True)
new_network = [greenlet.get() for greenlet in wait_network]
return new_network
def restart_network_and_apiservers(
raiden_network: List[RaidenService],
restart_node: RestartNode,
api_servers: List[APIServer],
port_generator: Iterator[Port],
) -> Tuple[List[RaidenService], List[APIServer]]:
"""Stop an app and start it back"""
for rest_api in api_servers:
rest_api.stop()
new_network = restart_network(raiden_network, restart_node)
new_servers = start_apiserver_for_network(new_network, port_generator)
return (new_network, new_servers)
def address_from_apiserver(apiserver: APIServer) -> Address:
return apiserver.rest_api.raiden_api.address
def transfer_and_assert(
server_from: APIServer,
server_to: APIServer,
token_address: TokenAddress,
identifier: int,
amount: TokenAmount,
) -> None:
url = _url_for(
server_from,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(address_from_apiserver(server_to)),
)
json = {"amount": amount, "identifier": identifier}
log.debug("PAYMENT REQUEST", url=url, json=json)
request = grequests.post(url, json=json)
start = time.monotonic()
response = request.send().response
duration = time.monotonic() - start
log.debug("PAYMENT RESPONSE", url=url, json=json, response=response, duration=duration)
assert getattr(request, "exception", None) is None
assert response is not None
assert response.status_code == HTTPStatus.OK, f"Payment failed, reason: {response.content}"
assert response.headers["Content-Type"] == "application/json"
def sequential_transfers(
server_from: APIServer,
server_to: APIServer,
number_of_transfers: int,
token_address: TokenAddress,
identifier_generator: Iterator[int],
) -> None:
for _ in range(number_of_transfers):
transfer_and_assert(
server_from=server_from,
server_to=server_to,
token_address=token_address,
identifier=next(identifier_generator),
amount=TokenAmount(1),
)
def stress_send_serial_transfers(
rest_apis: List[APIServer],
token_address: TokenAddress,
identifier_generator: Iterator[int],
deposit: TokenAmount,
) -> None:
"""Send `deposit` transfers of value `1` one at a time, without changing
the initial capacity.
"""
pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))
# deplete the channels in one direction
for server_from, server_to in pairs:
sequential_transfers(
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
# deplete the channels in the backwards direction
for server_to, server_from in pairs:
sequential_transfers(
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit * 2,
token_address=token_address,
identifier_generator=identifier_generator,
)
    # reset the balances by sending the "extra" deposit forward
for server_from, server_to in pairs:
sequential_transfers(
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
def stress_send_parallel_transfers(
rest_apis: List[APIServer],
token_address: TokenAddress,
identifier_generator: Iterator[int],
deposit: TokenAmount,
) -> None:
"""Send `deposit` transfers in parallel, without changing the initial capacity."""
pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))
# deplete the channels in one direction
iwait_and_get(
[
gevent.spawn(
sequential_transfers,
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
for server_from, server_to in pairs
]
)
# deplete the channels in the backwards direction
iwait_and_get(
[
gevent.spawn(
sequential_transfers,
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit * 2,
token_address=token_address,
identifier_generator=identifier_generator,
)
for server_to, server_from in pairs
]
)
    # reset the balances by sending the "extra" deposit forward
iwait_and_get(
[
gevent.spawn(
sequential_transfers,
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
for server_from, server_to in pairs
]
)
def stress_send_and_receive_parallel_transfers(
rest_apis: List[APIServer],
token_address: TokenAddress,
identifier_generator: Iterator[int],
deposit: TokenAmount,
) -> None:
"""Send transfers of value one in parallel"""
pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))
forward_transfers = [
gevent.spawn(
sequential_transfers,
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
for server_from, server_to in pairs
]
backwards_transfers = [
gevent.spawn(
sequential_transfers,
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
for server_to, server_from in pairs
]
iwait_and_get(forward_transfers + backwards_transfers)
def assert_channels(
raiden_network: List[RaidenService],
token_network_address: TokenNetworkAddress,
deposit: TokenAmount,
) -> None:
pairs = list(zip(raiden_network, raiden_network[1:] + [raiden_network[0]]))
for first, second in pairs:
wait_assert(
assert_synced_channel_state,
token_network_address,
first,
deposit,
[],
second,
deposit,
[],
)
@pytest.mark.skip(reason="flaky, see https://github.com/raiden-network/raiden/issues/4803")
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [3])
@pytest.mark.parametrize("number_of_tokens", [1])
@pytest.mark.parametrize("channels_per_node", [2])
@pytest.mark.parametrize("deposit", [2])
@pytest.mark.parametrize("reveal_timeout", [15])
@pytest.mark.parametrize("settle_timeout", [120])
def test_stress(
raiden_network: List[RaidenService],
restart_node: RestartNode,
deposit: TokenAmount,
token_addresses: List[TokenAddress],
port_generator: Iterator[Port],
) -> None:
token_address = token_addresses[0]
rest_apis = start_apiserver_for_network(raiden_network, port_generator)
identifier_generator = count(start=1)
token_network_address = views.get_token_network_address_by_token_address(
views.state_from_raiden(raiden_network[0]),
raiden_network[0].default_registry.address,
token_address,
)
assert token_network_address
for _ in range(2):
assert_channels(raiden_network, token_network_address, deposit)
with watch_for_unlock_failures(*raiden_network):
stress_send_serial_transfers(rest_apis, token_address, identifier_generator, deposit)
raiden_network, rest_apis = restart_network_and_apiservers(
raiden_network, restart_node, rest_apis, port_generator
)
assert_channels(raiden_network, token_network_address, deposit)
with watch_for_unlock_failures(*raiden_network):
stress_send_parallel_transfers(rest_apis, token_address, identifier_generator, deposit)
raiden_network, rest_apis = restart_network_and_apiservers(
raiden_network, restart_node, rest_apis, port_generator
)
assert_channels(raiden_network, token_network_address, deposit)
with watch_for_unlock_failures(*raiden_network):
stress_send_and_receive_parallel_transfers(
rest_apis, token_address, identifier_generator, deposit
)
raiden_network, rest_apis = restart_network_and_apiservers(
raiden_network, restart_node, rest_apis, port_generator
)
restart_network(raiden_network, restart_node)
| [((46, 6, 46, 36), 'structlog.get_logger', 'structlog.get_logger', ({(46, 27, 46, 35): '__name__'}, {}), '(__name__)', False, 'import structlog\n'), ((355, 1, 355, 91), 'pytest.mark.skip', 'pytest.mark.skip', (), '', False, 'import pytest\n'), ((357, 1, 357, 48), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(357, 25, 357, 42): '"""number_of_nodes"""', (357, 44, 357, 47): '[3]'}, {}), "('number_of_nodes', [3])", False, 'import pytest\n'), ((358, 1, 358, 49), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(358, 25, 358, 43): '"""number_of_tokens"""', (358, 45, 358, 48): '[1]'}, {}), "('number_of_tokens', [1])", False, 'import pytest\n'), ((359, 1, 359, 50), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(359, 25, 359, 44): '"""channels_per_node"""', (359, 46, 359, 49): '[2]'}, {}), "('channels_per_node', [2])", False, 'import pytest\n'), ((360, 1, 360, 40), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(360, 25, 360, 34): '"""deposit"""', (360, 36, 360, 39): '[2]'}, {}), "('deposit', [2])", False, 'import pytest\n'), ((361, 1, 361, 48), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(361, 25, 361, 41): '"""reveal_timeout"""', (361, 43, 361, 47): '[15]'}, {}), "('reveal_timeout', [15])", False, 'import pytest\n'), ((362, 1, 362, 49), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(362, 25, 362, 41): '"""settle_timeout"""', (362, 43, 362, 48): '[120]'}, {}), "('settle_timeout', [120])", False, 'import pytest\n'), ((54, 16, 54, 35), 'gevent.iwait', 'gevent.iwait', ({(54, 29, 54, 34): 'items'}, {}), '(items)', False, 'import gevent\n'), ((69, 17, 69, 38), 'raiden.api.python.RaidenAPI', 'RaidenAPI', ({(69, 27, 69, 37): 'raiden_app'}, {}), '(raiden_app)', False, 'from raiden.api.python import RaidenAPI\n'), ((70, 15, 70, 34), 'raiden.api.rest.RestAPI', 'RestAPI', ({(70, 23, 70, 33): 'raiden_api'}, {}), '(raiden_api)', False, 'from raiden.api.rest import APIServer, RestAPI\n'), ((80, 4, 80, 49), 'raiden.tests.integration.api.utils.wait_for_listening_port', 'wait_for_listening_port', ({(80, 28, 80, 48): 'rest_api_port_number'}, {}), '(rest_api_port_number)', False, 'from raiden.tests.integration.api.utils import wait_for_listening_port\n'), ((92, 20, 94, 5), 'raiden.network.transport.MatrixTransport', 'MatrixTransport', (), '', False, 'from raiden.network.transport import MatrixTransport\n'), ((95, 27, 95, 47), 'raiden.raiden_event_handler.RaidenEventHandler', 'RaidenEventHandler', ({}, {}), '()', False, 'from raiden.raiden_event_handler import RaidenEventHandler\n'), ((96, 19, 96, 63), 'raiden.tests.utils.protocol.HoldRaidenEventHandler', 'HoldRaidenEventHandler', ({(96, 42, 96, 62): 'raiden_event_handler'}, {}), '(raiden_event_handler)', False, 'from raiden.tests.utils.protocol import HoldRaidenEventHandler\n'), ((171, 14, 171, 44), 'grequests.post', 'grequests.post', (), '', False, 'import grequests\n'), ((173, 12, 173, 28), 'time.monotonic', 'time.monotonic', ({}, {}), '()', False, 'import time\n'), ((372, 27, 372, 41), 'itertools.count', 'count', (), '', False, 'from itertools import count\n'), ((65, 15, 65, 60), 'flask.url_for', 'url_for', ({(65, 23, 65, 49): 'f"""v1_resources.{endpoint}"""'}, {}), "(f'v1_resources.{endpoint}', **kwargs)", False, 'from flask import url_for\n'), ((125, 20, 125, 64), 'gevent.spawn', 'gevent.spawn', ({(125, 33, 125, 44): 'restart_app', (125, 46, 125, 49): 'app', (125, 51, 125, 63): 'restart_node'}, {}), '(restart_app, app, restart_node)', False, 'import gevent\n'), ((175, 15, 175, 31), 'time.monotonic', 
'time.monotonic', ({}, {}), '()', False, 'import time\n'), ((309, 8, 316, 9), 'gevent.spawn', 'gevent.spawn', (), '', False, 'import gevent\n'), ((321, 8, 328, 9), 'gevent.spawn', 'gevent.spawn', (), '', False, 'import gevent\n'), ((343, 8, 352, 9), 'raiden.tests.utils.transfer.wait_assert', 'wait_assert', ({(344, 12, 344, 39): 'assert_synced_channel_state', (345, 12, 345, 33): 'token_network_address', (346, 12, 346, 17): 'first', (347, 12, 347, 19): 'deposit', (348, 12, 348, 14): '[]', (349, 12, 349, 18): 'second', (350, 12, 350, 19): 'deposit', (351, 12, 351, 14): '[]'}, {}), '(assert_synced_channel_state, token_network_address, first,\n deposit, [], second, deposit, [])', False, 'from raiden.tests.utils.transfer import assert_synced_channel_state, wait_assert, watch_for_unlock_failures\n'), ((375, 8, 375, 50), 'raiden.transfer.views.state_from_raiden', 'views.state_from_raiden', ({(375, 32, 375, 49): 'raiden_network[0]'}, {}), '(raiden_network[0])', False, 'from raiden.transfer import views\n'), ((62, 26, 62, 51), 'eth_utils.to_canonical_address', 'to_canonical_address', ({(62, 47, 62, 50): 'val'}, {}), '(val)', False, 'from eth_utils import to_canonical_address\n'), ((102, 26, 102, 40), 'raiden.utils.typing.BlockNumber', 'BlockNumber', ({(102, 38, 102, 39): '0'}, {}), '(0)', False, 'from raiden.utils.typing import Address, BlockNumber, Host, Iterator, List, Port, TokenAddress, TokenAmount, TokenNetworkAddress, Tuple\n'), ((103, 22, 106, 9), 'raiden.ui.startup.RaidenBundle', 'RaidenBundle', ({(104, 12, 104, 32): 'app.default_registry', (105, 12, 105, 39): 'app.default_secret_registry'}, {}), '(app.default_registry, app.default_secret_registry)', False, 'from raiden.ui.startup import RaidenBundle\n'), ((110, 24, 110, 40), 'raiden.message_handler.MessageHandler', 'MessageHandler', ({}, {}), '()', False, 'from raiden.message_handler import MessageHandler\n'), ((164, 22, 164, 56), 'raiden.utils.formatting.to_checksum_address', 'to_checksum_address', ({(164, 42, 164, 55): 'token_address'}, {}), '(token_address)', False, 'from raiden.utils.formatting import to_checksum_address\n'), ((256, 12, 263, 13), 'gevent.spawn', 'gevent.spawn', (), '', False, 'import gevent\n'), ((271, 12, 278, 13), 'gevent.spawn', 'gevent.spawn', (), '', False, 'import gevent\n'), ((286, 12, 293, 13), 'gevent.spawn', 'gevent.spawn', (), '', False, 'import gevent\n'), ((384, 13, 384, 55), 'raiden.tests.utils.transfer.watch_for_unlock_failures', 'watch_for_unlock_failures', ({(384, 39, 384, 54): '*raiden_network'}, {}), '(*raiden_network)', False, 'from raiden.tests.utils.transfer import assert_synced_channel_state, wait_assert, watch_for_unlock_failures\n'), ((393, 13, 393, 55), 'raiden.tests.utils.transfer.watch_for_unlock_failures', 'watch_for_unlock_failures', ({(393, 39, 393, 54): '*raiden_network'}, {}), '(*raiden_network)', False, 'from raiden.tests.utils.transfer import assert_synced_channel_state, wait_assert, watch_for_unlock_failures\n'), ((402, 13, 402, 55), 'raiden.tests.utils.transfer.watch_for_unlock_failures', 'watch_for_unlock_failures', ({(402, 39, 402, 54): '*raiden_network'}, {}), '(*raiden_network)', False, 'from raiden.tests.utils.transfer import assert_synced_channel_state, wait_assert, watch_for_unlock_failures\n'), ((198, 19, 198, 33), 'raiden.utils.typing.TokenAmount', 'TokenAmount', ({(198, 31, 198, 32): '(1)'}, {}), '(1)', False, 'from raiden.utils.typing import Address, BlockNumber, Host, Iterator, List, Port, TokenAddress, TokenAmount, TokenNetworkAddress, Tuple\n'), ((72, 44, 72, 61), 
'raiden.utils.typing.Host', 'Host', ({(72, 49, 72, 60): '"""localhost"""'}, {}), "('localhost')", False, 'from raiden.utils.typing import Address, BlockNumber, Host, Iterator, List, Port, TokenAddress, TokenAmount, TokenNetworkAddress, Tuple\n')] |
jackie930/PyABSA | pyabsa/utils/preprocess.py | 3cf733f8b95610a69c985b4650309c24f42b44b5 | # -*- coding: utf-8 -*-
# file: preprocess.py
# author: jackie
# Copyright (C) 2021. All Rights Reserved.
import os
import pandas as pd
import argparse
import emoji
import re
from sklearn.model_selection import train_test_split
parser = argparse.ArgumentParser()
parser.add_argument("--inpath", type=str, required=True, default='./raw_data/data1.csv')
parser.add_argument("--folder_name", type=str, required=False, default='./custom')
parser.add_argument("--task", type=str, required=False, default='aptepc')
args = parser.parse_args()
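# Note on the expected input (inferred from the conversion code below, not from
# upstream documentation): each CSV row carries a text column and a label column
# whose string eval()s to a list of tuples shaped roughly like
#   [(aspect_term, aspect_category, <unused>, '正' or '负', (start, end)), ...]
# where (start, end) are character offsets of the aspect span inside the text.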
def convert(text, labels):
# convert label to list
try:
labels = eval(labels)
tags = ['O'] * len(text)
sentiment = ['-999'] * len(text)
for j in range(len(labels)):
label = labels[j]
sentiment_key = labels[j][3]
if sentiment_key == '正':
sentiment_value = 'Positive'
elif sentiment_key == '负':
sentiment_value = 'Negative'
else:
sentiment_value = 'Others'
tags[label[4][0]] = 'B-ASP'
sentiment[label[4][0]] = sentiment_value
k = label[4][0] + 1
while k < label[4][1]:
tags[k] = 'I-ASP'
sentiment[k] = sentiment_value
k += 1
return text, tags, sentiment
except:
print ("labels", labels)
print ("text", text)
def convert_tag(text, labels):
# convert label to list
try:
labels = eval(labels)
tags = ['O'] * len(text)
sentiment = ['-999'] * len(text)
for j in range(len(labels)):
label = labels[j]
sentiment_key = labels[j][3]
if sentiment_key == '正':
sentiment_value = 'Positive'
elif sentiment_key == '负':
sentiment_value = 'Negative'
else:
sentiment_value = 'Others'
tags[label[4][0]] = 'B-'+label[1]
sentiment[label[4][0]] = sentiment_value
k = label[4][0] + 1
while k < label[4][1]:
tags[k] = 'I-'+label[1]
sentiment[k] = sentiment_value
k += 1
return text, tags, sentiment
except:
print ("labels", labels)
print ("text", text)
def convert_sentiment(sentiment_key):
if sentiment_key == '正':
sentiment_value = 'Positive'
else:
sentiment_value = 'Negative'
return sentiment_value
def convert_apc(text, label):
label_update = [(i[0], i[3], i[4]) for i in eval(label)]
label_update = list(set(label_update))
str1_list = []
str2_list = []
str3_list = []
for j in range(len(label_update)):
str1 = text[:label_update[j][2][0]] + '$T$ ' + text[label_update[j][2][1]:]
str1_list.append(str1)
str2_list.append(label_update[j][0])
str3_list.append(convert_sentiment(label_update[j][1]))
return str1_list, str2_list, str3_list
def filter_emoji(desstr, restr=''):
    # filter out emoji characters
try:
co = re.compile(u'[\U00010000-\U0010ffff]')
except re.error:
co = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
return co.sub(restr, desstr)
def convert_to_atepc(inpath, dist_fname, flag):
    # before writing, check whether the output file already exists; delete it if so
if os.path.exists(dist_fname):
os.remove(dist_fname)
f1 = open(dist_fname, 'w', encoding='utf8')
data = pd.read_csv(inpath)
data.columns = ['text', 'tag_sentiment_list']
# preprocess for emoji
data['text'] = data['text'].map(lambda x: filter_emoji(x, restr='xx'))
    # only keep reviews whose text length is at most 600 characters
data = data[data['text'].str.len() <= 600]
# train test split
x_train, x_test = train_test_split(data, test_size=0.2, random_state=42)
if flag == 'train':
data_res = x_train.iloc[:, :].reset_index()
else:
data_res = x_test.iloc[:, :].reset_index()
# print (data_res.head())
for i in range(len(data_res)):
text, label = data_res['text'][i], data_res['tag_sentiment_list'][i]
text, tags, sentiment = convert(text, label)
for word, tag, sen in zip(text, tags, sentiment):
if word not in [',', '。', ' ', '\xa0', '\u2006', '\u3000', '\u2002', '\u2003', '\u2005', '\x0c', '\u2028',
'\u2009', '\u200a']:
f1.write(word + ' ' + tag + ' ' + sen + '\n')
else:
f1.write("\n")
f1.write("\n")
f1.close()
print ("process atepc finished!")
def convert_to_atepc_tag(inpath, dist_fname, flag):
    # before writing, check whether the output file already exists; delete it if so
if os.path.exists(dist_fname):
os.remove(dist_fname)
f1 = open(dist_fname, 'w', encoding='utf8')
data = pd.read_csv(inpath)
data.columns = ['text', 'tag_sentiment_list']
# preprocess for emoji
data['text'] = data['text'].map(lambda x: filter_emoji(x, restr='xx'))
# drop id list not able to process
# print (data.iloc[8832,:])
# data = data.drop([8832])
    # only keep reviews whose text length is at most 600 characters
data = data[data['text'].str.len() <= 600]
# train test split
x_train, x_test = train_test_split(data, test_size=0.2, random_state=42)
if flag == 'train':
data_res = x_train.iloc[:, :].reset_index()
else:
data_res = x_test.iloc[:, :].reset_index()
# print (data_res.head())
for i in range(len(data_res)):
text, label = data_res['text'][i], data_res['tag_sentiment_list'][i]
text, tags, sentiment = convert(text, label)
for word, tag, sen in zip(text, tags, sentiment):
if word not in [',', '。', ' ', '\xa0', '\u2006', '\u3000', '\u2002', '\u2003', '\u2005', '\x0c', '\u2028',
'\u2009', '\u200a']:
f1.write(word + ' ' + tag + ' ' + sen + '\n')
else:
f1.write("\n")
f1.write("\n")
f1.close()
print ("process atepc finished!")
def convert_to_apc(inpath, dist_fname, flag):
    # before writing, check whether the output file already exists; delete it if so
if os.path.exists(dist_fname):
os.remove(dist_fname)
f1 = open(dist_fname, 'w', encoding='utf8')
data = pd.read_csv(inpath)
# train test split
x_train, x_test = train_test_split(data, test_size=0.2, random_state=42)
if flag == 'train':
data_res = x_train.iloc[:, :].reset_index()
else:
data_res = x_test.iloc[:, :].reset_index()
# print (data_res.head())
for i in range(len(data_res)):
text, label = data_res['text'][i], data_res['tag_sentiment_list'][i]
str1_list, str2_list, str3_list = convert_apc(text, label)
for x1, x2, x3 in zip(str1_list, str2_list, str3_list):
f1.write(x1 + '\n')
f1.write(x2 + '\n')
f1.write(x3 + '\n')
f1.close()
print ("process apc finished!")
def main(inpath, folder_name, task):
if not os.path.exists(folder_name):
os.makedirs(folder_name)
if task == 'aptepc':
# get folder name
print ("start process for an aptepc task")
folder_name_prefix = folder_name.split('/')[-1]
dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt.atepc')
dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc')
# process train
convert_to_atepc(inpath, dist_train_fname, 'train')
print ("<<< finish training data preprocess")
# process test
convert_to_atepc(inpath, dist_test_fname, 'test')
print ("<<< finish test data preprocess")
elif task == 'apc':
# get folder name
folder_name_prefix = folder_name.split('/')[-1]
dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt')
dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt')
# process train
convert_to_apc(inpath, dist_train_fname, 'train')
print ("<<< finish training data preprocess")
# process test
convert_to_apc(inpath, dist_test_fname, 'test')
print ("<<< finish test data preprocess")
elif task == 'aptepc-tag':
# get folder name
print ("start process for an aptepc tag task")
folder_name_prefix = folder_name.split('/')[-1]
dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt.atepc')
dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc')
# process train
convert_to_atepc_tag(inpath, dist_train_fname, 'train')
print ("<<< finish training data preprocess")
# process test
convert_to_atepc_tag(inpath, dist_test_fname, 'test')
print ("<<< finish test data preprocess")
main(args.inpath, args.folder_name, args.task) | [((13, 9, 13, 34), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((120, 7, 120, 33), 'os.path.exists', 'os.path.exists', ({(120, 22, 120, 32): 'dist_fname'}, {}), '(dist_fname)', False, 'import os\n'), ((124, 11, 124, 30), 'pandas.read_csv', 'pd.read_csv', ({(124, 23, 124, 29): 'inpath'}, {}), '(inpath)', True, 'import pandas as pd\n'), ((134, 22, 134, 76), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((158, 7, 158, 33), 'os.path.exists', 'os.path.exists', ({(158, 22, 158, 32): 'dist_fname'}, {}), '(dist_fname)', False, 'import os\n'), ((162, 11, 162, 30), 'pandas.read_csv', 'pd.read_csv', ({(162, 23, 162, 29): 'inpath'}, {}), '(inpath)', True, 'import pandas as pd\n'), ((176, 22, 176, 76), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((200, 7, 200, 33), 'os.path.exists', 'os.path.exists', ({(200, 22, 200, 32): 'dist_fname'}, {}), '(dist_fname)', False, 'import os\n'), ((204, 11, 204, 30), 'pandas.read_csv', 'pd.read_csv', ({(204, 23, 204, 29): 'inpath'}, {}), '(inpath)', True, 'import pandas as pd\n'), ((206, 22, 206, 76), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((112, 13, 112, 51), 're.compile', 're.compile', ({(112, 24, 112, 50): 'u"""[𐀀-\U0010ffff]"""'}, {}), "(u'[𐀀-\\U0010ffff]')", False, 'import re\n'), ((121, 8, 121, 29), 'os.remove', 'os.remove', ({(121, 18, 121, 28): 'dist_fname'}, {}), '(dist_fname)', False, 'import os\n'), ((159, 8, 159, 29), 'os.remove', 'os.remove', ({(159, 18, 159, 28): 'dist_fname'}, {}), '(dist_fname)', False, 'import os\n'), ((201, 8, 201, 29), 'os.remove', 'os.remove', ({(201, 18, 201, 28): 'dist_fname'}, {}), '(dist_fname)', False, 'import os\n'), ((228, 11, 228, 38), 'os.path.exists', 'os.path.exists', ({(228, 26, 228, 37): 'folder_name'}, {}), '(folder_name)', False, 'import os\n'), ((229, 8, 229, 32), 'os.makedirs', 'os.makedirs', ({(229, 20, 229, 31): 'folder_name'}, {}), '(folder_name)', False, 'import os\n'), ((235, 27, 235, 100), 'os.path.join', 'os.path.join', ({(235, 40, 235, 58): 'folder_name_prefix', (235, 60, 235, 99): "folder_name_prefix + '.train.txt.atepc'"}, {}), "(folder_name_prefix, folder_name_prefix + '.train.txt.atepc')", False, 'import os\n'), ((236, 26, 236, 98), 'os.path.join', 'os.path.join', ({(236, 39, 236, 57): 'folder_name_prefix', (236, 59, 236, 97): "folder_name_prefix + '.test.txt.atepc'"}, {}), "(folder_name_prefix, folder_name_prefix + '.test.txt.atepc')", False, 'import os\n'), ((114, 13, 114, 58), 're.compile', 're.compile', ({(114, 24, 114, 57): "u'[\\ud800-\\udbff][\\udc00-\\udfff]'"}, {}), "(u'[\\ud800-\\udbff][\\udc00-\\udfff]')", False, 'import re\n'), ((246, 27, 246, 94), 'os.path.join', 'os.path.join', ({(246, 40, 246, 58): 'folder_name_prefix', (246, 60, 246, 93): "folder_name_prefix + '.train.txt'"}, {}), "(folder_name_prefix, folder_name_prefix + '.train.txt')", False, 'import os\n'), ((247, 26, 247, 92), 'os.path.join', 'os.path.join', ({(247, 39, 247, 57): 'folder_name_prefix', (247, 59, 247, 91): "folder_name_prefix + '.test.txt'"}, {}), "(folder_name_prefix, folder_name_prefix + '.test.txt')", False, 'import os\n'), ((258, 27, 258, 100), 'os.path.join', 'os.path.join', ({(258, 40, 258, 58): 'folder_name_prefix', (258, 60, 258, 
99): "folder_name_prefix + '.train.txt.atepc'"}, {}), "(folder_name_prefix, folder_name_prefix + '.train.txt.atepc')", False, 'import os\n'), ((259, 26, 259, 98), 'os.path.join', 'os.path.join', ({(259, 39, 259, 57): 'folder_name_prefix', (259, 59, 259, 97): "folder_name_prefix + '.test.txt.atepc'"}, {}), "(folder_name_prefix, folder_name_prefix + '.test.txt.atepc')", False, 'import os\n')] |
dparito/10Apps-Python_w-Andy | apps/06_lolcat_factory/you_try/PRD/cat_service.py | 77ca1ec280729a9002e49071e2f31cb5bc7b75cd | import os
import shutil
import requests
def get_cat(folder, name):
url = "http://consuming-python-services-api.azurewebsites.net/cats/random"
data = get_data_from_url(url)
save_image(folder, name, data)
def get_data_from_url(url):
response = requests.get(url, stream=True)
return response.raw
def save_image(folder, name, data):
file_name = os.path.join(folder, name + '.jpg')
with open(file_name, 'wb') as fout:
shutil.copyfileobj(data, fout)
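# Example usage (illustrative; assumes the target folder already exists):
#   get_cat('./cat_pictures', 'cat_0')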
| [((14, 15, 14, 45), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((19, 16, 19, 51), 'os.path.join', 'os.path.join', ({(19, 29, 19, 35): 'folder', (19, 37, 19, 50): "name + '.jpg'"}, {}), "(folder, name + '.jpg')", False, 'import os\n'), ((21, 8, 21, 38), 'shutil.copyfileobj', 'shutil.copyfileobj', ({(21, 27, 21, 31): 'data', (21, 33, 21, 37): 'fout'}, {}), '(data, fout)', False, 'import shutil\n')] |
TryTestspace/dask | dask/dataframe/io/hdf.py | 86d4f7d8c6d48ec6c4b1de1b6cfd2d3f4e5a4c1b | from __future__ import absolute_import, division, print_function
from fnmatch import fnmatch
from glob import glob
import os
import uuid
from warnings import warn
import pandas as pd
from toolz import merge
from .io import _link
from ...base import get_scheduler
from ..core import DataFrame, new_dd_object
from ... import config, multiprocessing
from ...base import tokenize, compute_as_if_collection
from ...bytes.utils import build_name_function
from ...compatibility import PY3
from ...delayed import Delayed, delayed
from ...utils import get_scheduler_lock
def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None):
""" A wrapper function around pd_to_hdf that enables locking"""
if lock:
lock.acquire()
try:
pd_to_hdf(*args, **kwargs)
finally:
if lock:
lock.release()
return None
def to_hdf(df, path, key, mode='a', append=False, get=None, scheduler=None,
name_function=None, compute=True, lock=None, dask_kwargs={},
**kwargs):
""" Store Dask Dataframe to Hierarchical Data Format (HDF) files
This is a parallel version of the Pandas function of the same name. Please
see the Pandas docstring for more detailed information about shared keyword
arguments.
This function differs from the Pandas version by saving the many partitions
of a Dask DataFrame in parallel, either to many files, or to many datasets
    within the same file. You may specify this parallelism with an asterisk
``*`` within the filename or datapath, and an optional ``name_function``.
    The asterisk will be replaced with an increasing sequence of integers
starting from ``0`` or with the result of calling ``name_function`` on each
of those integers.
This function only supports the Pandas ``'table'`` format, not the more
specialized ``'fixed'`` format.
Parameters
----------
path: string
Path to a target filename. May contain a ``*`` to denote many filenames
key: string
Datapath within the files. May contain a ``*`` to denote many locations
name_function: function
A function to convert the ``*`` in the above options to a string.
Should take in a number from 0 to the number of partitions and return a
string. (see examples below)
compute: bool
Whether or not to execute immediately. If False then this returns a
``dask.Delayed`` value.
lock: Lock, optional
Lock to use to prevent concurrency issues. By default a
``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock``
will be used depending on your scheduler if a lock is required. See
dask.utils.get_scheduler_lock for more information about lock
selection.
**other:
See pandas.to_hdf for more information
Examples
--------
Save Data to a single file
>>> df.to_hdf('output.hdf', '/data') # doctest: +SKIP
Save data to multiple datapaths within the same file:
>>> df.to_hdf('output.hdf', '/data-*') # doctest: +SKIP
Save data to multiple files:
>>> df.to_hdf('output-*.hdf', '/data') # doctest: +SKIP
Save data to multiple files, using the multiprocessing scheduler:
>>> df.to_hdf('output-*.hdf', '/data', scheduler='processes') # doctest: +SKIP
Specify custom naming scheme. This writes files as
'2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc..
>>> from datetime import date, timedelta
>>> base = date(year=2000, month=1, day=1)
>>> def name_function(i):
... ''' Convert integer 0 to n to a string '''
... return base + timedelta(days=i)
>>> df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest: +SKIP
Returns
-------
None: if compute == True
delayed value: if compute == False
See Also
--------
read_hdf:
to_parquet:
"""
name = 'to-hdf-' + uuid.uuid1().hex
pd_to_hdf = getattr(df._partition_type, 'to_hdf')
single_file = True
single_node = True
# if path is string, format using i_name
if isinstance(path, str):
if path.count('*') + key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in file "
"path and dataset key")
fmt_obj = lambda path, i_name: path.replace('*', i_name)
if '*' in path:
single_file = False
else:
if key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in "
"dataset key")
fmt_obj = lambda path, _: path
if '*' in key:
single_node = False
if 'format' in kwargs and kwargs['format'] not in ['t', 'table']:
raise ValueError("Dask only support 'table' format in hdf files.")
if mode not in ('a', 'w', 'r+'):
raise ValueError("Mode must be one of 'a', 'w' or 'r+'")
if name_function is None:
name_function = build_name_function(df.npartitions - 1)
    # we guarantee partition order is preserved when it's saved and read
# so we enforce name_function to maintain the order of its input.
if not (single_file and single_node):
formatted_names = [name_function(i) for i in range(df.npartitions)]
if formatted_names != sorted(formatted_names):
warn("To preserve order between partitions name_function "
"must preserve the order of its input")
# If user did not specify scheduler and write is sequential default to the
# sequential scheduler. otherwise let the _get method choose the scheduler
if (get is None and
not config.get('get', None) and
scheduler is None and
not config.get('scheduler', None) and
single_node and single_file):
scheduler = 'single-threaded'
# handle lock default based on whether we're writing to a single entity
_actual_get = get_scheduler(get=get, collections=[df], scheduler=scheduler)
if lock is None:
if not single_node:
lock = True
elif not single_file and _actual_get is not multiprocessing.get:
# if we're writing to multiple files with the multiprocessing
# scheduler we don't need to lock
lock = True
else:
lock = False
if lock:
lock = get_scheduler_lock(get, df, scheduler=scheduler)
kwargs.update({'format': 'table', 'mode': mode, 'append': append})
dsk = dict()
i_name = name_function(0)
dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock,
[(df._name, 0), fmt_obj(path, i_name),
key.replace('*', i_name)], kwargs)
kwargs2 = kwargs.copy()
if single_file:
kwargs2['mode'] = 'a'
if single_node:
kwargs2['append'] = True
filenames = []
    for i in range(0, df.npartitions):
i_name = name_function(i)
filenames.append(fmt_obj(path, i_name))
for i in range(1, df.npartitions):
i_name = name_function(i)
task = (_pd_to_hdf, pd_to_hdf, lock,
[(df._name, i), fmt_obj(path, i_name),
key.replace('*', i_name)], kwargs2)
if single_file:
link_dep = i - 1 if single_node else 0
task = (_link, (name, link_dep), task)
dsk[(name, i)] = task
dsk = merge(df.dask, dsk)
if single_file and single_node:
keys = [(name, df.npartitions - 1)]
else:
keys = [(name, i) for i in range(df.npartitions)]
if compute:
compute_as_if_collection(DataFrame, dsk, keys, get=get,
scheduler=scheduler, **dask_kwargs)
return filenames
else:
return delayed([Delayed(k, dsk) for k in keys])
dont_use_fixed_error_message = """
This HDFStore is not partitionable and can only be used monolithically with
pandas. In the future when creating HDFStores use the ``format='table'``
option to ensure that your dataset can be parallelized"""
read_hdf_error_msg = """
The start and stop keywords are not supported when reading from more than
one file/dataset.
The combination is ambiguous because it could be interpreted as the starting
and stopping index per file, or starting and stopping index of the global
dataset."""
def _read_single_hdf(path, key, start=0, stop=None, columns=None,
chunksize=int(1e6), sorted_index=False, lock=None,
mode='a'):
"""
Read a single hdf file into a dask.dataframe. Used for each file in
read_hdf.
"""
def get_keys_stops_divisions(path, key, stop, sorted_index, chunksize):
"""
Get the "keys" or group identifiers which match the given key, which
can contain wildcards. This uses the hdf file identified by the
given path. Also get the index of the last row of data for each matched
key.
"""
with pd.HDFStore(path, mode=mode) as hdf:
keys = [k for k in hdf.keys() if fnmatch(k, key)]
stops = []
divisions = []
for k in keys:
storer = hdf.get_storer(k)
if storer.format_type != 'table':
raise TypeError(dont_use_fixed_error_message)
if stop is None:
stops.append(storer.nrows)
elif stop > storer.nrows:
raise ValueError("Stop keyword exceeds dataset number "
"of rows ({})".format(storer.nrows))
else:
stops.append(stop)
if sorted_index:
division = [storer.read_column('index', start=start, stop=start + 1)[0]
for start in range(0, storer.nrows, chunksize)]
division_end = storer.read_column('index',
start=storer.nrows - 1,
stop=storer.nrows)[0]
division.append(division_end)
divisions.append(division)
else:
divisions.append(None)
return keys, stops, divisions
def one_path_one_key(path, key, start, stop, columns, chunksize, division, lock):
"""
Get the data frame corresponding to one path and one key (which should
not contain any wildcards).
"""
empty = pd.read_hdf(path, key, mode=mode, stop=0)
if columns is not None:
empty = empty[columns]
token = tokenize((path, os.path.getmtime(path), key, start,
stop, empty, chunksize, division))
name = 'read-hdf-' + token
if empty.ndim == 1:
base = {'name': empty.name, 'mode': mode}
else:
base = {'columns': empty.columns, 'mode': mode}
if start >= stop:
raise ValueError("Start row number ({}) is above or equal to stop "
"row number ({})".format(start, stop))
def update(s):
new = base.copy()
new.update({'start': s, 'stop': s + chunksize})
return new
dsk = dict(((name, i), (_pd_read_hdf, path, key, lock,
update(s)))
for i, s in enumerate(range(start, stop, chunksize)))
if division:
divisions = division
else:
divisions = [None] * (len(dsk) + 1)
return new_dd_object(dsk, name, empty, divisions)
keys, stops, divisions = get_keys_stops_divisions(path, key, stop, sorted_index, chunksize)
if (start != 0 or stop is not None) and len(keys) > 1:
raise NotImplementedError(read_hdf_error_msg)
from ..multi import concat
return concat([one_path_one_key(path, k, start, s, columns, chunksize, d, lock)
for k, s, d in zip(keys, stops, divisions)])
def _pd_read_hdf(path, key, lock, kwargs):
""" Read from hdf5 file with a lock """
if lock:
lock.acquire()
try:
result = pd.read_hdf(path, key, **kwargs)
finally:
if lock:
lock.release()
return result
def read_hdf(pattern, key, start=0, stop=None, columns=None,
chunksize=1000000, sorted_index=False, lock=True, mode='a'):
"""
Read HDF files into a Dask DataFrame
Read hdf files into a dask dataframe. This function is like
``pandas.read_hdf``, except it can read from a single large file, or from
multiple files, or from multiple keys from the same file.
Parameters
----------
pattern : string, list
File pattern (string), buffer to read from, or list of file
paths. Can contain wildcards.
key : group identifier in the store. Can contain wildcards
start : optional, integer (defaults to 0), row number to start at
stop : optional, integer (defaults to None, the last row), row number to
stop at
columns : list of columns, optional
A list of columns that if not None, will limit the return
columns (default is None)
chunksize : positive integer, optional
Maximal number of rows per partition (default is 1000000).
sorted_index : boolean, optional
Option to specify whether or not the input hdf files have a sorted
index (default is False).
lock : boolean, optional
Option to use a lock to prevent concurrency issues (default is True).
mode : {'a', 'r', 'r+'}, default 'a'. Mode to use when opening file(s).
'r'
Read-only; no data can be modified.
'a'
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
'r+'
It is similar to 'a', but the file must already exist.
Returns
-------
dask.DataFrame
Examples
--------
Load single file
>>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP
Load multiple files
>>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP
>>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') # doctest: +SKIP
Load multiple datasets
>>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP
"""
if lock is True:
lock = get_scheduler_lock()
key = key if key.startswith('/') else '/' + key
if isinstance(pattern, str):
paths = sorted(glob(pattern))
else:
paths = pattern
if (start != 0 or stop is not None) and len(paths) > 1:
raise NotImplementedError(read_hdf_error_msg)
if chunksize <= 0:
raise ValueError("Chunksize must be a positive integer")
if (start != 0 or stop is not None) and sorted_index:
raise ValueError("When assuming pre-partitioned data, data must be "
"read in its entirety using the same chunksizes")
from ..multi import concat
return concat([_read_single_hdf(path, key, start=start, stop=stop,
columns=columns, chunksize=chunksize,
sorted_index=sorted_index,
lock=lock, mode=mode)
for path in paths])
if PY3:
from ..core import _Frame
_Frame.to_hdf.__doc__ = to_hdf.__doc__
| [((215, 10, 215, 29), 'toolz.merge', 'merge', ({(215, 16, 215, 23): 'df.dask', (215, 25, 215, 28): 'dsk'}, {}), '(df.dask, dsk)', False, 'from toolz import merge\n'), ((291, 16, 291, 57), 'pandas.read_hdf', 'pd.read_hdf', (), '', True, 'import pandas as pd\n'), ((336, 17, 336, 49), 'pandas.read_hdf', 'pd.read_hdf', ({(336, 29, 336, 33): 'path', (336, 35, 336, 38): 'key'}, {}), '(path, key, **kwargs)', True, 'import pandas as pd\n'), ((118, 23, 118, 35), 'uuid.uuid1', 'uuid.uuid1', ({}, {}), '()', False, 'import uuid\n'), ((159, 12, 160, 56), 'warnings.warn', 'warn', ({(159, 17, 160, 55): '"""To preserve order between partitions name_function must preserve the order of its input"""'}, {}), "(\n 'To preserve order between partitions name_function must preserve the order of its input'\n )", False, 'from warnings import warn\n'), ((257, 13, 257, 41), 'pandas.HDFStore', 'pd.HDFStore', (), '', True, 'import pandas as pd\n'), ((405, 23, 405, 36), 'glob.glob', 'glob', ({(405, 28, 405, 35): 'pattern'}, {}), '(pattern)', False, 'from glob import glob\n'), ((295, 32, 295, 54), 'os.path.getmtime', 'os.path.getmtime', ({(295, 49, 295, 53): 'path'}, {}), '(path)', False, 'import os\n'), ((258, 45, 258, 60), 'fnmatch.fnmatch', 'fnmatch', ({(258, 53, 258, 54): 'k', (258, 56, 258, 59): 'key'}, {}), '(k, key)', False, 'from fnmatch import fnmatch\n'), ((279, 20, 279, 49), '__future__.division.append', 'division.append', ({(279, 36, 279, 48): 'division_end'}, {}), '(division_end)', False, 'from __future__ import absolute_import, division, print_function\n')] |
mononobi/charma-server | src/charma/media_info/manager.py | ed90f5ec0b5ff3996232d5fe49a4f77f96d82ced | # -*- coding: utf-8 -*-
"""
media info manager module.
"""
from pyrin.core.mixin import HookMixin
from pyrin.core.structs import Manager
import pyrin.utils.path as path_utils
from charma.media_info import MediaInfoPackage
from charma.media_info.interface import AbstractMediaInfoProvider
from charma.media_info.exceptions import InvalidMediaInfoProviderTypeError
class MediaInfoManager(Manager, HookMixin):
"""
media info manager class.
"""
package_class = MediaInfoPackage
hook_type = AbstractMediaInfoProvider
invalid_hook_type_error = InvalidMediaInfoProviderTypeError
REQUIRED_INFO = ('runtime', 'width', 'height')
def _is_complete(self, info):
"""
gets a value indicating that given media info is complete.
:param dict info: media info to be checked.
:rtype: bool
"""
for item in self.REQUIRED_INFO:
result = info.get(item)
if result is None or result <= 0:
return False
return True
def register_provider(self, instance):
"""
registers the given instance into media info providers.
:param AbstractMediaInfoProvider instance: media info provider instance
to be registered.
:raises InvalidMediaInfoProviderTypeError: invalid media info provider type error.
"""
self.register_hook(instance)
def get_info(self, file, **options):
"""
gets a dict containing media info of given file.
:param str file: absolute path of video file.
:raises InvalidPathError: invalid path error.
:raises PathIsNotAbsoluteError: path is not absolute error.
:raises PathNotExistedError: path not existed error.
        :raises IsNotFileError: is not a file error.
:returns: dict(int runtime,
int width,
int height)
:rtype: dict
"""
path_utils.assert_is_file(file)
result = dict()
for provider in self._get_hooks():
current_result = provider.get_info(file, **options)
result.update(current_result)
if self._is_complete(result) is True:
break
result.setdefault('runtime', 0)
result.setdefault('width', 0)
result.setdefault('height', 0)
return result
| [((72, 8, 72, 39), 'pyrin.utils.path.assert_is_file', 'path_utils.assert_is_file', ({(72, 34, 72, 38): 'file'}, {}), '(file)', True, 'import pyrin.utils.path as path_utils\n')] |
FlorisHoogenboom/BoxRec | tests/test_parsers.py | c9cc5d149318f916facdf57d7dbe94e797d81582 | import unittest
from boxrec.parsers import FightParser
class MockResponse(object):
def __init__(self, content, encoding, url):
        self.content = content
self.encoding = encoding
self.url = url
class TestFightParser(unittest.TestCase):
def setUp(self):
with open('mock_data/fights/draw.html', 'rb') as file:
self.drawn_fight = file.read()
self.parser = FightParser()
def test_parses_draw(self):
"""Test it correctly handles draws"""
mock_response = MockResponse(
self.drawn_fight,
'UTF-8',
"http://boxrec.com/en/event/115689/202488"
)
result = self.parser.parse(mock_response)
self.assertEqual(result.winner, 'drawn', "Result should equal draw.")
class TestBoxerParser(unittest.TestCase):
pass
| [((17, 22, 17, 35), 'boxrec.parsers.FightParser', 'FightParser', ({}, {}), '()', False, 'from boxrec.parsers import FightParser\n')] |
ErikGartner/hyperdock | hyperdock/common/workqueue.py | 19510b4bf1e123576d7be067555d959cb8a7cf45 | from datetime import datetime, timedelta
from bson.objectid import ObjectId
WORK_TIMEOUT = 600
class WorkQueue:
"""
A simple MongoDB priority work queue that handles the queue
    of experiments.
"""
def __init__(self, mongodb):
super().__init__()
self._mongodb = mongodb
self._collection = mongodb.workqueue
def assign_next_job(self, worker_id):
"""
        Assigns the next free job to a worker.
        Returns the job document from MongoDB.
"""
t = datetime.utcnow()
job = self._collection.find_and_modify(
query={"start_time": -1, "cancelled": False},
sort=[("priority", -1), ("created_on", 1)],
update={"$set": {"start_time": t, "last_update": t, "worker": worker_id}},
new=True,
)
return job
def add_job(self, parameters, data, trial_id, trial_name, priority=0):
"""
Adds new work to the workqueue.
"""
id = self._collection.insert(
{
"start_time": -1,
"end_time": -1,
"last_update": -1,
"created_on": datetime.utcnow(),
"priority": priority,
"parameters": parameters,
"data": data,
"worker": None,
"result": {},
"trial": trial_id,
"trial_name": trial_name,
"_id": str(ObjectId()),
"cancelled": False,
"orphaned": False,
}
)
return id
def update_job(self, _id, update=None):
"""
        Marks the job as alive and posts an update from the job.
"""
t = datetime.utcnow()
self._collection.update(
{"_id": _id}, {"$set": {"last_update": t, "update": update}}
)
def is_job_cancelled(self, _id):
"""
        Checks if a certain job has been cancelled or altogether removed.
"""
return self._collection.find_one({"_id": _id, "cancelled": False}) is None
def finish_job(self, _id, result):
"""
        Marks the job as finished and attaches the result.
"""
t = datetime.utcnow()
self._collection.update_one(
{"_id": _id}, {"$set": {"end_time": t, "last_update": t, "result": result}}
)
def purge_dead_jobs(self):
"""
        Returns jobs that have timed out due to worker death and cancels them.
"""
now = datetime.utcnow()
deadline = now - timedelta(seconds=WORK_TIMEOUT)
jobs = []
while True:
job = self._collection.find_and_modify(
query={
"start_time": {"$ne": -1},
"end_time": -1,
"last_update": {"$lt": deadline},
},
sort=[("priority", -1), ("last_update", 1)],
update={
"$set": {
"cancelled": True,
"orphaned": True,
"end_time": now,
"result": {"state": "fail", "msg": "Timed out!"},
}
},
new=True,
)
if job is not None:
jobs.append(job)
else:
return jobs
def check_for_orphans(self, id_list):
"""
        Checks which of the given Docker container ids are marked as orphans.
Returns a list of (Docker id, experiment id) tuples.
"""
jobs = self._collection.find(
{"orphaned": True, "update.container.long_id": {"$in": id_list}}
)
return [(j["update"]["container"]["long_id"], j["_id"]) for j in list(jobs)]
def not_orphaned(self, _id):
"""
Marks a job as not orphaned.
"""
job = self._collection.find_and_modify(
query={"_id": _id}, update={"$set": {"orphaned": False}}, new=True
)
return job is not None
def cancel_invalid_jobs(self, trial_list):
"""
Takes a list of all active (not finished, cancelled or removed) trial ids.
        Work that is not associated with any of these is cancelled.
"""
now = datetime.utcnow()
jobs = []
while True:
job = self._collection.find_and_modify(
query={"trial": {"$nin": trial_list}, "end_time": -1},
update={
"$set": {
"cancelled": True,
"end_time": now,
"result": {"state": "fail", "msg": "Abandoned"},
}
},
new=True,
)
if job is not None:
jobs.append(job)
else:
return jobs
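# Added illustrative lifecycle of a job ('db' is an assumed pymongo database
# handle and the literal values are made up, not taken from this project):
#
#     wq = WorkQueue(db)
#     wq.add_job({'lr': 0.1}, {'image': 'repo/image:tag'}, 'trial-1', 'my-trial')
#     job = wq.assign_next_job('worker-1')
#     wq.update_job(job['_id'], update={'progress': 0.5})
#     wq.finish_job(job['_id'], {'state': 'ok', 'loss': 0.3})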
| [((25, 12, 25, 29), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((62, 12, 62, 29), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((77, 12, 77, 29), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((86, 14, 86, 31), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((137, 14, 137, 31), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((87, 25, 87, 56), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((43, 30, 43, 47), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((51, 27, 51, 37), 'bson.objectid.ObjectId', 'ObjectId', ({}, {}), '()', False, 'from bson.objectid import ObjectId\n')] |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/apache_libcloud-0.15.1-py2.7.egg/libcloud/test/test_connection.py | 829b5094bba18bbe03ae97daf925fee40a8476e8 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import ssl
from mock import Mock, call
from libcloud.test import unittest
from libcloud.common.base import Connection
from libcloud.common.base import LoggingConnection
class ConnectionClassTestCase(unittest.TestCase):
def setUp(self):
self.originalConnect = Connection.connect
self.originalResponseCls = Connection.responseCls
Connection.connect = Mock()
Connection.responseCls = Mock()
Connection.allow_insecure = True
def tearDown(self):
Connection.connect = self.originalConnect
Connection.responseCls = Connection.responseCls
Connection.allow_insecure = True
def test_dont_allow_insecure(self):
Connection.allow_insecure = True
Connection(secure=False)
Connection.allow_insecure = False
expected_msg = (r'Non https connections are not allowed \(use '
                        r'secure=True\)')
self.assertRaisesRegexp(ValueError, expected_msg, Connection,
secure=False)
def test_content_length(self):
con = Connection()
con.connection = Mock()
# GET method
# No data, no content length should be present
con.request('/test', method='GET', data=None)
call_kwargs = con.connection.request.call_args[1]
self.assertTrue('Content-Length' not in call_kwargs['headers'])
# '' as data, no content length should be present
con.request('/test', method='GET', data='')
call_kwargs = con.connection.request.call_args[1]
self.assertTrue('Content-Length' not in call_kwargs['headers'])
# 'a' as data, content length should be present (data in GET is not
# correct, but anyways)
con.request('/test', method='GET', data='a')
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '1')
# POST, PUT method
# No data, content length should be present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data=None)
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '0')
# '' as data, content length should be present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data='')
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '0')
# No data, raw request, do not touch Content-Length if present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data=None,
headers={'Content-Length': '42'}, raw=True)
putheader_call_list = con.connection.putheader.call_args_list
self.assertIn(call('Content-Length', '42'), putheader_call_list)
# '' as data, raw request, do not touch Content-Length if present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data=None,
headers={'Content-Length': '42'}, raw=True)
putheader_call_list = con.connection.putheader.call_args_list
self.assertIn(call('Content-Length', '42'), putheader_call_list)
# 'a' as data, content length should be present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data='a')
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '1')
def test_cache_busting(self):
params1 = {'foo1': 'bar1', 'foo2': 'bar2'}
params2 = [('foo1', 'bar1'), ('foo2', 'bar2')]
con = Connection()
con.connection = Mock()
con.pre_connect_hook = Mock()
con.pre_connect_hook.return_value = {}, {}
con.cache_busting = False
con.request(action='/path', params=params1)
args, kwargs = con.pre_connect_hook.call_args
self.assertFalse('cache-busting' in args[0])
self.assertEqual(args[0], params1)
con.request(action='/path', params=params2)
args, kwargs = con.pre_connect_hook.call_args
self.assertFalse('cache-busting' in args[0])
self.assertEqual(args[0], params2)
con.cache_busting = True
con.request(action='/path', params=params1)
args, kwargs = con.pre_connect_hook.call_args
self.assertTrue('cache-busting' in args[0])
con.request(action='/path', params=params2)
args, kwargs = con.pre_connect_hook.call_args
self.assertTrue('cache-busting' in args[0][len(params2)])
def test_context_is_reset_after_request_has_finished(self):
context = {'foo': 'bar'}
def responseCls(connection, response):
connection.called = True
self.assertEqual(connection.context, context)
con = Connection()
con.called = False
con.connection = Mock()
con.responseCls = responseCls
con.set_context(context)
self.assertEqual(con.context, context)
con.request('/')
# Context should have been reset
self.assertTrue(con.called)
self.assertEqual(con.context, {})
# Context should also be reset if a method inside request throws
con = Connection()
con.connection = Mock()
con.set_context(context)
self.assertEqual(con.context, context)
con.connection.request = Mock(side_effect=ssl.SSLError())
try:
con.request('/')
except ssl.SSLError:
pass
self.assertEqual(con.context, {})
con.connection = Mock()
con.set_context(context)
self.assertEqual(con.context, context)
con.responseCls = Mock(side_effect=ValueError())
try:
con.request('/')
except ValueError:
pass
self.assertEqual(con.context, {})
def test_log_curl(self):
url = '/test/path'
body = None
headers = {}
con = LoggingConnection()
con.protocol = 'http'
con.host = 'example.com'
con.port = 80
for method in ['GET', 'POST', 'PUT', 'DELETE']:
cmd = con._log_curl(method=method, url=url, body=body,
headers=headers)
self.assertEqual(cmd, 'curl -i -X %s --compress http://example.com:80/test/path' %
(method))
# Should use --head for head requests
cmd = con._log_curl(method='HEAD', url=url, body=body, headers=headers)
self.assertEqual(cmd, 'curl -i --head --compress http://example.com:80/test/path')
if __name__ == '__main__':
sys.exit(unittest.main())
| [((32, 29, 32, 35), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock, call\n'), ((33, 33, 33, 39), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock, call\n'), ((43, 8, 43, 32), 'libcloud.common.base.Connection', 'Connection', (), '', False, 'from libcloud.common.base import Connection\n'), ((53, 14, 53, 26), 'libcloud.common.base.Connection', 'Connection', ({}, {}), '()', False, 'from libcloud.common.base import Connection\n'), ((54, 25, 54, 31), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock, call\n'), ((110, 14, 110, 26), 'libcloud.common.base.Connection', 'Connection', ({}, {}), '()', False, 'from libcloud.common.base import Connection\n'), ((111, 25, 111, 31), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock, call\n'), ((112, 31, 112, 37), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock, call\n'), ((143, 14, 143, 26), 'libcloud.common.base.Connection', 'Connection', ({}, {}), '()', False, 'from libcloud.common.base import Connection\n'), ((145, 25, 145, 31), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock, call\n'), ((158, 14, 158, 26), 'libcloud.common.base.Connection', 'Connection', ({}, {}), '()', False, 'from libcloud.common.base import Connection\n'), ((159, 25, 159, 31), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock, call\n'), ((173, 25, 173, 31), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock, call\n'), ((191, 14, 191, 33), 'libcloud.common.base.LoggingConnection', 'LoggingConnection', ({}, {}), '()', False, 'from libcloud.common.base import LoggingConnection\n'), ((207, 13, 207, 28), 'libcloud.test.unittest.main', 'unittest.main', ({}, {}), '()', False, 'from libcloud.test import unittest\n'), ((91, 26, 91, 54), 'mock.call', 'call', ({(91, 31, 91, 47): '"""Content-Length"""', (91, 49, 91, 53): '"""42"""'}, {}), "('Content-Length', '42')", False, 'from mock import Mock, call\n'), ((98, 26, 98, 54), 'mock.call', 'call', ({(98, 31, 98, 47): '"""Content-Length"""', (98, 49, 98, 53): '"""42"""'}, {}), "('Content-Length', '42')", False, 'from mock import Mock, call\n'), ((164, 50, 164, 64), 'ssl.SSLError', 'ssl.SSLError', ({}, {}), '()', False, 'import ssl\n')] |
mamadbiabon/iGibson | igibson/utils/data_utils/ext_object/scripts/step_1_visual_mesh.py | d416a470240eb7ad86e04fee475ae4bd67263a7c | import os
import sys
import bpy
script_dir = os.path.dirname(os.path.abspath(__file__))
utils_dir = os.path.join(script_dir, "../../blender_utils")
sys.path.append(utils_dir)
from utils import bake_model, clean_unused, export_ig_object, import_obj_folder
#############################################
# Parse command line arguments
#############################################
def get_arg(argv, flag, default=None):
if flag in argv:
return argv[argv.index(flag) + 1]
return default
should_bake = "--bake" in sys.argv
axis = ["X", "Y", "Z", "-X", "-Y", "-Z"]
import_axis_up = get_arg(sys.argv, "--up", default="Z")
if import_axis_up not in axis:
raise ValueError("Axis up not supported: {} (should be among X,Y,Z,-X,-Y,-Z)".format(import_axis_up))
import_axis_forward = get_arg(sys.argv, "--forward", default="X")
if import_axis_forward not in axis:
raise ValueError("Axis forward not supported: {} (should be among X,Y,Z,-X,-Y,-Z)".format(import_axis_forward))
source_dir = get_arg(sys.argv, "--source_dir")
if source_dir is None:
raise ValueError("Source directory not specified.")
dest_dir = get_arg(sys.argv, "--dest_dir")
if dest_dir is None:
raise ValueError("Destination directory not specified.")
os.makedirs(dest_dir, exist_ok=True)
model_id = os.path.basename(source_dir)
#############################################
# Importing obj files from source dir
#############################################
for on in bpy.context.scene.objects.keys():
obj = bpy.context.scene.objects[on]
bpy.data.objects.remove(obj)
clean_unused()
import_obj_folder(model_id, source_dir, up=import_axis_up, forward=import_axis_forward)
#############################################
# Optional UV Unwrapping
# This is only needed if baking will be performed
#############################################
if should_bake:
uv_unwrapped = True
for o in bpy.context.scene.objects:
if not o.data.uv_layers:
uv_unwrapped = False
if not uv_unwrapped:
bpy.ops.object.mode_set(mode="OBJECT")
vl = bpy.context.view_layer
bpy.ops.object.select_all(action="DESELECT")
for on in bpy.context.scene.objects.keys():
obj = bpy.context.scene.objects[on]
new_uv = bpy.context.scene.objects[on].data.uv_layers.new(name="obj_uv")
vl.objects.active = obj
obj.select_set(True)
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.select_all(action="SELECT")
bpy.ops.uv.smart_project(angle_limit=66, island_margin=0.02)
bpy.context.tool_settings.mesh_select_mode = (False, False, True)
bpy.ops.object.mode_set(mode="OBJECT")
#############################################
# Export models
#############################################
export_ig_object(dest_dir, save_material=not should_bake)
#############################################
# Optional Texture Baking
#############################################
if should_bake:
mat_dir = os.path.join(dest_dir, "material")
os.makedirs(mat_dir, exist_ok=True)
# bpy.ops.wm.open_mainfile(filepath=blend_path)
# import_ig_object(model_root, import_mat=True)
for obj in bpy.context.scene.objects:
obj.select_set(True)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.select_all(action="SELECT")
bpy.ops.object.join()
channels = {
"DIFFUSE": (2048, 32),
"ROUGHNESS": (1024, 16),
"METALLIC": (1024, 16),
"NORMAL": (1024, 16),
}
bake_model(mat_dir, channels, overwrite=True)
bpy.ops.wm.quit_blender()
| [((7, 12, 7, 59), 'os.path.join', 'os.path.join', ({(7, 25, 7, 35): 'script_dir', (7, 37, 7, 58): '"""../../blender_utils"""'}, {}), "(script_dir, '../../blender_utils')", False, 'import os\n'), ((8, 0, 8, 26), 'sys.path.append', 'sys.path.append', ({(8, 16, 8, 25): 'utils_dir'}, {}), '(utils_dir)', False, 'import sys\n'), ((40, 0, 40, 36), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((42, 11, 42, 39), 'os.path.basename', 'os.path.basename', ({(42, 28, 42, 38): 'source_dir'}, {}), '(source_dir)', False, 'import os\n'), ((47, 10, 47, 42), 'bpy.context.scene.objects.keys', 'bpy.context.scene.objects.keys', ({}, {}), '()', False, 'import bpy\n'), ((50, 0, 50, 14), 'utils.clean_unused', 'clean_unused', ({}, {}), '()', False, 'from utils import bake_model, clean_unused, export_ig_object, import_obj_folder\n'), ((52, 0, 52, 87), 'utils.import_obj_folder', 'import_obj_folder', (), '', False, 'from utils import bake_model, clean_unused, export_ig_object, import_obj_folder\n'), ((82, 0, 82, 57), 'utils.export_ig_object', 'export_ig_object', (), '', False, 'from utils import bake_model, clean_unused, export_ig_object, import_obj_folder\n'), ((108, 0, 108, 25), 'bpy.ops.wm.quit_blender', 'bpy.ops.wm.quit_blender', ({}, {}), '()', False, 'import bpy\n'), ((6, 29, 6, 54), 'os.path.abspath', 'os.path.abspath', ({(6, 45, 6, 53): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((49, 4, 49, 32), 'bpy.data.objects.remove', 'bpy.data.objects.remove', ({(49, 28, 49, 31): 'obj'}, {}), '(obj)', False, 'import bpy\n'), ((89, 14, 89, 48), 'os.path.join', 'os.path.join', ({(89, 27, 89, 35): 'dest_dir', (89, 37, 89, 47): '"""material"""'}, {}), "(dest_dir, 'material')", False, 'import os\n'), ((90, 4, 90, 39), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((97, 4, 97, 46), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', (), '', False, 'import bpy\n'), ((98, 4, 98, 25), 'bpy.ops.object.join', 'bpy.ops.object.join', ({}, {}), '()', False, 'import bpy\n'), ((106, 4, 106, 49), 'utils.bake_model', 'bake_model', (), '', False, 'from utils import bake_model, clean_unused, export_ig_object, import_obj_folder\n'), ((64, 8, 64, 46), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', (), '', False, 'import bpy\n'), ((66, 8, 66, 52), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', (), '', False, 'import bpy\n'), ((67, 18, 67, 50), 'bpy.context.scene.objects.keys', 'bpy.context.scene.objects.keys', ({}, {}), '()', False, 'import bpy\n'), ((72, 8, 72, 40), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ({}, {}), '()', False, 'import bpy\n'), ((73, 8, 73, 48), 'bpy.ops.mesh.select_all', 'bpy.ops.mesh.select_all', (), '', False, 'import bpy\n'), ((74, 8, 74, 68), 'bpy.ops.uv.smart_project', 'bpy.ops.uv.smart_project', (), '', False, 'import bpy\n'), ((76, 8, 76, 46), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', (), '', False, 'import bpy\n')] |
mail2nsrajesh/python-ceilometerclient | ceilometerclient/common/base.py | 3b4e35abada626ce052f20d55c71fe12ab77052a | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
import copy
from ceilometerclient.apiclient import base
from ceilometerclient.apiclient import exceptions
from ceilometerclient import exc
def getid(obj):
"""Extracts object ID.
Abstracts the common pattern of allowing both an object or an
object's ID (UUID) as a parameter when dealing with relationships.
"""
try:
return obj.id
except AttributeError:
return obj
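# Added example (the UUID below is made up): both calls return the same bare
# ID string.
#     getid(alarm_object)                               # object with an .id
#     getid('5e3bcbce-0f42-4f6b-94b4-1f3d7c6b8f2a')     # already an ID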
class Manager(object):
"""Managers interact with a particular type of API.
    It works with samples, meters, alarms, etc. and provides CRUD operations for
them.
"""
resource_class = None
def __init__(self, api):
self.api = api
@property
def client(self):
"""Compatible with latest oslo-incubator.apiclient code."""
return self.api
def _create(self, url, body):
body = self.api.post(url, json=body).json()
if body:
return self.resource_class(self, body)
def _list(self, url, response_key=None, obj_class=None, body=None,
expect_single=False):
try:
resp = self.api.get(url)
except exceptions.NotFound:
raise exc.HTTPNotFound
if not resp.content:
raise exc.HTTPNotFound
body = resp.json()
if obj_class is None:
obj_class = self.resource_class
if response_key:
try:
data = body[response_key]
except KeyError:
return []
else:
data = body
if expect_single:
data = [data]
return [obj_class(self, res, loaded=True) for res in data if res]
def _update(self, url, body, response_key=None):
body = self.api.put(url, json=body).json()
# PUT requests may not return a body
if body:
return self.resource_class(self, body)
def _delete(self, url):
self.api.delete(url)
class Resource(base.Resource):
"""A resource represents a particular instance of an object.
Resource might be tenant, user, etc.
This is pretty much just a bag for attributes.
:param manager: Manager object
:param info: dictionary representing resource attributes
:param loaded: prevent lazy-loading if set to True
"""
def to_dict(self):
return copy.deepcopy(self._info)
| [((106, 15, 106, 40), 'copy.deepcopy', 'copy.deepcopy', ({(106, 29, 106, 39): 'self._info'}, {}), '(self._info)', False, 'import copy\n')] |
freyes/charm-azure-integrator | lib/charms/layer/azure.py | 9c96eed30388e5e7ae2ff590574890e27e845b5c | import json
import os
import re
import subprocess
from base64 import b64decode
from enum import Enum
from math import ceil, floor
from pathlib import Path
from urllib.error import HTTPError
from urllib.request import urlopen
import yaml
from charmhelpers.core import hookenv
from charmhelpers.core.unitdata import kv
from charms.layer import status
ENTITY_PREFIX = 'charm.azure'
MODEL_UUID = os.environ['JUJU_MODEL_UUID']
MAX_ROLE_NAME_LEN = 64
MAX_POLICY_NAME_LEN = 128
class StandardRole(Enum):
NETWORK_MANAGER = '4d97b98b-1d4f-4787-a291-c67834d212e7'
SECURITY_MANAGER = 'e3d13bf0-dd5a-482e-ba6b-9b8433878d10'
DNS_MANAGER = 'befefa01-2a29-4197-83a8-272ff33ce314'
OBJECT_STORE_READER = '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1'
OBJECT_STORE_MANAGER = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe'
# When debugging hooks, for some reason HOME is set to /home/ubuntu, whereas
# during normal hook execution, it's /root. Set it here to be consistent.
os.environ['HOME'] = '/root'
def log(msg, *args):
hookenv.log(msg.format(*args), hookenv.INFO)
def log_err(msg, *args):
hookenv.log(msg.format(*args), hookenv.ERROR)
def get_credentials():
"""
Get the credentials from either the config or the hook tool.
Prefers the config so that it can be overridden.
"""
no_creds_msg = 'missing credentials; set credentials config'
config = hookenv.config()
# try to use Juju's trust feature
try:
result = subprocess.run(['credential-get'],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
        creds = yaml.safe_load(result.stdout.decode('utf8'))
creds_data = creds['credential']['attributes']
login_cli(creds_data)
return True
except FileNotFoundError:
pass # juju trust not available
except subprocess.CalledProcessError as e:
if 'permission denied' not in e.stderr.decode('utf8'):
raise
no_creds_msg = 'missing credentials access; grant with: juju trust'
# try credentials config
if config['credentials']:
try:
creds_data = b64decode(config['credentials']).decode('utf8')
login_cli(creds_data)
return True
except Exception:
status.blocked('invalid value for credentials config')
return False
# no creds provided
status.blocked(no_creds_msg)
return False
def login_cli(creds_data):
"""
Use the credentials to authenticate the Azure CLI.
"""
app_id = creds_data['application-id']
app_pass = creds_data['application-password']
sub_id = creds_data['subscription-id']
tenant_id = _get_tenant_id(sub_id)
try:
log('Forcing logout of Azure CLI')
_azure('logout')
except AzureError:
pass
try:
log('Logging in to Azure CLI')
_azure('login',
'--service-principal',
'-u', app_id,
'-p', app_pass,
'-t', tenant_id)
# cache the subscription ID for use in roles
kv().set('charm.azure.sub-id', sub_id)
except AzureError as e:
# redact the credential info from the exception message
stderr = re.sub(app_id, '<app-id>', e.args[0])
stderr = re.sub(app_pass, '<app-pass>', stderr)
stderr = re.sub(tenant_id, '<tenant-id>', stderr)
# from None suppresses the previous exception from the stack trace
raise AzureError(stderr) from None
def ensure_msi(request):
msi = _get_msi(request.vm_id)
if not msi:
log('Enabling Managed Service Identity')
result = _azure('vm', 'identity', 'assign',
'--name', request.vm_name,
'--resource-group', request.resource_group)
vm_identities = kv().get('charm.azure.vm-identities', {})
msi = vm_identities[request.vm_id] = result['systemAssignedIdentity']
kv().set('charm.azure.vm-identities', vm_identities)
log('Instance MSI is: {}', msi)
def send_additional_metadata(request):
"""
Get additional info about the requesting instance via the API that isn't
available from the metadata server.
"""
res_grp = _azure('group', 'show', '--name', request.resource_group)
# hard-code most of these because with Juju, they're always the same
# and the queries required to look them up are a PITA
request.send_additional_metadata(
resource_group_location=res_grp['location'],
vnet_name='juju-internal-network',
vnet_resource_group=request.resource_group,
subnet_name='juju-internal-subnet',
security_group_name='juju-internal-nsg',
)
def tag_instance(request):
"""
Tag the given instance with the given tags.
"""
log('Tagging instance with: {}', request.instance_tags)
_azure('vm', 'update',
'--name', request.vm_name,
'--resource-group', request.resource_group,
'--set', *['tags.{}={}'.format(tag, value)
for tag, value in request.instance_tags.items()])
def enable_instance_inspection(request):
"""
Enable instance inspection access for the given application.
"""
log('Enabling instance inspection')
_assign_role(request, _get_role('vm-reader'))
def enable_network_management(request):
"""
Enable network management for the given application.
"""
log('Enabling network management')
_assign_role(request, StandardRole.NETWORK_MANAGER)
def enable_security_management(request):
"""
Enable security management for the given application.
"""
log('Enabling security management')
_assign_role(request, StandardRole.SECURITY_MANAGER)
def enable_block_storage_management(request):
"""
Enable block storage (disk) management for the given application.
"""
log('Enabling block storage management')
_assign_role(request, _get_role('disk-manager'))
def enable_dns_management(request):
"""
Enable DNS management for the given application.
"""
log('Enabling DNS management')
_assign_role(request, StandardRole.DNS_MANAGER)
def enable_object_storage_access(request):
"""
Enable object storage read-only access for the given application.
"""
log('Enabling object storage read')
_assign_role(request, StandardRole.OBJECT_STORE_READER)
def enable_object_storage_management(request):
"""
Enable object storage management for the given application.
"""
log('Enabling object store management')
_assign_role(request, StandardRole.OBJECT_STORE_MANAGER)
def cleanup():
"""
Perform cleanup.
"""
pass
# Internal helpers
class AzureError(Exception):
"""
Exception class representing an error returned from the azure-cli tool.
"""
@classmethod
def get(cls, message):
"""
Factory method to create either an instance of this class or a
meta-subclass for certain `message`s.
"""
if 'already exists' in message:
return AlreadyExistsAzureError(message)
return AzureError(message)
class AlreadyExistsAzureError(AzureError):
"""
Meta-error subclass of AzureError representing something already existing.
"""
pass
def _elide(s, max_len, ellipsis='...'):
"""
Elide s in the middle to ensure it is under max_len.
That is, shorten the string, inserting an ellipsis where the removed
characters were to show that they've been removed.
"""
if len(s) > max_len:
hl = (max_len - len(ellipsis)) / 2
headl, taill = floor(hl), ceil(hl)
s = s[:headl] + ellipsis + s[-taill:]
return s
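# Added worked example: _elide('abcdefghij', 7) keeps floor(2.0) = 2 leading
# and ceil(2.0) = 2 trailing characters and returns 'ab...ij' (7 characters).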
def _get_tenant_id(subscription_id):
"""
Translate the subscription ID into a tenant ID by making an unauthorized
request to the API and extracting the tenant ID from the WWW-Authenticate
header in the error response.
"""
url = ('https://management.azure.com/subscriptions/'
'{}?api-version=2018-03-01-01.6.1'.format(subscription_id))
try:
urlopen(url)
log_err('Error getting tenant ID: did not get "unauthorized" response')
return None
except HTTPError as e:
if 'WWW-Authenticate' not in e.headers:
log_err('Error getting tenant ID: missing WWW-Authenticate header')
return None
www_auth = e.headers['WWW-Authenticate']
match = re.search(r'authorization_uri="[^"]*/([^/"]*)"', www_auth)
if not match:
log_err('Error getting tenant ID: unable to find in {}', www_auth)
return None
return match.group(1)
def _azure(cmd, *args, return_stderr=False):
"""
Call the azure-cli tool.
"""
cmd = ['az', cmd]
cmd.extend(args)
result = subprocess.run(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = result.stdout.decode('utf8').strip()
stderr = result.stderr.decode('utf8').strip()
if result.returncode != 0:
raise AzureError.get(stderr)
if return_stderr:
return stderr
if stdout:
stdout = json.loads(stdout)
return stdout
def _get_msi(vm_id):
"""
Get the Managed System Identity for the VM.
"""
vm_identities = kv().get('charm.azure.vm-identities', {})
return vm_identities.get(vm_id)
def _get_role(role_name):
"""
Translate short role name into a full role name and ensure that the
custom role is loaded.
The custom roles have to be applied to a specific subscription ID, but
the subscription ID applies to the entire credential, so will almost
certainly be reused, so there's not much danger in hitting the 2k
custom role limit.
"""
known_roles = kv().get('charm.azure.roles', {})
if role_name in known_roles:
return known_roles[role_name]
sub_id = kv().get('charm.azure.sub-id')
role_file = Path('files/roles/{}.json'.format(role_name))
role_data = json.loads(role_file.read_text())
role_fullname = role_data['Name'].format(sub_id)
scope = role_data['AssignableScopes'][0].format(sub_id)
role_data['Name'] = role_fullname
role_data['AssignableScopes'][0] = scope
try:
log('Ensuring role {}', role_fullname)
_azure('role', 'definition', 'create',
'--role-definition', json.dumps(role_data))
except AzureError as e:
if 'already exists' not in e.args[0]:
raise
known_roles[role_name] = role_fullname
return role_fullname
def _assign_role(request, role):
if isinstance(role, StandardRole):
role = role.value
msi = _get_msi(request.vm_id)
try:
_azure('role', 'assignment', 'create',
'--assignee-object-id', msi,
'--resource-group', request.resource_group,
'--role', role)
except AlreadyExistsAzureError:
pass
| [((54, 13, 54, 29), 'charmhelpers.core.hookenv.config', 'hookenv.config', ({}, {}), '()', False, 'from charmhelpers.core import hookenv\n'), ((83, 4, 83, 32), 'charms.layer.status.blocked', 'status.blocked', ({(83, 19, 83, 31): 'no_creds_msg'}, {}), '(no_creds_msg)', False, 'from charms.layer import status\n'), ((292, 13, 294, 51), 'subprocess.run', 'subprocess.run', (), '', False, 'import subprocess\n'), ((57, 17, 60, 55), 'subprocess.run', 'subprocess.run', (), '', False, 'import subprocess\n'), ((271, 8, 271, 20), 'urllib.request.urlopen', 'urlopen', ({(271, 16, 271, 19): 'url'}, {}), '(url)', False, 'from urllib.request import urlopen\n'), ((302, 17, 302, 35), 'json.loads', 'json.loads', ({(302, 28, 302, 34): 'stdout'}, {}), '(stdout)', False, 'import json\n'), ((111, 17, 111, 54), 're.sub', 're.sub', ({(111, 24, 111, 30): 'app_id', (111, 32, 111, 42): '"""<app-id>"""', (111, 44, 111, 53): 'e.args[0]'}, {}), "(app_id, '<app-id>', e.args[0])", False, 'import re\n'), ((112, 17, 112, 55), 're.sub', 're.sub', ({(112, 24, 112, 32): 'app_pass', (112, 34, 112, 46): '"""<app-pass>"""', (112, 48, 112, 54): 'stderr'}, {}), "(app_pass, '<app-pass>', stderr)", False, 'import re\n'), ((113, 17, 113, 57), 're.sub', 're.sub', ({(113, 24, 113, 33): 'tenant_id', (113, 35, 113, 48): '"""<tenant-id>"""', (113, 50, 113, 56): 'stderr'}, {}), "(tenant_id, '<tenant-id>', stderr)", False, 'import re\n'), ((257, 23, 257, 32), 'math.floor', 'floor', ({(257, 29, 257, 31): 'hl'}, {}), '(hl)', False, 'from math import ceil, floor\n'), ((257, 34, 257, 42), 'math.ceil', 'ceil', ({(257, 39, 257, 41): 'hl'}, {}), '(hl)', False, 'from math import ceil, floor\n'), ((279, 16, 279, 74), 're.search', 're.search', ({(279, 26, 279, 63): '"""authorization_uri="[^"]*/([^/"]*)\\""""', (279, 65, 279, 73): 'www_auth'}, {}), '(\'authorization_uri="[^"]*/([^/"]*)"\', www_auth)', False, 'import re\n'), ((310, 20, 310, 24), 'charmhelpers.core.unitdata.kv', 'kv', ({}, {}), '()', False, 'from charmhelpers.core.unitdata import kv\n'), ((324, 18, 324, 22), 'charmhelpers.core.unitdata.kv', 'kv', ({}, {}), '()', False, 'from charmhelpers.core.unitdata import kv\n'), ((327, 13, 327, 17), 'charmhelpers.core.unitdata.kv', 'kv', ({}, {}), '()', False, 'from charmhelpers.core.unitdata import kv\n'), ((337, 36, 337, 57), 'json.dumps', 'json.dumps', ({(337, 47, 337, 56): 'role_data'}, {}), '(role_data)', False, 'import json\n'), ((79, 12, 79, 66), 'charms.layer.status.blocked', 'status.blocked', ({(79, 27, 79, 65): '"""invalid value for credentials config"""'}, {}), "('invalid value for credentials config')", False, 'from charms.layer import status\n'), ((108, 8, 108, 12), 'charmhelpers.core.unitdata.kv', 'kv', ({}, {}), '()', False, 'from charmhelpers.core.unitdata import kv\n'), ((125, 24, 125, 28), 'charmhelpers.core.unitdata.kv', 'kv', ({}, {}), '()', False, 'from charmhelpers.core.unitdata import kv\n'), ((127, 8, 127, 12), 'charmhelpers.core.unitdata.kv', 'kv', ({}, {}), '()', False, 'from charmhelpers.core.unitdata import kv\n'), ((75, 25, 75, 57), 'base64.b64decode', 'b64decode', ({(75, 35, 75, 56): "config['credentials']"}, {}), "(config['credentials'])", False, 'from base64 import b64decode\n')] |
pankajk22/Computer-Networks-Assignments | Assignment-1/Code/server3.py | 5c227ef59c31ab52cde160568242dbbc84482bc5 | import socket
import csv
import traceback
import threading
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
usrpass={}
def openfile():
filename="login_credentials.csv"
with open(filename,'r')as csvfile:
csv_file = csv.reader(csvfile, delimiter=",")
for col in csv_file:
usrpass[col[0]]=col[1]
usrpass.pop("Username")
#print(usrpass)
ihost=socket.gethostname()
host=socket.gethostbyname(ihost)
ihost=socket.gethostname()
host=socket.gethostbyname(ihost)
iport=[]
hostfile="host.csv"
with open(hostfile,'r')as host_file:
csv_hfile = csv.reader(host_file, delimiter=",")
for row in csv_hfile:
iport.append(row[1])
port=int(iport[4])
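# Added note on the expected input files (values are illustrative):
#   login_credentials.csv -> header "Username,Password" followed by rows such
#                            as "alice,secret123" (openfile() drops the header entry)
#   host.csv              -> the second column of its fifth row (iport[4]) is
#                            used as this server's port number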
def socketbind():
try:
s.bind(('',port))
print("Bind with host at port number : "+str(port))
s.listen(10)
print("Socket is listening!!")
except socket.error as msg:
print("Error in Binding: "+ str(msg)+"\n Retrying....")
socketbind()
def socketaccept():
conn,add=s.accept()
print("connection is established with IP : "+str(add[0])+" and Port Number : "+str(add[1]))
conn.send(bytes("1","utf-8"))
conversation(conn)
conn.close()
def conversation(conn):
while True:
username=str(conn.recv(1024),"utf-8")
password=str(conn.recv(1024),"utf-8")
res=checkpass(username,password)
if res==1:
print("Valid Password!")
conn.send(bytes("1","utf-8"))
conn.send(bytes("1","utf-8"))
else:
conn.send(bytes("-1","utf-8"))
conn.send(bytes("-1","utf-8"))
# def checkusr(username):
# if username in usrpass:
# return 1
# else:
# print("Invalid Username")
# return -1
def checkpass(username,password):
    if usrpass.get(username) == password:
return 1
else:
print("Invalid Password")
return -1
def main():
openfile()
socketbind()
socketaccept()
# count=0
# while (count<6):
# new_thread=threading.Thread(target =socketaccept)
# new_thread.start()
# count=count+1
main() | [((6, 2, 6, 50), 'socket.socket', 'socket.socket', ({(6, 16, 6, 30): 'socket.AF_INET', (6, 31, 6, 49): 'socket.SOCK_STREAM'}, {}), '(socket.AF_INET, socket.SOCK_STREAM)', False, 'import socket\n'), ((19, 6, 19, 26), 'socket.gethostname', 'socket.gethostname', ({}, {}), '()', False, 'import socket\n'), ((20, 5, 20, 32), 'socket.gethostbyname', 'socket.gethostbyname', ({(20, 26, 20, 31): 'ihost'}, {}), '(ihost)', False, 'import socket\n'), ((21, 6, 21, 26), 'socket.gethostname', 'socket.gethostname', ({}, {}), '()', False, 'import socket\n'), ((22, 5, 22, 32), 'socket.gethostbyname', 'socket.gethostbyname', ({(22, 26, 22, 31): 'ihost'}, {}), '(ihost)', False, 'import socket\n'), ((26, 16, 26, 52), 'csv.reader', 'csv.reader', (), '', False, 'import csv\n'), ((12, 19, 12, 53), 'csv.reader', 'csv.reader', (), '', False, 'import csv\n')] |
joaopfonseca/research | research/utils/_check_pipelines.py | 02659512218d077d9ef28d481178e62172ef18cd | from itertools import product
from sklearn.base import clone
from sklearn.preprocessing import FunctionTransformer
from sklearn.model_selection import ParameterGrid
from imblearn.pipeline import Pipeline
from rlearn.utils import check_random_states
def check_pipelines(objects_list, random_state, n_runs):
"""Extract estimators and parameters grids."""
# Create random states
random_states = check_random_states(random_state, n_runs)
pipelines = []
param_grid = []
for comb, rs in product(product(*objects_list), random_states):
name = "|".join([i[0] for i in comb])
# name, object, sub grid
comb = [
(nm, ob, ParameterGrid(sg))
if ob is not None
else (nm, FunctionTransformer(), ParameterGrid(sg))
for nm, ob, sg in comb
]
# Create estimator
if name not in [n[0] for n in pipelines]:
est = Pipeline([(nm, ob) for nm, ob, _ in comb])
pipelines.append((name, est))
# Create intermediate parameter grids
sub_grids = [
[{f"{nm}__{k}": v for k, v in param_def.items()} for param_def in sg]
for nm, obj, sg in comb
]
# Create parameter grids
for sub_grid in product(*sub_grids):
param_prefix = "" if len(comb) == 1 else f"{name}__"
grid = {"est_name": [name]}
grid.update(
{f"{param_prefix}{k}": [v] for d in sub_grid for k, v in d.items()}
)
random_states = {
f"{param_prefix}{param}": [rs]
for param in est.get_params()
if "random_state" in param
}
grid.update(random_states)
# Avoid multiple runs over pipelines without random state
if grid not in param_grid:
param_grid.append(grid)
return pipelines, param_grid
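# Added illustrative input (the estimator names, objects and grids below are
# assumptions, not taken from this module):
#
#     objects_list = [
#         [('smote', SMOTE(), {'k_neighbors': [3, 5]}), ('none', None, {})],
#         [('lr', LogisticRegression(), {'C': [0.1, 1.0]})],
#     ]
#     pipelines, param_grid = check_pipelines(objects_list, random_state=0, n_runs=2)
#
# Each inner list is one pipeline step; every combination across the inner
# lists becomes a named imblearn Pipeline plus its expanded parameter grid.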
def check_pipelines_wrapper(
objects_list, wrapper, random_state, n_runs, wrapped_only=False
):
wrapper_label = wrapper[0]
wrapper_obj = wrapper[1]
wrapper_grid = wrapper[2]
estimators, param_grids = check_pipelines(objects_list, random_state, n_runs)
wrapped_estimators = [
(
f"{wrapper_label}|{name}",
clone(wrapper_obj).set_params(**{"classifier": pipeline}),
)
for name, pipeline in estimators
]
wrapped_param_grids = [
{
"est_name": [f'{wrapper_label}|{d["est_name"][0]}'],
**{
f'{wrapper_label}|{d["est_name"][0]}__classifier__{k}': v
for k, v in d.items()
if k != "est_name"
},
**{
f'{wrapper_label}|{d["est_name"][0]}__{k}': v
for k, v in wrapper_grid.items()
},
}
for d in param_grids
]
if wrapped_only:
return wrapped_estimators, wrapped_param_grids
else:
return (estimators + wrapped_estimators, param_grids + wrapped_param_grids)
| [((13, 20, 13, 61), 'rlearn.utils.check_random_states', 'check_random_states', ({(13, 40, 13, 52): 'random_state', (13, 54, 13, 60): 'n_runs'}, {}), '(random_state, n_runs)', False, 'from rlearn.utils import check_random_states\n'), ((17, 28, 17, 50), 'itertools.product', 'product', ({(17, 36, 17, 49): '*objects_list'}, {}), '(*objects_list)', False, 'from itertools import product\n'), ((40, 24, 40, 43), 'itertools.product', 'product', ({(40, 32, 40, 42): '*sub_grids'}, {}), '(*sub_grids)', False, 'from itertools import product\n'), ((30, 18, 30, 60), 'imblearn.pipeline.Pipeline', 'Pipeline', ({(30, 27, 30, 59): '[(nm, ob) for nm, ob, _ in comb]'}, {}), '([(nm, ob) for nm, ob, _ in comb])', False, 'from imblearn.pipeline import Pipeline\n'), ((22, 21, 22, 38), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', ({(22, 35, 22, 37): 'sg'}, {}), '(sg)', False, 'from sklearn.model_selection import ParameterGrid\n'), ((24, 22, 24, 43), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', ({}, {}), '()', False, 'from sklearn.preprocessing import FunctionTransformer\n'), ((24, 45, 24, 62), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', ({(24, 59, 24, 61): 'sg'}, {}), '(sg)', False, 'from sklearn.model_selection import ParameterGrid\n'), ((72, 12, 72, 30), 'sklearn.base.clone', 'clone', ({(72, 18, 72, 29): 'wrapper_obj'}, {}), '(wrapper_obj)', False, 'from sklearn.base import clone\n')] |
PuzeLiu/mushroom-rl | mushroom_rl/utils/plots/common_plots.py | 99942b425e66b4ddcc26009d7105dde23841e95d | from mushroom_rl.utils.plots import PlotItemBuffer, DataBuffer
from mushroom_rl.utils.plots.plot_item_buffer import PlotItemBufferLimited
class RewardPerStep(PlotItemBuffer):
"""
Class that represents a plot for the reward at every step.
"""
def __init__(self, plot_buffer):
"""
Constructor.
Args:
plot_buffer (DataBuffer): data buffer to be used.
"""
title = "Step_Reward"
curves_params = [dict(data_buffer=plot_buffer)]
super().__init__(title, curves_params)
class RewardPerEpisode(PlotItemBuffer):
"""
Class that represents a plot for the accumulated reward per episode.
"""
def __init__(self, plot_buffer):
"""
Constructor.
Args:
plot_buffer (DataBuffer): data buffer to be used.
"""
title = "Episode_Reward"
curves_params = [dict(data_buffer=plot_buffer)]
super().__init__(title, curves_params)
class Actions(PlotItemBufferLimited):
"""
Class that represents a plot for the actions.
"""
def __init__(self, plot_buffers, maxs=None, mins=None):
"""
Constructor.
Args:
            plot_buffers (list): list of data buffers to be used;
maxs(list, None): list of max values of each data buffer plotted.
If an element is None, no max line is drawn;
mins(list, None): list of min values of each data buffer plotted.
If an element is None, no min line is drawn.
"""
title = "Actions"
super().__init__(title, plot_buffers, maxs=maxs, mins=mins)
class Observations(PlotItemBufferLimited):
"""
Class that represents a plot for the observations.
"""
def __init__(self, plot_buffers, maxs=None, mins=None, dotted_limits=None):
"""
Constructor.
Args:
            plot_buffers (list): list of data buffers to be used;
maxs(list, None): list of max values of each data buffer plotted.
If an element is None, no max line is drawn;
mins(list, None): list of min values of each data buffer plotted.
If an element is None, no min line is drawn.
dotted_limits (list, None): list of booleans. If True, the
corresponding limit is dotted; otherwise, it is printed as a
solid line.
"""
title = "Observations"
super().__init__(title, plot_buffers, maxs=maxs, mins=mins,
dotted_limits=dotted_limits)
class LenOfEpisodeTraining(PlotItemBuffer):
"""
Class that represents a plot for the length of the episode.
"""
def __init__(self, plot_buffer):
"""
Constructor.
Args:
            plot_buffer (DataBuffer): data buffer to be used.
"""
title = "Len of Episode"
plot_params = [dict(data_buffer=plot_buffer)]
super().__init__(title, plot_params)
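# Usage sketch (illustrative, assuming DataBuffer(name) and DataBuffer.update(list) as
# provided by mushroom_rl.utils.plots, with rendering handled by a plot window that
# owns the items):
#     reward_buffer = DataBuffer('reward_per_step')
#     reward_plot = RewardPerStep(reward_buffer)
#     reward_buffer.update([0.0, 1.0, 0.5])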
| [] |
helion-security/helion | libs/python-daemon-2.2.0/test/test_metadata.py | 1e5f22da9808c4d67bb773b93c5295c72fcaf45a | # -*- coding: utf-8 -*-
#
# test/test_metadata.py
# Part of ‘python-daemon’, an implementation of PEP 3143.
#
# This is free software, and you are welcome to redistribute it under
# certain conditions; see the end of this file for copyright
# information, grant of license, and disclaimer of warranty.
""" Unit test for ‘_metadata’ private module.
"""
from __future__ import (absolute_import, unicode_literals)
import collections
import errno
import functools
import json
import re
try:
# Python 3 standard library.
import urllib.parse as urlparse
except ImportError:
# Python 2 standard library.
import urlparse
import mock
import pkg_resources
import testtools.helpers
import testtools.matchers
from . import scaffold
from .scaffold import unicode
import daemon._metadata as metadata
class HasAttribute(testtools.matchers.Matcher):
""" A matcher to assert an object has a named attribute. """
def __init__(self, name):
self.attribute_name = name
def match(self, instance):
""" Assert the object `instance` has an attribute named `name`. """
result = None
if not testtools.helpers.safe_hasattr(instance, self.attribute_name):
result = AttributeNotFoundMismatch(instance, self.attribute_name)
return result
class AttributeNotFoundMismatch(testtools.matchers.Mismatch):
""" The specified instance does not have the named attribute. """
def __init__(self, instance, name):
self.instance = instance
self.attribute_name = name
def describe(self):
""" Emit a text description of this mismatch. """
text = (
"{instance!r}"
" has no attribute named {name!r}").format(
instance=self.instance, name=self.attribute_name)
return text
class metadata_value_TestCase(scaffold.TestCaseWithScenarios):
""" Test cases for metadata module values. """
expected_str_attributes = set([
'version_installed',
'author',
'copyright',
'license',
'url',
])
scenarios = [
(name, {'attribute_name': name})
for name in expected_str_attributes]
for (name, params) in scenarios:
if name == 'version_installed':
# No duck typing, this attribute might be None.
params['ducktype_attribute_name'] = NotImplemented
continue
# Expect an attribute of ‘str’ to test this value.
params['ducktype_attribute_name'] = 'isdigit'
def test_module_has_attribute(self):
""" Metadata should have expected value as a module attribute. """
self.assertThat(
metadata, HasAttribute(self.attribute_name))
def test_module_attribute_has_duck_type(self):
""" Metadata value should have expected duck-typing attribute. """
if self.ducktype_attribute_name == NotImplemented:
self.skipTest("Can't assert this attribute's type")
instance = getattr(metadata, self.attribute_name)
self.assertThat(
instance, HasAttribute(self.ducktype_attribute_name))
class YearRange_TestCase(scaffold.TestCaseWithScenarios):
""" Test cases for ‘YearRange’ class. """
scenarios = [
('simple', {
'begin_year': 1970,
'end_year': 1979,
'expected_text': "1970–1979",
}),
('same year', {
'begin_year': 1970,
'end_year': 1970,
'expected_text': "1970",
}),
('no end year', {
'begin_year': 1970,
'end_year': None,
'expected_text': "1970",
}),
]
def setUp(self):
""" Set up test fixtures. """
super(YearRange_TestCase, self).setUp()
self.test_instance = metadata.YearRange(
self.begin_year, self.end_year)
def test_text_representation_as_expected(self):
""" Text representation should be as expected. """
result = unicode(self.test_instance)
self.assertEqual(result, self.expected_text)
FakeYearRange = collections.namedtuple('FakeYearRange', ['begin', 'end'])
@mock.patch.object(metadata, 'YearRange', new=FakeYearRange)
class make_year_range_TestCase(scaffold.TestCaseWithScenarios):
""" Test cases for ‘make_year_range’ function. """
scenarios = [
('simple', {
'begin_year': "1970",
'end_date': "1979-01-01",
'expected_range': FakeYearRange(begin=1970, end=1979),
}),
('same year', {
'begin_year': "1970",
'end_date': "1970-01-01",
'expected_range': FakeYearRange(begin=1970, end=1970),
}),
('no end year', {
'begin_year': "1970",
'end_date': None,
'expected_range': FakeYearRange(begin=1970, end=None),
}),
('end date UNKNOWN token', {
'begin_year': "1970",
'end_date': "UNKNOWN",
'expected_range': FakeYearRange(begin=1970, end=None),
}),
('end date FUTURE token', {
'begin_year': "1970",
'end_date': "FUTURE",
'expected_range': FakeYearRange(begin=1970, end=None),
}),
]
def test_result_matches_expected_range(self):
""" Result should match expected YearRange. """
result = metadata.make_year_range(self.begin_year, self.end_date)
self.assertEqual(result, self.expected_range)
class metadata_content_TestCase(scaffold.TestCase):
""" Test cases for content of metadata. """
def test_copyright_formatted_correctly(self):
""" Copyright statement should be formatted correctly. """
regex_pattern = (
"Copyright © "
"\d{4}" # Four-digit year.
"(?:–\d{4})?" # Optional range dash and four-digit year.
)
regex_flags = re.UNICODE
self.assertThat(
metadata.copyright,
testtools.matchers.MatchesRegex(regex_pattern, regex_flags))
def test_author_formatted_correctly(self):
""" Author information should be formatted correctly. """
regex_pattern = (
".+ " # Name.
"<[^>]+>" # Email address, in angle brackets.
)
regex_flags = re.UNICODE
self.assertThat(
metadata.author,
testtools.matchers.MatchesRegex(regex_pattern, regex_flags))
def test_copyright_contains_author(self):
""" Copyright information should contain author information. """
self.assertThat(
metadata.copyright,
testtools.matchers.Contains(metadata.author))
def test_url_parses_correctly(self):
""" Homepage URL should parse correctly. """
result = urlparse.urlparse(metadata.url)
self.assertIsInstance(
result, urlparse.ParseResult,
"URL value {url!r} did not parse correctly".format(
url=metadata.url))
try:
FileNotFoundError
except NameError:
# Python 2 uses IOError.
FileNotFoundError = functools.partial(IOError, errno.ENOENT)
version_info_filename = "version_info.json"
def fake_func_has_metadata(testcase, resource_name):
""" Fake the behaviour of ‘pkg_resources.Distribution.has_metadata’. """
if (
resource_name != testcase.version_info_filename
or not hasattr(testcase, 'test_version_info')):
return False
return True
def fake_func_get_metadata(testcase, resource_name):
""" Fake the behaviour of ‘pkg_resources.Distribution.get_metadata’. """
if not fake_func_has_metadata(testcase, resource_name):
error = FileNotFoundError(resource_name)
raise error
content = testcase.test_version_info
return content
def fake_func_get_distribution(testcase, distribution_name):
""" Fake the behaviour of ‘pkg_resources.get_distribution’. """
if distribution_name != metadata.distribution_name:
raise pkg_resources.DistributionNotFound
if hasattr(testcase, 'get_distribution_error'):
raise testcase.get_distribution_error
mock_distribution = testcase.mock_distribution
mock_distribution.has_metadata.side_effect = functools.partial(
fake_func_has_metadata, testcase)
mock_distribution.get_metadata.side_effect = functools.partial(
fake_func_get_metadata, testcase)
return mock_distribution
@mock.patch.object(metadata, 'distribution_name', new="mock-dist")
class get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios):
""" Test cases for ‘get_distribution_version_info’ function. """
default_version_info = {
'release_date': "UNKNOWN",
'version': "UNKNOWN",
'maintainer': "UNKNOWN",
}
scenarios = [
('version 0.0', {
'test_version_info': json.dumps({
'version': "0.0",
}),
'expected_version_info': {'version': "0.0"},
}),
('version 1.0', {
'test_version_info': json.dumps({
'version': "1.0",
}),
'expected_version_info': {'version': "1.0"},
}),
('file lorem_ipsum.json', {
'test_filename': "lorem_ipsum.json",
'version_info_filename': "lorem_ipsum.json",
'test_version_info': json.dumps({
'version': "1.0",
}),
'expected_resource_name': "lorem_ipsum.json",
'expected_version_info': {'version': "1.0"},
}),
('not installed', {
'get_distribution_error': pkg_resources.DistributionNotFound(),
'expected_version_info': default_version_info,
}),
('no version_info', {
'expected_version_info': default_version_info,
}),
('wrong filename', {
'test_filename': "lorem_ipsum.json",
'test_version_info': json.dumps({
'version': "1.0",
}),
'expected_resource_name': "lorem_ipsum.json",
'expected_version_info': default_version_info,
}),
]
def setUp(self):
""" Set up test fixtures. """
super(get_distribution_version_info_TestCase, self).setUp()
self.test_args = {}
if hasattr(self, 'test_filename'):
self.test_args['filename'] = self.test_filename
if not hasattr(self, 'version_info_filename'):
self.version_info_filename = version_info_filename
if not hasattr(self, 'expected_resource_name'):
self.expected_resource_name = version_info_filename
self.mock_distribution = mock.MagicMock()
func_patcher_get_distribution = mock.patch.object(
pkg_resources, 'get_distribution')
func_patcher_get_distribution.start()
self.addCleanup(func_patcher_get_distribution.stop)
pkg_resources.get_distribution.side_effect = functools.partial(
fake_func_get_distribution, self)
def test_requests_installed_distribution(self):
""" The package distribution should be retrieved. """
expected_distribution_name = metadata.distribution_name
metadata.get_distribution_version_info(**self.test_args)
pkg_resources.get_distribution.assert_called_with(
expected_distribution_name)
def test_requests_specified_filename(self):
""" The specified metadata resource name should be requested. """
if hasattr(self, 'get_distribution_error'):
self.skipTest("No access to distribution")
metadata.get_distribution_version_info(**self.test_args)
self.mock_distribution.has_metadata.assert_called_with(
self.expected_resource_name)
def test_result_matches_expected_items(self):
""" The result should match the expected items. """
version_info = metadata.get_distribution_version_info(**self.test_args)
self.assertEqual(self.expected_version_info, version_info)
# Copyright © 2008–2018 Ben Finney <[email protected]>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 3 of that license or any later version.
# No warranty expressed or implied. See the file ‘LICENSE.GPL-3’ for details.
# Local variables:
# coding: utf-8
# mode: python
# End:
# vim: fileencoding=utf-8 filetype=python :
| [((138, 16, 138, 73), 'collections.namedtuple', 'collections.namedtuple', ({(138, 39, 138, 54): '"""FakeYearRange"""', (138, 56, 138, 72): "['begin', 'end']"}, {}), "('FakeYearRange', ['begin', 'end'])", False, 'import collections\n'), ((140, 1, 140, 60), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((259, 1, 259, 66), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((252, 49, 253, 45), 'functools.partial', 'functools.partial', ({(253, 12, 253, 34): 'fake_func_has_metadata', (253, 36, 253, 44): 'testcase'}, {}), '(fake_func_has_metadata, testcase)', False, 'import functools\n'), ((254, 49, 255, 45), 'functools.partial', 'functools.partial', ({(255, 12, 255, 34): 'fake_func_get_metadata', (255, 36, 255, 44): 'testcase'}, {}), '(fake_func_get_metadata, testcase)', False, 'import functools\n'), ((129, 29, 130, 47), 'daemon._metadata.YearRange', 'metadata.YearRange', ({(130, 16, 130, 31): 'self.begin_year', (130, 33, 130, 46): 'self.end_year'}, {}), '(self.begin_year, self.end_year)', True, 'import daemon._metadata as metadata\n'), ((174, 17, 174, 73), 'daemon._metadata.make_year_range', 'metadata.make_year_range', ({(174, 42, 174, 57): 'self.begin_year', (174, 59, 174, 72): 'self.end_date'}, {}), '(self.begin_year, self.end_date)', True, 'import daemon._metadata as metadata\n'), ((212, 17, 212, 48), 'urlparse.urlparse', 'urlparse.urlparse', ({(212, 35, 212, 47): 'metadata.url'}, {}), '(metadata.url)', False, 'import urlparse\n'), ((223, 24, 223, 64), 'functools.partial', 'functools.partial', ({(223, 42, 223, 49): 'IOError', (223, 51, 223, 63): 'errno.ENOENT'}, {}), '(IOError, errno.ENOENT)', False, 'import functools\n'), ((322, 33, 322, 49), 'mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'import mock\n'), ((323, 40, 324, 50), 'mock.patch.object', 'mock.patch.object', ({(324, 16, 324, 29): 'pkg_resources', (324, 31, 324, 49): '"""get_distribution"""'}, {}), "(pkg_resources, 'get_distribution')", False, 'import mock\n'), ((327, 53, 328, 49), 'functools.partial', 'functools.partial', ({(328, 16, 328, 42): 'fake_func_get_distribution', (328, 44, 328, 48): 'self'}, {}), '(fake_func_get_distribution, self)', False, 'import functools\n'), ((333, 8, 333, 64), 'daemon._metadata.get_distribution_version_info', 'metadata.get_distribution_version_info', ({}, {}), '(**self.test_args)', True, 'import daemon._metadata as metadata\n'), ((334, 8, 335, 43), 'pkg_resources.get_distribution.assert_called_with', 'pkg_resources.get_distribution.assert_called_with', ({(335, 16, 335, 42): 'expected_distribution_name'}, {}), '(expected_distribution_name)', False, 'import pkg_resources\n'), ((341, 8, 341, 64), 'daemon._metadata.get_distribution_version_info', 'metadata.get_distribution_version_info', ({}, {}), '(**self.test_args)', True, 'import daemon._metadata as metadata\n'), ((347, 23, 347, 79), 'daemon._metadata.get_distribution_version_info', 'metadata.get_distribution_version_info', ({}, {}), '(**self.test_args)', True, 'import daemon._metadata as metadata\n'), ((271, 37, 273, 22), 'json.dumps', 'json.dumps', ({(271, 48, 273, 21): "{'version': '0.0'}"}, {}), "({'version': '0.0'})", False, 'import json\n'), ((277, 37, 279, 22), 'json.dumps', 'json.dumps', ({(277, 48, 279, 21): "{'version': '1.0'}"}, {}), "({'version': '1.0'})", False, 'import json\n'), ((285, 37, 287, 22), 'json.dumps', 'json.dumps', ({(285, 48, 287, 21): "{'version': '1.0'}"}, {}), "({'version': '1.0'})", False, 'import json\n'), ((292, 42, 292, 78), 
'pkg_resources.DistributionNotFound', 'pkg_resources.DistributionNotFound', ({}, {}), '()', False, 'import pkg_resources\n'), ((300, 37, 302, 22), 'json.dumps', 'json.dumps', ({(300, 48, 302, 21): "{'version': '1.0'}"}, {}), "({'version': '1.0'})", False, 'import json\n')] |
wheatdog/CDM | objectModel/Python/cdm/persistence/cdmfolder/types/purpose_reference.py | 8b6698f4a8b4f44132b12d97f9f261afcfeb798c | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from typing import Union, List
from .purpose import *
from .trait_reference import TraitReference
from cdm.utilities import JObject
class PurposeReference(JObject):
def __init__(self):
super().__init__()
self.purposeReference = None # type: Union[str, Purpose]
self.appliedTraits = [] # type: List[Union[str, TraitReference]]
| [] |
cyberpunk317/inverted_index | text_preprocessing/normalizer.py | f49ae3ca4f0255928986c1610c5ff8ee38c5f1ff | import re
from typing import Union, List
import nltk
from bs4 import BeautifulSoup
class Normalizer:
def __init__(self):
self.lemmatizer = nltk.stem.WordNetLemmatizer()
def normalize(self, x: Union[list, str]) -> List[str]:
"""
Accepts text (possibly tokenized) and makes it suitable for machine processing
"""
x = self._remove_stop_words(x)
x = self._denoise(x)
x = self._lemmatize(x)
return x
def _remove_stop_words(self, x: Union[list, str]) -> List[str]:
"""
Removes stop words from text in english
"""
if isinstance(x, str):
x = x.split(' ')
stop_words = set(nltk.corpus.stopwords.words('english'))
        return [w for w in x if w not in stop_words]
    def _lemmatize(self, x: Union[list, str]) -> str:
        """
        Reduces words to their base (dictionary) form
        """
        if isinstance(x, list):
            x = ' '.join(x)
        # NOTE: WordNetLemmatizer.lemmatize() expects a single word; a full sentence
        # that is not itself a dictionary entry is returned unchanged.
        x = self.lemmatizer.lemmatize(x)
        return x
def _denoise(self, x: Union[list, str]) -> str:
if isinstance(x, list):
x = ' '.join(x)
def strip_html(x):
soup = BeautifulSoup(x, "html.parser")
x = soup.get_text()
return x
def remove_between_square_brackets(x):
x = re.sub('\[[^]]*\]', '', x)
x = re.sub(r'http\S+', '', x)
return x
def remove_rating(x):
return re.sub('\W\d/\d+\S*', '', x)
x = x.lower()
x = re.sub(',|\.|!|\?', '', x)
x = strip_html(x)
x = remove_between_square_brackets(x)
x = remove_rating(x)
return x | [((11, 26, 11, 55), 'nltk.stem.WordNetLemmatizer', 'nltk.stem.WordNetLemmatizer', ({}, {}), '()', False, 'import nltk\n'), ((58, 12, 58, 38), 're.sub', 're.sub', ({(58, 19, 58, 30): '""",|\\\\.|!|\\\\?"""', (58, 32, 58, 34): '""""""', (58, 36, 58, 37): 'x'}, {}), "(',|\\\\.|!|\\\\?', '', x)", False, 'import re\n'), ((28, 25, 28, 63), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', ({(28, 53, 28, 62): '"""english"""'}, {}), "('english')", False, 'import nltk\n'), ((45, 19, 45, 50), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(45, 33, 45, 34): 'x', (45, 36, 45, 49): '"""html.parser"""'}, {}), "(x, 'html.parser')", False, 'from bs4 import BeautifulSoup\n'), ((50, 16, 50, 42), 're.sub', 're.sub', ({(50, 23, 50, 34): '"""\\\\[[^]]*\\\\]"""', (50, 36, 50, 38): '""""""', (50, 40, 50, 41): 'x'}, {}), "('\\\\[[^]]*\\\\]', '', x)", False, 'import re\n'), ((51, 16, 51, 41), 're.sub', 're.sub', ({(51, 23, 51, 33): '"""http\\\\S+"""', (51, 35, 51, 37): '""""""', (51, 39, 51, 40): 'x'}, {}), "('http\\\\S+', '', x)", False, 'import re\n'), ((55, 19, 55, 47), 're.sub', 're.sub', ({(55, 26, 55, 39): '"""\\\\W\\\\d/\\\\d+\\\\S*"""', (55, 41, 55, 43): '""""""', (55, 45, 55, 46): 'x'}, {}), "('\\\\W\\\\d/\\\\d+\\\\S*', '', x)", False, 'import re\n')] |
MarcoMancha/BreastCancerDetector | env/lib/python3.7/site-packages/prompt_toolkit/filters/cli.py | be0dfdcebd1ae66da6d0cf48e2525c24942ae877 | """
For backwards-compatibility. keep this file.
(Many people are going to have key bindings that rely on this file.)
"""
from __future__ import unicode_literals
from .app import *
__all__ = [
# Old names.
'HasArg',
'HasCompletions',
'HasFocus',
'HasSelection',
'HasValidationError',
'IsDone',
'IsReadOnly',
'IsMultiline',
'RendererHeightIsKnown',
'InEditingMode',
'InPasteMode',
'ViMode',
'ViNavigationMode',
'ViInsertMode',
'ViInsertMultipleMode',
'ViReplaceMode',
'ViSelectionMode',
'ViWaitingForTextObjectMode',
'ViDigraphMode',
'EmacsMode',
'EmacsInsertMode',
'EmacsSelectionMode',
'IsSearching',
'HasSearch',
'ControlIsSearchable',
]
# Keep the original classnames for backwards compatibility.
HasValidationError = lambda: has_validation_error
HasArg = lambda: has_arg
IsDone = lambda: is_done
RendererHeightIsKnown = lambda: renderer_height_is_known
ViNavigationMode = lambda: vi_navigation_mode
InPasteMode = lambda: in_paste_mode
EmacsMode = lambda: emacs_mode
EmacsInsertMode = lambda: emacs_insert_mode
ViMode = lambda: vi_mode
IsSearching = lambda: is_searching
HasSearch = lambda: is_searching
ControlIsSearchable = lambda: control_is_searchable
EmacsSelectionMode = lambda: emacs_selection_mode
ViDigraphMode = lambda: vi_digraph_mode
ViWaitingForTextObjectMode = lambda: vi_waiting_for_text_object_mode
ViSelectionMode = lambda: vi_selection_mode
ViReplaceMode = lambda: vi_replace_mode
ViInsertMultipleMode = lambda: vi_insert_multiple_mode
ViInsertMode = lambda: vi_insert_mode
HasSelection = lambda: has_selection
HasCompletions = lambda: has_completions
IsReadOnly = lambda: is_read_only
IsMultiline = lambda: is_multiline
HasFocus = has_focus # No lambda here! (Has_focus is callable that returns a callable.)
InEditingMode = in_editing_mode
| [] |
shilpasayura/bk | genetic/spaces.py | 2b0a1aa9300da80e201264bcf80226b3c5ff4ad6 | #spaces.py
'''
AlgoHack Genetic Algorithm for University Semester Planning
Version 0.03 2018
Niranjan Meegammana Shilpasayura.org
'''
import xdb
def crt_spaces_table(cursor,drop=False):
if (drop):
sql="DROP TABLE IF EXISTS spaces;"
success, count=xdb.runSQL(cursor, sql)
sql='''CREATE TABLE IF NOT EXISTS spaces (
spid INTEGER PRIMARY KEY AUTOINCREMENT,
name varchar(30),
sptype INTEGER,
fitness INTEGER,
gid INTEGER DEFAULT 0,
semid INTEGER DEFAULT 0)
'''
success, count=xdb.runSQL(cursor, sql)
return success
def insert_spaces(cursor,nlect,nlabs,gid,semid, delay):
    # nlect is the number of lecture halls
    # nlabs is the number of labs
    # if gid = 0, common for all groups, else dedicated
    # if semid = 0, common for all semesters, else dedicated
sql="SELECT * FROM spaces LIMIT 1";
success, count=xdb.runSQL(cursor, sql)
if (count > 0):
print("spaces table: Records exist")
return False, 0
sqls=""
fitness=1
for i in range (nlect):
name="Lect Hall " + str(i+1)
sptype=1
sqls=sqls +'INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES ('+ '"{}",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');'
for i in range (nlabs):
name="Lab " + str(i+1)
sptype=2
sqls=sqls +'INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES ('+ '"{}",{}, {},{},{}'.format(name, sptype,fitness,gid,semid) +');'
success, count=xdb.runSQL_stmts(cursor, sqls,delay)
return success, count
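# Example of the statement string insert_spaces(cursor, 2, 1, 1, 1, delay) builds:
#   INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES ("Lect Hall 1",1, 1,1,1);
#   INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES ("Lect Hall 2",1, 1,1,1);
#   INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES ("Lab 1",2, 1,1,1);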
if __name__ == "__main__":
delay=0.05
conn=xdb.opendb('genetic56.db')
cursor =conn.cursor() # create a cursor object
success=crt_spaces_table(cursor, True) # create spaces table
    #dedicated lecture hall, lab for group and semester
success, count =insert_spaces(cursor,1,1,1,1,delay) # generate records
xdb.commit(conn)
xdb.closedb(conn)
| [((23, 19, 23, 42), 'xdb.runSQL', 'xdb.runSQL', ({(23, 30, 23, 36): 'cursor', (23, 38, 23, 41): 'sql'}, {}), '(cursor, sql)', False, 'import xdb\n'), ((32, 19, 32, 42), 'xdb.runSQL', 'xdb.runSQL', ({(32, 30, 32, 36): 'cursor', (32, 38, 32, 41): 'sql'}, {}), '(cursor, sql)', False, 'import xdb\n'), ((50, 19, 50, 55), 'xdb.runSQL_stmts', 'xdb.runSQL_stmts', ({(50, 36, 50, 42): 'cursor', (50, 44, 50, 48): 'sqls', (50, 49, 50, 54): 'delay'}, {}), '(cursor, sqls, delay)', False, 'import xdb\n'), ((55, 9, 55, 35), 'xdb.opendb', 'xdb.opendb', ({(55, 20, 55, 34): '"""genetic56.db"""'}, {}), "('genetic56.db')", False, 'import xdb\n'), ((60, 4, 60, 20), 'xdb.commit', 'xdb.commit', ({(60, 15, 60, 19): 'conn'}, {}), '(conn)', False, 'import xdb\n'), ((61, 4, 61, 21), 'xdb.closedb', 'xdb.closedb', ({(61, 16, 61, 20): 'conn'}, {}), '(conn)', False, 'import xdb\n'), ((13, 23, 13, 46), 'xdb.runSQL', 'xdb.runSQL', ({(13, 34, 13, 40): 'cursor', (13, 42, 13, 45): 'sql'}, {}), '(cursor, sql)', False, 'import xdb\n')] |
hyansuper/flask-video-streaming | threaded_remote_pi_camera.py | a6ba19519b9ba5470e59e535552b3e8c448d57ae | import urllib.request
import cv2
import numpy as np
import time
import threading
class ThreadedRemotePiCamera:
    """Reads an MJPEG stream from a remote Raspberry Pi and decodes frames on a background thread."""
def __init__(self, pi_address, resolution=(320,240), framerate=10, hflip=False, vflip=False):
if hflip and vflip:
self.flip = -1
elif hflip:
self.flip = 0
elif vflip:
self.flip = 1
else:
self.flip = None
self.stream = urllib.request.urlopen('http://%s:5000/video_feed?w=%d&h=%d&fps=%d' % ((pi_address,)+resolution+(framerate,)))
self.total_bytes = b''
self.ev = threading.Event()
self.th = threading.Thread(target=self.run, daemon=True)
self.running = True
self.frame = None
self.th.start()
def run(self):
while self.running:
self.frame = self.get_frame()
self.ev.set()
self.stream.close()
def read(self):
'''
while self.frame is None:
time.sleep(.1)
f = self.frame
self.frame = None
return f
'''
self.ev.wait()
self.ev.clear()
return self.frame
def get_frame(self):
while True:
self.total_bytes += self.stream.read(1024)
end = self.total_bytes.find(b'\xff\xd9') # JPEG end
            if end != -1:
                start = self.total_bytes.find(b'\xff\xd8') # JPEG start
                jpg = cv2.imdecode(np.frombuffer(self.total_bytes[start: end+2], dtype=np.uint8), cv2.IMREAD_COLOR)  # frombuffer replaces the deprecated np.fromstring
if self.flip is not None:
jpg = cv2.flip(jpg, self.flip)
self.total_bytes = self.total_bytes[end+2:]
return jpg
def release(self):
self.running = False
self.th.join()
def frames(self):
while True:
yield self.read()
def __iter__(self):
return self.frames()
def __enter__(self):
return self
def __exit__(self, *args):
self.release()
def __del__(self):
self.release()
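# Usage sketch (not part of the original module; the Pi address and window name are
# illustrative and assume the remote Pi serves the /video_feed endpoint requested above).
if __name__ == '__main__':
    with ThreadedRemotePiCamera('192.168.1.20', resolution=(320, 240), framerate=10) as camera:
        for frame in camera.frames():
            cv2.imshow('remote pi camera', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    cv2.destroyAllWindows()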
| [((20, 18, 20, 35), 'threading.Event', 'threading.Event', ({}, {}), '()', False, 'import threading\n'), ((21, 18, 21, 64), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((47, 35, 47, 96), 'numpy.fromstring', 'np.fromstring', (), '', True, 'import numpy as np\n'), ((49, 26, 49, 50), 'cv2.flip', 'cv2.flip', ({(49, 35, 49, 38): 'jpg', (49, 40, 49, 49): 'self.flip'}, {}), '(jpg, self.flip)', False, 'import cv2\n')] |
jalawala/custom-kubernetes-scheduler | scheduler/misc/Ec2SpotCustomScheduler_jan19.py | 07ccba57610048185a245257a1501f6273399d80 | #! /usr/bin/python3
import time
import random
import json
import os
from pprint import pprint
from kubernetes.client.rest import ApiException
from pint import UnitRegistry
from collections import defaultdict
from kubernetes import client, config, watch
from timeloop import Timeloop
from datetime import timedelta
config.load_kube_config()
#config.load_incluster_config()
# doing this computation within a k8s cluster
#k8s.config.load_incluster_config()
core_api = client.CoreV1Api()
apis_api = client.AppsV1Api()
#sdclient = SdcClient(<Your Sysdig API token>)
sysdig_metric = "net.http.request.time"
metrics = [{ "id": sysdig_metric, "aggregations": { "time": "timeAvg", "group": "avg" } }]
#scheduler_name = "Ec2SpotK8sScheduler"
CustomSchedulerName ='K8SCustomScheduler'
ureg = UnitRegistry()
ureg.load_definitions('kubernetes_units.txt')
pendingPodsList = []
failedPodsList = []
runningPodsList =[]
nodesListPerNodeLabel = {}
Q_ = ureg.Quantity
def scheduler(name, node, namespace):
target=client.V1ObjectReference(api_version='v1', kind="Node", name=node)
meta=client.V1ObjectMeta()
meta.name=name
body=client.V1Binding(metadata=meta, target=target)
return core_api.create_namespaced_binding(namespace, body, _preload_content=False)
#tl = Timeloop()
#@tl.job(interval=timedelta(seconds=10))
def RunEc2SpotCustomScheduler():
#global pendingPodsList
#global failedPodsList
CustomKubeSchedulingClusterDeploymentData = get_custom_deployments()
pprint("CustomKubeSchedulingClusterDeploymentData={}".format(CustomKubeSchedulingClusterDeploymentData))
for namespace, deploymentCustomSchedulingData in CustomKubeSchedulingClusterDeploymentData.items():
print("namespace={} deploymentCustomSchedulingData={}".format(namespace, deploymentCustomSchedulingData))
if deploymentCustomSchedulingData != {}:
CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData)
def CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData):
global runningPodsList
global pendingPodsList
global failedPodsList
global nodesListPerNodeLabel
print("namespace={} deploymentCustomSchedulingData={}".format(namespace, deploymentCustomSchedulingData))
#exit(0)
#namespace = 'default'
#lifecycleList = ['OnDemand', 'Ec2Spot']
for deploymentName, CustomSchedulingData in deploymentCustomSchedulingData.items():
print("deploymentName={} CustomSchedulingData={}".format(deploymentName, CustomSchedulingData))
#exit(0)
#podsList = getPodsListForDeployment(namespace, deploymentName)
runningPodsList = []
pendingPodsList = []
failedPodsList =[]
getPodsListForDeployment(namespace, deploymentName)
NumOfPodsRunning = len (runningPodsList)
NumOfPodsPending = len (pendingPodsList)
NumOfPodsFailed = len (failedPodsList)
#print("NumOfPodsRunning={} runningPodsList={}".format(NumOfPodsRunning, runningPodsList))
#print("NumOfPodsPending={} pendingPodsList={}".format(NumOfPodsPending, pendingPodsList))
#print("NumOfPodsFailed={} failedPodsList={}".format(NumOfPodsFailed, failedPodsList))
get_node_available_nodes_list(CustomSchedulingData)
        for i, p in enumerate (runningPodsList):
            pprint("i={} running pod_name={} node_name={}".format(i, p['name'], p['node_name']))
        for i, p in enumerate (pendingPodsList):
            pprint("i={} pending pod_name={}".format(i, p['name']))
        for i, p in enumerate (failedPodsList):
            pprint("i={} failed pod_name={}".format(i, p['name']))
#print("nodeLabel={} NumOfAlreadyRunningPods={}".format(nodeLabel, NumOfAlreadyRunningPods))
print("lifecycle={} NumOfNodes={}".format(lifecycle, len(NodesList)))
for nodeLabel, in NodesList.keys():
pprint("node_name={}".format(n))
#exit(0)
#runningPodsList = podsList['runningPodsList']
#pendingPodsList = podsList['pendingPodsList']
#failedPodsList = podsList['failedPodsList']
for nodeLabel, numOfReplicas in CustomSchedulingData.items():
print("Scheduling numOfReplicas={} on nodeLabel={}".format(numOfReplicas, nodeLabel))
#pprint(podsList)
#lifecycle = 'OnDemand'
#NodesList = get_node_available_nodes_list(lifecycle)
#pprint(NodesList)
NumOfPodsRunningAlready = 0
podsAlreadyRunningOnNodeLabelList = []
for podRunning in runningPodsList:
if podRunning['node_name'] in nodesListPerNodeLabel[nodeLabel].keys():
podsAlreadyRunningOnNodeLabelList.append(podRunning)
NumOfAlreadyRunningPods = len (podsAlreadyRunningOnNodeLabelList)
for i, p in enumerate (podsAlreadyRunningOnNodeLabelList):
pprint("running pod i={} nodeLabel={} node_name={} name={}".format(i,nodeLabel, p['node_name'], p['name']))
            if NumOfAlreadyRunningPods == numOfReplicas:
                print("NumOfAlreadyRunningPods == numOfReplicas = {}. So no need to Schedule".format(NumOfAlreadyRunningPods))
            elif NumOfAlreadyRunningPods < numOfReplicas:
                NumOfPodsToBeScheduled = numOfReplicas - NumOfAlreadyRunningPods
                try:
                    schedulePods(NumOfPodsToBeScheduled, nodesListPerNodeLabel[nodeLabel])
                except Exception as e:
                    pprint(e)
            elif NumOfAlreadyRunningPods > numOfReplicas:
                NumOfPodsToDeleted = NumOfAlreadyRunningPods - numOfReplicas
try:
deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList)
except Exception as e:
pprint(e)
pendingPodsList = []
            failedPodsList = []
#pprint(podsList)
#lifecycle = 'OnDemand'
#lifecycle = 'Ec2Spot'
#get_node_available_nodes_list(lifecycle)
def deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList):
namespace = 'default'
for i in range(0, NumOfPodsToDeleted):
pod = podsAlreadyRunningOnNodeLabelList[i]
grace_period_seconds = 30
body = client.V1DeleteOptions()
#body = {}
pprint("deletePods i={} pod={} NumOfPodsToDeleted={}".format(i, pod['name'], NumOfPodsToDeleted ))
response = core_api.delete_namespaced_pod(name=pod['name'], namespace=namespace, grace_period_seconds=grace_period_seconds, body=body)
pprint(response)
def schedulePods(NumOfPodsToBeScheduled, NodesList):
global pendingPodsList
global failedPodsList
namespace = 'default'
if NumOfPodsToBeScheduled > len(pendingPodsList):
pprint("schedulePods NumOfPodsToBeScheduled={} is greater than number of pending pods={}. So skipping schedulePods".format(NumOfPodsToBeScheduled, len(pendingPodsList)))
return
for i in range(NumOfPodsToBeScheduled):
pod = pendingPodsList[0]
print("schedulePods Trying to schedule i={} NumOfPodsToBeScheduled={} pod={} with cpu_req={} mem_req={}".format(i, NumOfPodsToBeScheduled, pod['name'], pod['cpu_req'], pod['mem_req']))
for node, stats in NodesList.items():
print("schedulePods Checking for free resources on node={} with cpu_free={} mem_free={}".format(node, stats['cpu_free'], stats['mem_free']))
#pprint(node)
if pod['cpu_req'] <= stats['cpu_free'] and pod['mem_req'] <= stats['mem_free']:
print("schedulePods scheduling pod={} onto the node={}".format(pod['name'], node))
res = scheduler(pod['name'], node, namespace)
pprint(res)
stats['cpu_free'] = stats['cpu_free'] - pod['cpu_req']
stats['mem_free'] = stats['mem_free'] - pod['mem_req']
pendingPodsList.remove(pod)
break
def getPodsListForDeployment(namespace, deploymentName):
#global pendingPodsList
#runningPodsList =[]
#failedPodsList =[]
#podsList = {}
#namespace='default'
#name='Ec2SpotK8sScheduler'
#field_selector = ("spec.scheduler_name=" + CustomSchedulerName)
field_selector = ("spec.schedulerName=" + CustomSchedulerName)
pods = core_api.list_namespaced_pod(namespace=namespace, field_selector=field_selector).to_dict()
#pods = core_api.list_namespaced_pod(namespace=namespace).to_dict()
#print("pods={}".format(pods))
for pod in pods['items']:
#pprint(pod)
#print("node_name={}".format(pod['spec']['node_name']))
#return ""
stats = {}
cpureqs,cpulmts,memreqs,memlmts = [], [], [], []
if deploymentName in pod['metadata']['name'] and pod['spec']['scheduler_name'] == CustomSchedulerName:
for container in pod['spec']['containers']:
res = container['resources']
reqs = defaultdict(lambda: 0, res['requests'] or {})
lmts = defaultdict(lambda: 0, res['limits'] or {})
cpureqs.append(Q_(reqs["cpu"]))
memreqs.append(Q_(reqs["memory"]))
cpulmts.append(Q_(lmts["cpu"]))
memlmts.append(Q_(lmts["memory"]))
stats["cpu_req"] = sum(cpureqs)
stats["cpu_lmt"] = sum(cpulmts)
stats["mem_req"] = sum(memreqs)
stats["mem_lmt"] = sum(memlmts)
stats["name"] = pod['metadata']['name']
stats["status"] = pod['status']['phase']
if stats["status"] == 'Pending':
pendingPodsList.append(stats)
elif stats["status"] == 'Running':
stats["node_name"] = pod['spec']['node_name']
runningPodsList.append(stats)
elif stats["status"] == 'Failed':
failedPodsList.append(stats)
#podsList['pendingPodsList'] = pendingPodsList
#podsList['runningPodsList'] = runningPodsList
#podsList['failedPodsList'] = failedPodsList
#pprint(podsList)
#pprint("pendingPodsList={} runningPodsList={} failedPodsList={}".format(runningPodsList, runningPodsList, failedPodsList )
#return pendingPodsList,runningPodsList,failedPodsList
#return podsList
def get_custom_deployments():
CustomKubeSchedulingClusterDeploymentData = {}
#namespaceList =[]
namespacedataList = core_api.list_namespace().to_dict()['items']
for namespaceData in namespacedataList:
namespace = namespaceData['metadata']['name']
CustomKubeSchedulingClusterDeploymentData[namespace] = get_custom_deployments_per_namespace(namespace)
#namespaceList.append(name)
print("CustomKubeSchedulingClusterDeploymentData={}".format(CustomKubeSchedulingClusterDeploymentData))
return CustomKubeSchedulingClusterDeploymentData
def get_custom_deployments_per_namespace(namespace):
#CustomKubeSchedulingDeploymentData = []
CustomKubeSchedulingDeploymentData = {}
#namespace='default'
#name = 'nginx'
name = '1'
#field_selector = ("metadata.name=" + name)
field_selector = ("metadata.annotations.OnDemandBase=" + name)
# get deployment by namespace
#resp = apis_api.list_namespaced_deployment(namespace=namespace, field_selector=field_selector)
resp = apis_api.list_namespaced_deployment(namespace=namespace)
for deployment in resp.items:
#pprint(deployment.metadata.annotations)
#pprint(deployment)
deploymentData = {}
CustomPodScheduleStrategy = {}
annotations = deployment.metadata.annotations
if 'UseCustomKubeScheduler' in annotations.keys():
if annotations['UseCustomKubeScheduler'] == 'true':
deploymentName = deployment.metadata.name
numOfReplicas = deployment.spec.replicas
#deploymentData[deploymentName] = deployment.metadata.name
Strategy = annotations['CustomPodScheduleStrategy']
#deploymentData['pod_replicas'] = deployment.spec.replicas
#deploymentData['CustomPodScheduleStrategy'] = get_pods_custom_pod_schedule_strategy(Strategy, deployment.spec.replicas)
CustomKubeSchedulingDeploymentData[deploymentName] = get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas)
#deploymentData['NumOfOnDemandPodsToBeRunning'] = int (deploymentData['OnDemandBase'] + (deploymentData['pod_replicas'] - deploymentData['OnDemandBase']) * deploymentData['OnDemandAbovePercentage'] / 100)
#deploymentData['NumOfSpotPodsToBeRunning'] = deploymentData['pod_replicas'] - deploymentData['NumOfOnDemandPodsToBeRunning']
#CustomKubeSchedulingDeploymentData.append(deploymentData)
return CustomKubeSchedulingDeploymentData
#print("OnDemandBase={}, OnDemandAbovePercentage={} SpotASGName={} OnDemandASGName={} pod_replicas={} NumOfOnDemandPods={} NumOfSpotPods={}".format(OnDemandBase, OnDemandAbovePercentage, SpotASGName, OnDemandASGName, pod_replicas, NumOfOnDemandPods, NumOfSpotPods))
def get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas):
print("Strategy={} numOfReplicas={}".format(Strategy, numOfReplicas))
CustomPodScheduleStrategy = {}
nodeLabelToReplicas = {}
nodeLabelToWights = {}
totalWeight = 0
StrategyList = Strategy.split(':')
print("StrategyList={}".format(StrategyList))
numOfBaseValues = 0
for nodeStrategy in StrategyList:
print("nodeStrategy: {}".format(nodeStrategy))
nodeStrategyPartsList = nodeStrategy.split(',')
base = 0
weight = 0
nodeLabel = ''
for nodeStrategyPart in nodeStrategyPartsList:
nodeStrategySubPartList = nodeStrategyPart.split('=')
if nodeStrategySubPartList[0] == 'base':
if numOfBaseValues != 0:
print("base value cannot be non-zero for more than node strategy")
exit(1)
else:
numOfBaseValues += 1
base = int(nodeStrategySubPartList[1])
if base <= numOfReplicas:
numOfReplicas -= base
else:
base = numOfReplicas
numOfReplicas = 0
print("base={}".format(nodeStrategySubPartList[1]))
elif nodeStrategySubPartList[0] == 'weight':
weight = int(nodeStrategySubPartList[1])
totalWeight += weight
print("weight={}".format(weight))
else:
nodeLabel = nodeStrategyPart
print("label key={} value={}".format(nodeStrategySubPartList[0], nodeStrategySubPartList[1]))
#nodeLabelToReplicas [nodeLabel] = base
nodeLabelToWights [nodeLabel] = weight
CustomPodScheduleStrategy [nodeLabel] = base
print("nodeLabelToReplicas={} nodeLabelToWights={}".format(nodeLabelToReplicas, nodeLabelToWights))
print("numOfBaseValues = {} totalWeight={} numOfReplicas={}".format(numOfBaseValues, totalWeight, numOfReplicas))
print("CustomPodScheduleStrategy = {}".format(CustomPodScheduleStrategy))
totalNumOfLables = len (CustomPodScheduleStrategy)
labelNum = 0
for key, replicas in CustomPodScheduleStrategy.items():
weight = nodeLabelToWights[key]
print("key: {} replicas={} weight={}, totalWeight={}".format(key, replicas, weight, totalWeight))
if labelNum == totalNumOfLables - 1:
weightReplicas = numOfReplicas
replicas = replicas + weightReplicas
else:
weightReplicas = int (numOfReplicas * (weight/totalWeight))
replicas = replicas + weightReplicas
labelNum += 1
numOfReplicas -= weightReplicas
print("weightReplicas: {} replicas={} labelNum={}, numOfReplicas={}".format(weightReplicas, replicas, labelNum, numOfReplicas))
CustomPodScheduleStrategy[key] = replicas
print("CustomPodScheduleStrategy = {}".format(CustomPodScheduleStrategy))
print("numOfBaseValues = {} totalWeight={} numOfReplicas={}".format(numOfBaseValues, totalWeight, numOfReplicas))
return CustomPodScheduleStrategy
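# Worked example (illustrative strategy string): with
# Strategy = "base=2,weight=1,lifecycle=Ec2Spot:weight=3,lifecycle=OnDemand" and
# numOfReplicas = 10, the base of 2 goes to lifecycle=Ec2Spot and the remaining 8
# replicas are split 1:3 by weight, giving
# {'lifecycle=Ec2Spot': 4, 'lifecycle=OnDemand': 6}.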
__all__ = ["get_node_available_nodes_list"]
def get_node_available_nodes_list(CustomSchedulingData):
global nodesListPerNodeLabel
#data = []
#data = {}
for nodeLabel in CustomSchedulingData.keys():
nodesListPerNodeLabel[nodeLabel] = {}
nodeLabelParts = nodeLabel.split('=')
nodeLabelKey = nodeLabelParts[0]
nodeLabelValue = nodeLabelParts[1]
#selector = "metadata.labels."+nodeLabelParts[0]+"="+nodeLabelParts[1]
#selector = "metadata.labels.nodesize="+nodeLabelParts[1]
#print("selector={}".format(selector))
#name = 'ip-192-168-73-104.ec2.internal'
#selector = "metadata.name"+"="+name
#print("selector={}".format(selector))
#field_selector = (selector)
#resp = core_api.list_node(field_selector=field_selector).to_dict()['items']
#pprint("resp={}".format(resp))
#exit(0)
availableNodesData = {}
for node in core_api.list_node().to_dict()['items']:
#pprint(node)
node_labels = node['metadata']['labels']
if nodeLabelKey in node_labels.keys():
if node_labels[nodeLabelKey] == nodeLabelValue:
stats = {}
node_name = node['metadata']['name']
allocatable = node['status']['allocatable']
max_pods = int(int(allocatable["pods"]) * 1.5)
field_selector = ("status.phase!=Succeeded,status.phase!=Failed," +
"spec.nodeName=" + node_name)
stats["cpu_alloc"] = Q_(allocatable["cpu"])
stats["mem_alloc"] = Q_(allocatable["memory"])
#stats["lifecycle"] = lifecycle
pods = core_api.list_pod_for_all_namespaces(limit=max_pods,
field_selector=field_selector).to_dict()['items']
# compute the allocated resources
cpureqs,cpulmts,memreqs,memlmts = [], [], [], []
for pod in pods:
#pprint(pod)
for container in pod['spec']['containers']:
res = container['resources']
reqs = defaultdict(lambda: 0, res['requests'] or {})
lmts = defaultdict(lambda: 0, res['limits'] or {})
cpureqs.append(Q_(reqs["cpu"]))
memreqs.append(Q_(reqs["memory"]))
cpulmts.append(Q_(lmts["cpu"]))
memlmts.append(Q_(lmts["memory"]))
stats["cpu_req"] = sum(cpureqs)
stats["cpu_lmt"] = sum(cpulmts)
stats["cpu_req_per"] = (stats["cpu_req"] / stats["cpu_alloc"] * 100)
stats["cpu_lmt_per"] = (stats["cpu_lmt"] / stats["cpu_alloc"] * 100)
stats["mem_req"] = sum(memreqs)
stats["mem_lmt"] = sum(memlmts)
stats["mem_req_per"] = (stats["mem_req"] / stats["mem_alloc"] * 100)
stats["mem_lmt_per"] = (stats["mem_lmt"] / stats["mem_alloc"] * 100)
stats["cpu_free"] = stats["cpu_alloc"] - stats["cpu_req"]
stats["mem_free"] = stats["mem_alloc"] - stats["mem_req"]
#stats["name"] = node['metadata']['name']
#data.append(stats)
availableNodesData[node_name] = stats
nodesListPerNodeLabel[nodeLabel] = availableNodesData
#print(nodesListPerNodeLabel)
#for nodeLabel, availableNodesData in nodesListPerNodeLabel.items():
#print("nodeLabel={} availableNodesData={}".format(nodeLabel, availableNodesData))
#exit(0)
#pprint(data)
    return nodesListPerNodeLabel
if __name__ == '__main__':
#ready_nodes = nodes_available()
#pprint(ready_nodes)
#name='review-v1-787d8fbfbb-ltdzt'
node='ip-10-0-3-253.ec2.internal'
#namespace='ecommerce'
#ret=scheduler(name, node, namespace)
#pprint(ret)
#main()
#test()
#testpod()
#check_node_resources(node)
#RunEc2SpotCustomScheduler()
#getPodsListForDeployment(' ')
#lifecycle = 'OnDemand'
#lifecycle = 'Ec2Spot'
#get_node_available_nodes_list(lifecycle)
#RunEc2SpotCustomScheduler()
#NumOfPodsToDeleted = 1
#podsAlreadyRunningOnNodeLabelList = []
#d ={'name':'nginx-66cb875766-vx6bp'}
#podsAlreadyRunningOnNodeLabelList.append(d)
#deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList)
#deploymentName='nginx'
#deploymentName = 'kube-ops-view'
#getPodsListForDeployment(deploymentName)
#testlist()
#tl.start(block=True)
while True:
RunEc2SpotCustomScheduler()
time.sleep(10)
| [((17, 0, 17, 25), 'kubernetes.config.load_kube_config', 'config.load_kube_config', ({}, {}), '()', False, 'from kubernetes import client, config, watch\n'), ((21, 11, 21, 29), 'kubernetes.client.CoreV1Api', 'client.CoreV1Api', ({}, {}), '()', False, 'from kubernetes import client, config, watch\n'), ((22, 11, 22, 29), 'kubernetes.client.AppsV1Api', 'client.AppsV1Api', ({}, {}), '()', False, 'from kubernetes import client, config, watch\n'), ((30, 7, 30, 21), 'pint.UnitRegistry', 'UnitRegistry', ({}, {}), '()', False, 'from pint import UnitRegistry\n'), ((44, 11, 44, 77), 'kubernetes.client.V1ObjectReference', 'client.V1ObjectReference', (), '', False, 'from kubernetes import client, config, watch\n'), ((45, 9, 45, 30), 'kubernetes.client.V1ObjectMeta', 'client.V1ObjectMeta', ({}, {}), '()', False, 'from kubernetes import client, config, watch\n'), ((47, 9, 47, 56), 'kubernetes.client.V1Binding', 'client.V1Binding', (), '', False, 'from kubernetes import client, config, watch\n'), ((180, 15, 180, 39), 'kubernetes.client.V1DeleteOptions', 'client.V1DeleteOptions', ({}, {}), '()', False, 'from kubernetes import client, config, watch\n'), ((184, 8, 184, 24), 'pprint.pprint', 'pprint', ({(184, 15, 184, 23): 'response'}, {}), '(response)', False, 'from pprint import pprint\n'), ((529, 8, 529, 22), 'time.sleep', 'time.sleep', ({(529, 19, 529, 21): '(10)'}, {}), '(10)', False, 'import time\n'), ((208, 16, 208, 27), 'pprint.pprint', 'pprint', ({(208, 23, 208, 26): 'res'}, {}), '(res)', False, 'from pprint import pprint\n'), ((243, 23, 243, 68), 'collections.defaultdict', 'defaultdict', ({(243, 35, 243, 44): 'lambda : 0', (243, 46, 243, 67): "res['requests'] or {}"}, {}), "(lambda : 0, res['requests'] or {})", False, 'from collections import defaultdict\n'), ((244, 23, 244, 66), 'collections.defaultdict', 'defaultdict', ({(244, 35, 244, 44): 'lambda : 0', (244, 46, 244, 65): "res['limits'] or {}"}, {}), "(lambda : 0, res['limits'] or {})", False, 'from collections import defaultdict\n'), ((159, 20, 159, 29), 'pprint.pprint', 'pprint', ({(159, 27, 159, 28): 'e'}, {}), '(e)', False, 'from pprint import pprint\n'), ((464, 35, 464, 80), 'collections.defaultdict', 'defaultdict', ({(464, 47, 464, 56): 'lambda : 0', (464, 58, 464, 79): "res['requests'] or {}"}, {}), "(lambda : 0, res['requests'] or {})", False, 'from collections import defaultdict\n'), ((465, 35, 465, 78), 'collections.defaultdict', 'defaultdict', ({(465, 47, 465, 56): 'lambda : 0', (465, 58, 465, 77): "res['limits'] or {}"}, {}), "(lambda : 0, res['limits'] or {})", False, 'from collections import defaultdict\n'), ((165, 20, 165, 29), 'pprint.pprint', 'pprint', ({(165, 27, 165, 28): 'e'}, {}), '(e)', False, 'from pprint import pprint\n')] |
DewiBrynJones/docker-deepspeech-cy | local/utils/validate_label_locale.py | 99159a746651bd848a8309da7f676045913f3d25 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from clean_transcript import clean_transcript
ALPHABET_FILE_PATH = "/DeepSpeech/bin/bangor_welsh/alphabet.txt"
def validate_label(label):
clean = clean_transcript(ALPHABET_FILE_PATH)
cleaned, transcript = clean.clean(label)
if cleaned:
return transcript.lower()
return None
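# Quick manual check (illustrative input; assumes the alphabet file above exists, as it
# does inside the project's DeepSpeech container).
if __name__ == "__main__":
    print(validate_label("Bore da"))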
| [((10, 12, 10, 48), 'clean_transcript.clean_transcript', 'clean_transcript', ({(10, 29, 10, 47): 'ALPHABET_FILE_PATH'}, {}), '(ALPHABET_FILE_PATH)', False, 'from clean_transcript import clean_transcript\n')] |
dumpmemory/state-spaces | src/models/nn/adaptive_softmax.py | 2a85503cb3e9e86cc05753950d4a249df9a0fffb | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
class OptionalParameterList(nn.ParameterList):
def extra_repr(self):
child_lines = []
for k, p in self._parameters.items():
if p is not None:
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = 'Parameter containing: [{} of size {}{}]'.format(
torch.typename(p), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
class ProjectedAdaptiveLogSoftmax(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
tie_projs=None, out_layers_weights=None, out_projs=None,
keep_order=False,
bias_scale=0.0,
dropout=0.0,
):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = list(cutoffs) + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
# [21-09-15 AG]: bake the first False into the definition, just as [0] is built into the cutoffs
if tie_projs is None: tie_projs = []
elif isinstance(tie_projs, bool): tie_projs = [tie_projs] * len(cutoffs)
else: tie_projs = list(tie_projs)
tie_projs = [False] + tie_projs
self.tie_projs = tie_projs
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
if not out_layers_weights:
self.out_layers_weights = nn.ParameterList()
else:
self.out_layers_weights = out_layers_weights
self.out_layers_biases = nn.ParameterList()
self.shared_out_projs = out_projs
self.out_projs = OptionalParameterList()
self.dropout = dropout
self.drop = nn.Dropout(dropout)
if div_val == 1:
if d_proj != d_embed:
for i in range(len(self.cutoffs)):
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_embed))
)
else:
# self.out_projs = [None] * len(self.cutoffs)
self.out_projs.append(None)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(n_token))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(n_token, d_embed))
)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_emb_i))
)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(r_idx - l_idx))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i))
)
for bias in self.out_layers_biases:
bound = bias_scale * d_proj ** -.5
nn.init.uniform_(bias, -bound, bound)
self.keep_order = keep_order
def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
if self.dropout > 0.0:
logit = hidden @ proj
logit = self.drop(logit)
logit = logit @ weight.t()
else:
logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
if bias is not None:
logit = logit + bias
return logit
def get_out_proj(self, i):
if self.tie_projs[i]:
if len(self.shared_out_projs) == 0:
return None
elif len(self.shared_out_projs) == 1:
return self.shared_out_projs[0]
else:
return self.shared_out_projs[i]
else:
return self.out_projs[i]
def forward(self, hidden, target, keep_order=False, key_padding_mask=None, *args, **kwargs):
# [21-09-15 AG]: TODO may need to handle key_padding_mask
'''
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
'''
hidden = hidden.reshape(-1, hidden.size(-1))
target = target.reshape(-1)
if hidden.size(0) != target.size(0):
print(hidden.shape, target.shape)
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers_weights[0],
self.out_layers_biases[0], self.get_out_proj(0))
nll = -F.log_softmax(logit, dim=-1) \
.gather(1, target.unsqueeze(1)).squeeze(1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers_weights[0][l_idx:r_idx]
bias_i = self.out_layers_biases[0][l_idx:r_idx]
else:
weight_i = self.out_layers_weights[i]
bias_i = self.out_layers_biases[i]
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0)
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero(as_tuple=False).squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i)
hidden_i = hidden.index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
logprob_i = head_logprob_i[:, -i] \
+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
if self.keep_order or keep_order:
nll.index_copy_(0, indices_i, -logprob_i)
else:
nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return nll.mean() # TODO maybe cases for length or padding_mask
class AdaptiveEmbedding(nn.Module):
""" Copy of transformers.AdaptiveEmbedding that works with fp16 by replacing the index_put_ operation
Initialization has been fixed for the case when d_proj = d_embed
"""
def __init__(self, n_token, d_embed, d_proj, cutoffs : List[int], div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = list(cutoffs) + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.drop = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
_init_embed(self.emb_layers[-1].weight, d_embed, init_scale)
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed ** -.5)
if d_proj != d_embed: # TODO
# self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i ** -.5)
_init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale)
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
def forward(self, inp, *args, **kwargs):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
embed = self.drop(embed)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.view(-1)
# Changes
# emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
embeddings = []
indices = torch.zeros_like(inp_flat) # empty should work as long as cutoffs[-1] > max token
_total_tokens = 0
# emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze(-1) # shape (_tokens,)
_tokens = indices_i.numel()
if _tokens == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = self.drop(emb_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
# Changes
embeddings.append(emb_i)
indices.index_put_(
(indices_i,),
torch.arange(_tokens, device=inp.device) + _total_tokens
)
_total_tokens += _tokens
# emb_flat.index_copy_(0, indices_i, emb_i)
embeddings = torch.cat(embeddings, dim=0)
emb_flat = embeddings[indices]
embed_shape = inp.size() + (self.d_proj,)
embed = emb_flat.view(embed_shape)
embed.mul_(self.emb_scale)
# embed.div_(self.emb_scale)
return embed
def _init_weight(weight, d : int, init_scale : Optional[float], default=None):
assert init_scale or default
if init_scale is None:
std = default
else:
std = init_scale * (d ** -0.5)
nn.init.normal_(weight, mean=0, std=std)
_init_embed = functools.partial(_init_weight, default=0.02)
_init_proj = functools.partial(_init_weight, default=0.01)
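# _init_embed / _init_proj fall back to fixed std values (0.02 for embeddings, 0.01 for
# projections) when init_scale is None; otherwise std = init_scale * d ** -0.5.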
### Just for this codebase, we need to squeeze the last dimension because inputs are always given as (B, L, D) instead of (B, L)
import src.models.nn.utils as U
# AdaptiveEmbedding = U.Squeeze(AdaptiveEmbedding)
| [((341, 14, 341, 59), 'functools.partial', 'functools.partial', (), '', False, 'import functools\n'), ((342, 13, 342, 58), 'functools.partial', 'functools.partial', (), '', False, 'import functools\n'), ((339, 4, 339, 44), 'torch.nn.init.normal_', 'nn.init.normal_', (), '', True, 'import torch.nn as nn\n'), ((74, 33, 74, 51), 'torch.nn.ParameterList', 'nn.ParameterList', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((80, 20, 80, 39), 'torch.nn.Dropout', 'nn.Dropout', ({(80, 31, 80, 38): 'dropout'}, {}), '(dropout)', True, 'import torch.nn as nn\n'), ((258, 26, 258, 41), 'torch.nn.ModuleList', 'nn.ModuleList', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((259, 25, 259, 43), 'torch.nn.ParameterList', 'nn.ParameterList', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((70, 38, 70, 56), 'torch.nn.ParameterList', 'nn.ParameterList', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((124, 12, 124, 49), 'torch.nn.init.uniform_', 'nn.init.uniform_', ({(124, 29, 124, 33): 'bias', (124, 35, 124, 41): '(-bound)', (124, 43, 124, 48): 'bound'}, {}), '(bias, -bound, bound)', True, 'import torch.nn as nn\n'), ((131, 20, 131, 55), 'torch.nn.functional.linear', 'F.linear', (), '', True, 'import torch.nn.functional as F\n'), ((197, 27, 197, 59), 'torch.nn.functional.log_softmax', 'F.log_softmax', (), '', True, 'import torch.nn.functional as F\n'), ((199, 18, 199, 84), 'torch.zeros_like', 'torch.zeros_like', (), '', False, 'import torch\n'), ((252, 20, 252, 39), 'torch.nn.Dropout', 'nn.Dropout', ({(252, 31, 252, 38): 'dropout'}, {}), '(dropout)', True, 'import torch.nn as nn\n'), ((252, 62, 252, 75), 'torch.nn.Identity', 'nn.Identity', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((293, 22, 293, 48), 'torch.zeros_like', 'torch.zeros_like', ({(293, 39, 293, 47): 'inp_flat'}, {}), '(inp_flat)', False, 'import torch\n'), ((321, 25, 321, 53), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((66, 47, 66, 89), 'torch.zeros', 'torch.zeros', ({(66, 59, 66, 74): 'self.n_clusters', (66, 76, 66, 88): 'self.d_embed'}, {}), '(self.n_clusters, self.d_embed)', False, 'import torch\n'), ((67, 45, 67, 73), 'torch.zeros', 'torch.zeros', ({(67, 57, 67, 72): 'self.n_clusters'}, {}), '(self.n_clusters)', False, 'import torch\n'), ((261, 35, 261, 92), 'torch.nn.Embedding', 'nn.Embedding', (), '', True, 'import torch.nn as nn\n'), ((285, 24, 285, 58), 'torch.nn.functional.linear', 'F.linear', ({(285, 33, 285, 38): 'embed', (285, 40, 285, 57): 'self.emb_projs[0]'}, {}), '(embed, self.emb_projs[0])', True, 'import torch.nn.functional as F\n'), ((310, 24, 310, 58), 'torch.nn.functional.linear', 'F.linear', ({(310, 33, 310, 38): 'emb_i', (310, 40, 310, 57): 'self.emb_projs[i]'}, {}), '(emb_i, self.emb_projs[i])', True, 'import torch.nn.functional as F\n'), ((31, 20, 31, 37), 'torch.typename', 'torch.typename', ({(31, 35, 31, 36): 'p'}, {}), '(p)', False, 'import torch\n'), ((96, 29, 96, 49), 'torch.zeros', 'torch.zeros', ({(96, 41, 96, 48): 'n_token'}, {}), '(n_token)', False, 'import torch\n'), ((186, 31, 187, 63), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((188, 29, 189, 59), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((223, 37, 223, 71), 'torch.nn.functional.log_softmax', 'F.log_softmax', (), '', True, 'import torch.nn.functional as F\n'), ((273, 39, 273, 75), 'torch.nn.Embedding', 'nn.Embedding', ({(273, 52, 273, 65): '(r_idx - l_idx)', (273, 67, 273, 74): 'd_emb_i'}, {}), '(r_idx - l_idx, d_emb_i)', True, 'import torch.nn as nn\n'), ((101, 33, 101, 62), 
'torch.zeros', 'torch.zeros', ({(101, 45, 101, 52): 'n_token', (101, 54, 101, 61): 'd_embed'}, {}), '(n_token, d_embed)', False, 'import torch\n'), ((116, 33, 116, 59), 'torch.zeros', 'torch.zeros', ({(116, 45, 116, 58): '(r_idx - l_idx)'}, {}), '(r_idx - l_idx)', False, 'import torch\n'), ((266, 51, 266, 85), 'torch.FloatTensor', 'torch.FloatTensor', ({(266, 69, 266, 75): 'd_proj', (266, 77, 266, 84): 'd_embed'}, {}), '(d_proj, d_embed)', False, 'import torch\n'), ((276, 51, 276, 85), 'torch.FloatTensor', 'torch.FloatTensor', ({(276, 69, 276, 75): 'd_proj', (276, 77, 276, 84): 'd_emb_i'}, {}), '(d_proj, d_emb_i)', False, 'import torch\n'), ((316, 20, 316, 60), 'torch.arange', 'torch.arange', (), '', False, 'import torch\n'), ((112, 37, 112, 65), 'torch.zeros', 'torch.zeros', ({(112, 49, 112, 55): 'd_proj', (112, 57, 112, 64): 'd_emb_i'}, {}), '(d_proj, d_emb_i)', False, 'import torch\n'), ((120, 37, 120, 72), 'torch.zeros', 'torch.zeros', ({(120, 49, 120, 62): '(r_idx - l_idx)', (120, 64, 120, 71): 'd_emb_i'}, {}), '(r_idx - l_idx, d_emb_i)', False, 'import torch\n'), ((89, 41, 89, 69), 'torch.zeros', 'torch.zeros', ({(89, 53, 89, 59): 'd_proj', (89, 61, 89, 68): 'd_embed'}, {}), '(d_proj, d_embed)', False, 'import torch\n'), ((171, 19, 171, 47), 'torch.nn.functional.log_softmax', 'F.log_softmax', (), '', True, 'import torch.nn.functional as F\n')] |
CityOfPhiladelphia/the-el | the_el/cli.py | e3a97afc55d41f2e5fd76cef60ad9393dfa23547 | import json
import csv
import sys
import os
import re
import codecs
import logging
from logging.config import dictConfig
import click
import yaml
from sqlalchemy import create_engine
from jsontableschema_sql import Storage
from smart_open import smart_open
from . import postgres
from . import carto
csv.field_size_limit(sys.maxsize)
def get_logger(logging_config):
try:
with open(logging_config) as file:
config = yaml.load(file)
dictConfig(config)
except:
FORMAT = '[%(asctime)-15s] %(levelname)s [%(name)s] %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO, stream=sys.stderr)
logger = logging.getLogger('the_el')
def exception_handler(type, value, tb):
logger.exception("Uncaught exception: {}".format(str(value)), exc_info=(type, value, tb))
sys.excepthook = exception_handler
return logger
@click.group()
def main():
pass
def get_connection_string(connection_string):
connection_string = os.getenv('CONNECTION_STRING', connection_string)
if connection_string == None:
raise Exception('`CONNECTION_STRING` environment variable or `--connection-string` option required')
return connection_string
def create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=None, to_srid=None):
engine = create_engine(connection_string)
storage = Storage(engine, dbschema=db_schema, geometry_support=geometry_support, from_srid=from_srid, to_srid=to_srid, views=True)
return engine, storage
def fopen(file, mode='r'):
if file == None:
if mode == 'r':
return sys.stdin
elif mode == 'w':
return sys.stdout
else:
return smart_open(file, mode=mode)
def get_table_schema(table_schema_path):
with fopen(table_schema_path) as file:
contents = file.read()
if not isinstance(contents, str):
contents = contents.decode('utf-8')
return json.loads(contents)
@main.command()
@click.argument('table_name')
@click.option('--connection-string')
@click.option('-o','--output-file')
@click.option('--db-schema')
@click.option('--geometry-support')
def describe_table(table_name, connection_string, output_file, db_schema, geometry_support):
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support)
descriptor = storage.describe(table_name)
with fopen(output_file, mode='w') as file:
json.dump(descriptor, file)
@main.command()
@click.argument('table_name')
@click.argument('table_schema_path')
@click.option('--connection-string')
@click.option('--db-schema')
@click.option('--indexes-fields')
@click.option('--geometry-support')
@click.option('--if-not-exists', is_flag=True, default=False)
@click.option('--logging-config', default='logging_config.conf')
def create_table(table_name,
table_schema_path,
connection_string,
db_schema,
indexes_fields,
geometry_support,
if_not_exists,
logging_config):
logger = get_logger(logging_config)
table_schema = get_table_schema(table_schema_path)
if indexes_fields != None:
indexes_fields = indexes_fields.split(',')
if re.match(carto.carto_connection_string_regex, connection_string) != None:
load_postgis = geometry_support == 'postgis'
logger.info('{} - Creating table using Carto'.format(table_name))
return carto.create_table(logger, table_name, load_postgis, table_schema, if_not_exists, indexes_fields, connection_string)
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support)
logger.info('{} - Creating table using SQLAlchemy'.format(table_name))
storage.create(table_name, table_schema, indexes_fields=indexes_fields)
@main.command()
@click.argument('table_name')
@click.option('--table-schema-path')
@click.option('--connection-string')
@click.option('-f','--input-file')
@click.option('--db-schema')
@click.option('--geometry-support')
@click.option('--from-srid')
@click.option('--skip-headers', is_flag=True)
@click.option('--indexes-fields')
@click.option('--upsert', is_flag=True)
@click.option('--truncate/--no-truncate', is_flag=True, default=False)
@click.option('--logging-config', default='logging_config.conf')
def write(table_name,
table_schema_path,
connection_string,
input_file,
db_schema,
geometry_support,
from_srid,
skip_headers,
indexes_fields,
upsert,
truncate,
logging_config):
logger = get_logger(logging_config)
table_schema = get_table_schema(table_schema_path)
## TODO: csv settings? use Frictionless Data csv standard?
    ## TODO: support line delimited json?
with fopen(input_file) as file:
rows = csv.reader(file)
if skip_headers:
next(rows)
if re.match(carto.carto_connection_string_regex, connection_string) != None:
load_postgis = geometry_support == 'postgis'
if indexes_fields != None:
indexes_fields = indexes_fields.split(',')
logger.info('{} - Writing to table using Carto'.format(table_name))
carto.load(logger,
db_schema,
table_name,
load_postgis,
table_schema,
connection_string,
rows,
indexes_fields,
truncate)
else:
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid)
## TODO: truncate? carto does. Makes this idempotent
logger.info('{} - Writing to table using SQLAlchemy'.format(table_name))
if table_schema_path != None:
table_schema = get_table_schema(table_schema_path)
storage.describe(table_name, descriptor=table_schema)
else:
storage.describe(table_name)
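        # Pick a write path: dedicated upsert helper, raw COPY for plain psycopg2
        # connections without geometry support, otherwise generic row writes via storage.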
if upsert:
postgres.upsert(engine, db_schema, table_name, table_schema, rows)
elif geometry_support == None and engine.dialect.driver == 'psycopg2':
postgres.copy_from(engine, table_name, table_schema, rows)
else:
storage.write(table_name, rows)
@main.command()
@click.argument('table_name')
@click.option('--connection-string')
@click.option('-o','--output-file')
@click.option('--db-schema')
@click.option('--geometry-support')
@click.option('--from-srid')
@click.option('--to-srid')
@click.option('--logging-config', default='logging_config.conf')
def read(table_name, connection_string, output_file, db_schema, geometry_support, from_srid, to_srid, logging_config):
logger = get_logger(logging_config)
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid, to_srid=to_srid)
## TODO: csv settings? use Frictionless Data csv standard?
## TODO: support line delimited json?
with fopen(output_file, mode='w') as file:
writer = csv.writer(file)
descriptor = storage.describe(table_name)
fields = map(lambda x: x['name'], descriptor['fields'])
writer.writerow(fields)
if geometry_support == None and engine.dialect.driver == 'psycopg2':
postgres.copy_to(engine, table_name, file)
else:
for row in storage.iter(table_name):
row_out = []
for field in row:
if isinstance(field, dict) or isinstance(field, list):
field = json.dumps(field)
row_out.append(field)
writer.writerow(row_out)
@main.command()
@click.argument('new_table_name')
@click.argument('old_table_name')
@click.option('--connection-string')
@click.option('--db-schema')
@click.option('--select-users', help='Users to grant SELECT on updated table')
@click.option('--logging-config', default='logging_config.conf')
def swap_table(new_table_name, old_table_name, connection_string, db_schema, select_users, logging_config):
logger = get_logger(logging_config)
if re.match(carto.carto_connection_string_regex, connection_string) != None:
if select_users != None:
select_users = select_users.split(',')
else:
select_users = []
logger.info('Swapping tables using Carto: {} - {}'.format(new_table_name, old_table_name))
return carto.swap_table(logger, db_schema, new_table_name, old_table_name, select_users, connection_string)
connection_string = get_connection_string(connection_string)
engine = create_engine(connection_string)
if engine.dialect.driver == 'psycopg2':
logger.info('Swapping tables using psycopg2: {} - {}'.format(new_table_name, old_table_name))
conn = engine.raw_connection()
try:
with conn.cursor() as cur:
sql = 'ALTER TABLE "{}" RENAME TO "{}_old";'.format(old_table_name, old_table_name) +\
'ALTER TABLE "{}" RENAME TO "{}";'.format(new_table_name, old_table_name) +\
'DROP TABLE "{}_old";'.format(old_table_name)
cur.execute(sql)
conn.commit()
except:
conn.rollback()
raise
conn.close()
elif engine.dialect.driver == 'cx_oracle':
logger.info('Swapping tables using cx_Oracle: {} - {}'.format(new_table_name, old_table_name))
conn = engine.connect()
if select_users != None:
select_users = select_users.split(',')
else:
select_users = []
grants_sql = []
for user in select_users:
grants_sql.append('GRANT SELECT ON {} TO {}'.format(old_table_name, user.strip()))
# Oracle does not allow table modification within a transaction, so make individual transactions:
sql1 = 'ALTER TABLE {} RENAME TO {}_old'.format(old_table_name, old_table_name)
sql2 = 'ALTER TABLE {} RENAME TO {}'.format(new_table_name, old_table_name)
sql3 = 'DROP TABLE {}_old'.format(old_table_name)
try:
conn.execute(sql1)
except:
logger.error("Could not rename {} table. Does it exist?".format(old_table_name))
raise
try:
conn.execute(sql2)
except:
logger.error("Could not rename {} table. Does it exist?".format(new_table_name))
rb_sql = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name)
conn.execute(rb_sql)
raise
try:
conn.execute(sql3)
except:
logger.error("Could not drop {}_old table. Do you have permission?".format(old_table_name))
rb_sql1 = 'DROP TABLE {}'.format(old_table_name)
conn.execute(rb_sql1)
rb_sql2 = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name)
conn.execute(rb_sql2)
raise
try:
for sql in grants_sql:
conn.execute(sql)
except:
logger.error("Could not grant all permissions to {}.".format(old_table_name))
raise
else:
raise Exception('`{}` not supported by swap_table'.format(engine.dialect.driver))
| [((19, 0, 19, 33), 'csv.field_size_limit', 'csv.field_size_limit', ({(19, 21, 19, 32): 'sys.maxsize'}, {}), '(sys.maxsize)', False, 'import csv\n'), ((39, 1, 39, 14), 'click.group', 'click.group', ({}, {}), '()', False, 'import click\n'), ((71, 1, 71, 29), 'click.argument', 'click.argument', ({(71, 16, 71, 28): '"""table_name"""'}, {}), "('table_name')", False, 'import click\n'), ((72, 1, 72, 36), 'click.option', 'click.option', ({(72, 14, 72, 35): '"""--connection-string"""'}, {}), "('--connection-string')", False, 'import click\n'), ((73, 1, 73, 35), 'click.option', 'click.option', ({(73, 14, 73, 18): '"""-o"""', (73, 19, 73, 34): '"""--output-file"""'}, {}), "('-o', '--output-file')", False, 'import click\n'), ((74, 1, 74, 28), 'click.option', 'click.option', ({(74, 14, 74, 27): '"""--db-schema"""'}, {}), "('--db-schema')", False, 'import click\n'), ((75, 1, 75, 35), 'click.option', 'click.option', ({(75, 14, 75, 34): '"""--geometry-support"""'}, {}), "('--geometry-support')", False, 'import click\n'), ((86, 1, 86, 29), 'click.argument', 'click.argument', ({(86, 16, 86, 28): '"""table_name"""'}, {}), "('table_name')", False, 'import click\n'), ((87, 1, 87, 36), 'click.argument', 'click.argument', ({(87, 16, 87, 35): '"""table_schema_path"""'}, {}), "('table_schema_path')", False, 'import click\n'), ((88, 1, 88, 36), 'click.option', 'click.option', ({(88, 14, 88, 35): '"""--connection-string"""'}, {}), "('--connection-string')", False, 'import click\n'), ((89, 1, 89, 28), 'click.option', 'click.option', ({(89, 14, 89, 27): '"""--db-schema"""'}, {}), "('--db-schema')", False, 'import click\n'), ((90, 1, 90, 33), 'click.option', 'click.option', ({(90, 14, 90, 32): '"""--indexes-fields"""'}, {}), "('--indexes-fields')", False, 'import click\n'), ((91, 1, 91, 35), 'click.option', 'click.option', ({(91, 14, 91, 34): '"""--geometry-support"""'}, {}), "('--geometry-support')", False, 'import click\n'), ((92, 1, 92, 61), 'click.option', 'click.option', (), '', False, 'import click\n'), ((93, 1, 93, 64), 'click.option', 'click.option', (), '', False, 'import click\n'), ((122, 1, 122, 29), 'click.argument', 'click.argument', ({(122, 16, 122, 28): '"""table_name"""'}, {}), "('table_name')", False, 'import click\n'), ((123, 1, 123, 36), 'click.option', 'click.option', ({(123, 14, 123, 35): '"""--table-schema-path"""'}, {}), "('--table-schema-path')", False, 'import click\n'), ((124, 1, 124, 36), 'click.option', 'click.option', ({(124, 14, 124, 35): '"""--connection-string"""'}, {}), "('--connection-string')", False, 'import click\n'), ((125, 1, 125, 34), 'click.option', 'click.option', ({(125, 14, 125, 18): '"""-f"""', (125, 19, 125, 33): '"""--input-file"""'}, {}), "('-f', '--input-file')", False, 'import click\n'), ((126, 1, 126, 28), 'click.option', 'click.option', ({(126, 14, 126, 27): '"""--db-schema"""'}, {}), "('--db-schema')", False, 'import click\n'), ((127, 1, 127, 35), 'click.option', 'click.option', ({(127, 14, 127, 34): '"""--geometry-support"""'}, {}), "('--geometry-support')", False, 'import click\n'), ((128, 1, 128, 28), 'click.option', 'click.option', ({(128, 14, 128, 27): '"""--from-srid"""'}, {}), "('--from-srid')", False, 'import click\n'), ((129, 1, 129, 45), 'click.option', 'click.option', (), '', False, 'import click\n'), ((130, 1, 130, 33), 'click.option', 'click.option', ({(130, 14, 130, 32): '"""--indexes-fields"""'}, {}), "('--indexes-fields')", False, 'import click\n'), ((131, 1, 131, 39), 'click.option', 'click.option', (), '', False, 'import click\n'), ((132, 1, 132, 
70), 'click.option', 'click.option', (), '', False, 'import click\n'), ((133, 1, 133, 64), 'click.option', 'click.option', (), '', False, 'import click\n'), ((198, 1, 198, 29), 'click.argument', 'click.argument', ({(198, 16, 198, 28): '"""table_name"""'}, {}), "('table_name')", False, 'import click\n'), ((199, 1, 199, 36), 'click.option', 'click.option', ({(199, 14, 199, 35): '"""--connection-string"""'}, {}), "('--connection-string')", False, 'import click\n'), ((200, 1, 200, 35), 'click.option', 'click.option', ({(200, 14, 200, 18): '"""-o"""', (200, 19, 200, 34): '"""--output-file"""'}, {}), "('-o', '--output-file')", False, 'import click\n'), ((201, 1, 201, 28), 'click.option', 'click.option', ({(201, 14, 201, 27): '"""--db-schema"""'}, {}), "('--db-schema')", False, 'import click\n'), ((202, 1, 202, 35), 'click.option', 'click.option', ({(202, 14, 202, 34): '"""--geometry-support"""'}, {}), "('--geometry-support')", False, 'import click\n'), ((203, 1, 203, 28), 'click.option', 'click.option', ({(203, 14, 203, 27): '"""--from-srid"""'}, {}), "('--from-srid')", False, 'import click\n'), ((204, 1, 204, 26), 'click.option', 'click.option', ({(204, 14, 204, 25): '"""--to-srid"""'}, {}), "('--to-srid')", False, 'import click\n'), ((205, 1, 205, 64), 'click.option', 'click.option', (), '', False, 'import click\n'), ((234, 1, 234, 33), 'click.argument', 'click.argument', ({(234, 16, 234, 32): '"""new_table_name"""'}, {}), "('new_table_name')", False, 'import click\n'), ((235, 1, 235, 33), 'click.argument', 'click.argument', ({(235, 16, 235, 32): '"""old_table_name"""'}, {}), "('old_table_name')", False, 'import click\n'), ((236, 1, 236, 36), 'click.option', 'click.option', ({(236, 14, 236, 35): '"""--connection-string"""'}, {}), "('--connection-string')", False, 'import click\n'), ((237, 1, 237, 28), 'click.option', 'click.option', ({(237, 14, 237, 27): '"""--db-schema"""'}, {}), "('--db-schema')", False, 'import click\n'), ((238, 1, 238, 78), 'click.option', 'click.option', (), '', False, 'import click\n'), ((239, 1, 239, 64), 'click.option', 'click.option', (), '', False, 'import click\n'), ((30, 13, 30, 40), 'logging.getLogger', 'logging.getLogger', ({(30, 31, 30, 39): '"""the_el"""'}, {}), "('the_el')", False, 'import logging\n'), ((44, 24, 44, 73), 'os.getenv', 'os.getenv', ({(44, 34, 44, 53): '"""CONNECTION_STRING"""', (44, 55, 44, 72): 'connection_string'}, {}), "('CONNECTION_STRING', connection_string)", False, 'import os\n'), ((50, 13, 50, 45), 'sqlalchemy.create_engine', 'create_engine', ({(50, 27, 50, 44): 'connection_string'}, {}), '(connection_string)', False, 'from sqlalchemy import create_engine\n'), ((51, 14, 51, 134), 'jsontableschema_sql.Storage', 'Storage', (), '', False, 'from jsontableschema_sql import Storage\n'), ((252, 13, 252, 45), 'sqlalchemy.create_engine', 'create_engine', ({(252, 27, 252, 44): 'connection_string'}, {}), '(connection_string)', False, 'from sqlalchemy import create_engine\n'), ((25, 8, 25, 26), 'logging.config.dictConfig', 'dictConfig', ({(25, 19, 25, 25): 'config'}, {}), '(config)', False, 'from logging.config import dictConfig\n'), ((61, 15, 61, 42), 'smart_open.smart_open', 'smart_open', (), '', False, 'from smart_open import smart_open\n'), ((68, 15, 68, 35), 'json.loads', 'json.loads', ({(68, 26, 68, 34): 'contents'}, {}), '(contents)', False, 'import json\n'), ((83, 8, 83, 35), 'json.dump', 'json.dump', ({(83, 18, 83, 28): 'descriptor', (83, 30, 83, 34): 'file'}, {}), '(descriptor, file)', False, 'import json\n'), ((109, 7, 109, 71), 
're.match', 're.match', ({(109, 16, 109, 51): 'carto.carto_connection_string_regex', (109, 53, 109, 70): 'connection_string'}, {}), '(carto.carto_connection_string_regex, connection_string)', False, 'import re\n'), ((153, 15, 153, 31), 'csv.reader', 'csv.reader', ({(153, 26, 153, 30): 'file'}, {}), '(file)', False, 'import csv\n'), ((216, 17, 216, 33), 'csv.writer', 'csv.writer', ({(216, 28, 216, 32): 'file'}, {}), '(file)', False, 'import csv\n'), ((243, 7, 243, 71), 're.match', 're.match', ({(243, 16, 243, 51): 'carto.carto_connection_string_regex', (243, 53, 243, 70): 'connection_string'}, {}), '(carto.carto_connection_string_regex, connection_string)', False, 'import re\n'), ((24, 21, 24, 36), 'yaml.load', 'yaml.load', ({(24, 31, 24, 35): 'file'}, {}), '(file)', False, 'import yaml\n'), ((28, 8, 28, 81), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((158, 11, 158, 75), 're.match', 're.match', ({(158, 20, 158, 55): 'carto.carto_connection_string_regex', (158, 57, 158, 74): 'connection_string'}, {}), '(carto.carto_connection_string_regex, connection_string)', False, 'import re\n'), ((229, 32, 229, 49), 'json.dumps', 'json.dumps', ({(229, 43, 229, 48): 'field'}, {}), '(field)', False, 'import json\n')] |
vadam5/NeMo | examples/asr/experimental/speech_to_text_sclite.py | 3c5db09539293c3c19a6bb7437011f91261119af | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is based on speech_to_text_infer.py and allows you to score the hypotheses
with sclite. A local installation from https://github.com/usnistgov/SCTK is required.
Hypotheses and references are first saved in trn format and are scored after applying a glm
file (if provided).
"""
import errno
import json
import os
import subprocess
from argparse import ArgumentParser
import torch
from nemo.collections.asr.metrics.wer import WER
from nemo.collections.asr.models import EncDecCTCModel
from nemo.utils import logging
try:
from torch.cuda.amp import autocast
except ImportError:
from contextlib import contextmanager
@contextmanager
def autocast(enabled=None):
yield
def score_with_sctk(sctk_dir, ref_fname, hyp_fname, out_dir, glm=""):
sclite_path = os.path.join(sctk_dir, "bin", "sclite")
if not os.path.exists(sclite_path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), sclite_path)
# apply glm
if os.path.exists(glm):
rfilter_path = os.path.join(sctk_dir, "bin", "rfilter1")
if not os.path.exists(rfilter_path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), rfilter_path)
hypglm = os.path.join(out_dir, os.path.basename(hyp_fname)) + ".glm"
rfilt_cmd = [rfilter_path] + [glm]
with open(hypglm, "w") as hypf, open(hyp_fname, "r") as hyp_in:
subprocess.run(rfilt_cmd, stdin=hyp_in, stdout=hypf)
refglm = os.path.join(out_dir, os.path.basename(ref_fname)) + ".glm"
with open(refglm, "w") as reff, open(ref_fname, "r") as ref_in:
subprocess.run(rfilt_cmd, stdin=ref_in, stdout=reff)
else:
refglm = ref_fname
hypglm = hyp_fname
_ = subprocess.check_output(f"{sclite_path} -h {hypglm} -r {refglm} -i wsj -o all", shell=True)
can_gpu = torch.cuda.is_available()
def get_utt_info(manifest_path):
info_list = []
with open(manifest_path, "r") as utt_f:
for line in utt_f:
utt = json.loads(line)
info_list.append(utt)
return info_list
def main():
parser = ArgumentParser()
parser.add_argument(
"--asr_model", type=str, default="QuartzNet15x5Base-En", required=False, help="Pass: 'QuartzNet15x5Base-En'",
)
parser.add_argument("--dataset", type=str, required=True, help="path to evaluation data")
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument(
"--dont_normalize_text",
default=False,
action='store_true',
help="Turn off trasnscript normalization. Recommended for non-English.",
)
parser.add_argument("--out_dir", type=str, required=True, help="Destination dir for output files")
parser.add_argument("--sctk_dir", type=str, required=False, default="", help="Path to sctk root dir")
parser.add_argument("--glm", type=str, required=False, default="", help="Path to glm file")
args = parser.parse_args()
torch.set_grad_enabled(False)
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
use_sctk = os.path.exists(args.sctk_dir)
if args.asr_model.endswith('.nemo'):
logging.info(f"Using local ASR model from {args.asr_model}")
asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model)
else:
logging.info(f"Using NGC cloud ASR model {args.asr_model}")
asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model)
asr_model.setup_test_data(
test_data_config={
'sample_rate': 16000,
'manifest_filepath': args.dataset,
'labels': asr_model.decoder.vocabulary,
'batch_size': args.batch_size,
'normalize_transcripts': not args.dont_normalize_text,
}
)
if can_gpu:
asr_model = asr_model.cuda()
asr_model.eval()
labels_map = dict([(i, asr_model.decoder.vocabulary[i]) for i in range(len(asr_model.decoder.vocabulary))])
wer = WER(vocabulary=asr_model.decoder.vocabulary)
hypotheses = []
references = []
all_log_probs = []
for test_batch in asr_model.test_dataloader():
if can_gpu:
test_batch = [x.cuda() for x in test_batch]
with autocast():
log_probs, encoded_len, greedy_predictions = asr_model(
input_signal=test_batch[0], input_signal_length=test_batch[1]
)
for r in log_probs.cpu().numpy():
all_log_probs.append(r)
hypotheses += wer.ctc_decoder_predictions_tensor(greedy_predictions)
for batch_ind in range(greedy_predictions.shape[0]):
reference = ''.join([labels_map[c] for c in test_batch[2][batch_ind].cpu().detach().numpy()])
references.append(reference)
del test_batch
info_list = get_utt_info(args.dataset)
hypfile = os.path.join(args.out_dir, "hyp.trn")
reffile = os.path.join(args.out_dir, "ref.trn")
with open(hypfile, "w") as hyp_f, open(reffile, "w") as ref_f:
for i in range(len(hypotheses)):
utt_id = os.path.splitext(os.path.basename(info_list[i]['audio_filepath']))[0]
# rfilter in sctk likes each transcript to have a space at the beginning
hyp_f.write(" " + hypotheses[i] + " (" + utt_id + ")" + "\n")
ref_f.write(" " + references[i] + " (" + utt_id + ")" + "\n")
if use_sctk:
score_with_sctk(args.sctk_dir, reffile, hypfile, args.out_dir, glm=args.glm)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| [((67, 10, 67, 35), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((45, 18, 45, 57), 'os.path.join', 'os.path.join', ({(45, 31, 45, 39): 'sctk_dir', (45, 41, 45, 46): '"""bin"""', (45, 48, 45, 56): '"""sclite"""'}, {}), "(sctk_dir, 'bin', 'sclite')", False, 'import os\n'), ((49, 7, 49, 26), 'os.path.exists', 'os.path.exists', ({(49, 22, 49, 25): 'glm'}, {}), '(glm)', False, 'import os\n'), ((64, 8, 64, 100), 'subprocess.check_output', 'subprocess.check_output', (), '', False, 'import subprocess\n'), ((81, 13, 81, 29), 'argparse.ArgumentParser', 'ArgumentParser', ({}, {}), '()', False, 'from argparse import ArgumentParser\n'), ((97, 4, 97, 33), 'torch.set_grad_enabled', 'torch.set_grad_enabled', ({(97, 27, 97, 32): '(False)'}, {}), '(False)', False, 'import torch\n'), ((102, 15, 102, 44), 'os.path.exists', 'os.path.exists', ({(102, 30, 102, 43): 'args.sctk_dir'}, {}), '(args.sctk_dir)', False, 'import os\n'), ((124, 10, 124, 54), 'nemo.collections.asr.metrics.wer.WER', 'WER', (), '', False, 'from nemo.collections.asr.metrics.wer import WER\n'), ((144, 14, 144, 51), 'os.path.join', 'os.path.join', ({(144, 27, 144, 39): 'args.out_dir', (144, 41, 144, 50): '"""hyp.trn"""'}, {}), "(args.out_dir, 'hyp.trn')", False, 'import os\n'), ((145, 14, 145, 51), 'os.path.join', 'os.path.join', ({(145, 27, 145, 39): 'args.out_dir', (145, 41, 145, 50): '"""ref.trn"""'}, {}), "(args.out_dir, 'ref.trn')", False, 'import os\n'), ((46, 11, 46, 38), 'os.path.exists', 'os.path.exists', ({(46, 26, 46, 37): 'sclite_path'}, {}), '(sclite_path)', False, 'import os\n'), ((50, 23, 50, 64), 'os.path.join', 'os.path.join', ({(50, 36, 50, 44): 'sctk_dir', (50, 46, 50, 51): '"""bin"""', (50, 53, 50, 63): '"""rfilter1"""'}, {}), "(sctk_dir, 'bin', 'rfilter1')", False, 'import os\n'), ((99, 11, 99, 39), 'os.path.exists', 'os.path.exists', ({(99, 26, 99, 38): 'args.out_dir'}, {}), '(args.out_dir)', False, 'import os\n'), ((100, 8, 100, 33), 'os.makedirs', 'os.makedirs', ({(100, 20, 100, 32): 'args.out_dir'}, {}), '(args.out_dir)', False, 'import os\n'), ((105, 8, 105, 68), 'nemo.utils.logging.info', 'logging.info', ({(105, 21, 105, 67): 'f"""Using local ASR model from {args.asr_model}"""'}, {}), "(f'Using local ASR model from {args.asr_model}')", False, 'from nemo.utils import logging\n'), ((106, 20, 106, 76), 'nemo.collections.asr.models.EncDecCTCModel.restore_from', 'EncDecCTCModel.restore_from', (), '', False, 'from nemo.collections.asr.models import EncDecCTCModel\n'), ((108, 8, 108, 67), 'nemo.utils.logging.info', 'logging.info', ({(108, 21, 108, 66): 'f"""Using NGC cloud ASR model {args.asr_model}"""'}, {}), "(f'Using NGC cloud ASR model {args.asr_model}')", False, 'from nemo.utils import logging\n'), ((109, 20, 109, 77), 'nemo.collections.asr.models.EncDecCTCModel.from_pretrained', 'EncDecCTCModel.from_pretrained', (), '', False, 'from nemo.collections.asr.models import EncDecCTCModel\n'), ((47, 46, 47, 71), 'os.strerror', 'os.strerror', ({(47, 58, 47, 70): 'errno.ENOENT'}, {}), '(errno.ENOENT)', False, 'import os\n'), ((51, 15, 51, 43), 'os.path.exists', 'os.path.exists', ({(51, 30, 51, 42): 'rfilter_path'}, {}), '(rfilter_path)', False, 'import os\n'), ((56, 12, 56, 64), 'subprocess.run', 'subprocess.run', (), '', False, 'import subprocess\n'), ((59, 12, 59, 64), 'subprocess.run', 'subprocess.run', (), '', False, 'import subprocess\n'), ((74, 18, 74, 34), 'json.loads', 'json.loads', ({(74, 29, 74, 33): 'line'}, {}), '(line)', False, 'import json\n'), ((131, 13, 131, 
23), 'torch.cuda.amp.autocast', 'autocast', ({}, {}), '()', False, 'from torch.cuda.amp import autocast\n'), ((52, 50, 52, 75), 'os.strerror', 'os.strerror', ({(52, 62, 52, 74): 'errno.ENOENT'}, {}), '(errno.ENOENT)', False, 'import os\n'), ((53, 39, 53, 66), 'os.path.basename', 'os.path.basename', ({(53, 56, 53, 65): 'hyp_fname'}, {}), '(hyp_fname)', False, 'import os\n'), ((57, 39, 57, 66), 'os.path.basename', 'os.path.basename', ({(57, 56, 57, 65): 'ref_fname'}, {}), '(ref_fname)', False, 'import os\n'), ((148, 38, 148, 86), 'os.path.basename', 'os.path.basename', ({(148, 55, 148, 85): "info_list[i]['audio_filepath']"}, {}), "(info_list[i]['audio_filepath'])", False, 'import os\n')] |
pj0620/acca-video-series | accalib/utils.py | 1b09548014cc899ded5a8fdd1293f7fc121a98bc | from manimlib.imports import *
from manimlib.utils import bezier
import numpy as np
class VectorInterpolator:
def __init__(self,points):
self.points = points
self.n = len(self.points)
self.dists = [0]
for i in range(len(self.points)):
self.dists += [np.linalg.norm(
self.points[i] -
self.points[(i+1) % self.n]
)+self.dists[i]]
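    # self.dists is the cumulative arc length along the closed polyline (wrapping from
    # the last point back to the first), so self.dists[-1] is the total perimeter used
    # by interpolate() to turn alpha in [0, 1] into a distance along the path.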
def interpolate(self,alpha):
dist = alpha*self.dists[-1]
idx = self.interpolate_index(dist)
mult = (dist - self.dists[idx])/np.linalg.norm(self.points[(idx+1)%self.n]-self.points[idx])
return self.points[idx] + \
mult*(self.points[(idx+1)%self.n]-self.points[idx])
def interpolate_index(self,dist):
def is_solution(idx):
if idx == self.n-1:
return self.dists[idx] <= dist
else:
                return ((self.dists[idx] <= dist) and
                        (self.dists[(idx+1)%self.n] >= dist))
# binary search
step_size=int(self.n / 4)
cur=int(self.n / 2)
while not is_solution(cur):
if self.dists[cur] > dist:
cur -= step_size
else:
cur += step_size
step_size = max(int(step_size/2), 1)
return cur | [((20, 40, 20, 100), 'numpy.linalg.norm', 'np.linalg.norm', ({(20, 55, 20, 99): '(self.points[(idx + 1) % self.n] - self.points[idx])'}, {}), '(self.points[(idx + 1) % self.n] - self.points[idx])', True, 'import numpy as np\n'), ((12, 27, 15, 13), 'numpy.linalg.norm', 'np.linalg.norm', ({(13, 16, 14, 43): '(self.points[i] - self.points[(i + 1) % self.n])'}, {}), '(self.points[i] - self.points[(i + 1) % self.n])', True, 'import numpy as np\n')] |
def-mycroft/rapid-plotly | setup.py | 87ba5d9e6894e2c3288435aae9a377647b006e79 | from setuptools import setup
setup(name='rapid_plotly',
version='0.1',
description='Convenience functions to rapidly create beautiful Plotly graphs',
author='Joseph Dasenbrock',
author_email='[email protected]',
packages=['rapid_plotly'],
zip_safe=False)
| [((3, 0, 9, 21), 'setuptools.setup', 'setup', (), '', False, 'from setuptools import setup\n')] |
enerqi/bridge-bidding-systems | dodo.py | 30ea2bf6f8bc0b786df4de8571063509d971236f | #! /usr/bin/doit -f
# https://pydoit.org
# `pip install [--user] doit` adds `doit.exe` to the PATH
# - Note: `doit auto` (the file watcher) only works on Linux/Mac
# - All commands are relative to dodo.py (doit runs in the working dir of dodo.py
#   even if run from a different directory via `doit -f path/to/dodo.py`)
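# - Illustrative invocations (the task names are defined in this file):
#     doit list                  # show available tasks
#     doit bml2html              # regenerate html for changed bml files
#     doit bmlcss publish_main_bidding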
from glob import glob
import json
from os import environ
from os.path import abspath, basename, dirname, exists, expanduser, join, splitext
from shutil import copyfile
from typing import Iterator, List, NewType, Optional
from doit.tools import title_with_actions
Path = NewType("Path", str)
home = Path(expanduser("~"))
bml_tools_dir = Path(environ.get("BML_TOOLS_DIRECTORY", join(home, "dev/bml")))
bml_includes_cache_file = ".include-deps.json"
def bml_include_dependencies(bml_path: Path) -> List[Path]:
# bml files can include others, so spend time scanning every bml file
# for new include directives every time a bml file is saved
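    # e.g. a bml line such as `#INCLUDE conventions/two-over-one.bml` (illustrative name)
    # makes that file a dependency of the bml file that includes it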
def includes(file_handle) -> Iterator[Path]:
for line in file_handle.readlines():
line = line.strip()
if line.startswith("#INCLUDE"):
include_directive_tokens = line.split(maxsplit=1)
if len(include_directive_tokens) > 1:
# We assume the file name is not quoted, just a free form path string
included_file = include_directive_tokens[1].strip()
yield Path(included_file)
with open(bml_path, encoding='utf-8') as f:
unique_deps = {include for include in includes(f) if include != bml_path}
return list(unique_deps)
def read_bml_includes_cache(bml_path: Path) -> Optional[List[Path]]:
if not exists(bml_includes_cache_file):
return None
with open(bml_includes_cache_file, encoding='utf-8') as f:
try:
existing_deps = json.load(f)
except Exception:
# Manually edited messed up json perhaps
return None
if bml_path in existing_deps:
return existing_deps[bml_path]
else:
return None # Manually edited perhaps (assuming we got the task order correct)
def update_bml_includes_cache(bml_path: Path, bml_deps: List[Path]):
existing_deps = {}
if exists(bml_includes_cache_file):
with open(bml_includes_cache_file, encoding='utf-8') as f:
try:
existing_deps = json.load(f)
except Exception:
pass
existing_deps[bml_path] = bml_deps
with open(bml_includes_cache_file, "w", encoding='utf-8') as f:
json.dump(existing_deps, f, indent=4)
def task_bml_include_cache():
"""Populate the bml include cache."""
input_bml_file_paths = glob("*.bml")
def calc_include_deps_and_cache(file_dep) -> None:
bml_path = Path(file_dep)
bml_deps = bml_include_dependencies(bml_path)
update_bml_includes_cache(bml_path, bml_deps)
for bml_path in input_bml_file_paths:
# We don't use a target as doit cannot deal with more than one input file affecting the same output file
# and we are using a single cache file instead of one cache file per input file.
# This does mean that we are using the order of the tasks in this file to have the include cache updated
# before the html task reads the include cache as part of determining changing file dependencies
# The html task itself cannot use the include cache file as a doit file_dep dependency as it is being updated
# by other unrelated bml file changes.
        # Actually, using a different notion of an update (not just tracking file modifications) is another feature of
        # doit that could be applied if interested enough.
yield {
'name': basename(bml_path),
'actions': [(calc_include_deps_and_cache, [bml_path])],
'file_dep': [bml_path],
'title': title_with_actions
}
def task_bml2html():
"""Create html file from bridge bidding markup language file."""
bml2html_path = Path(join(bml_tools_dir, "bml2html.py"))
input_bml_file_paths = glob("*.bml")
def html_output_path(bml_path: Path) -> Path:
return Path(splitext(bml_path)[0] + ".html")
for bml_path in input_bml_file_paths:
bml_deps = read_bml_includes_cache(bml_path)
if bml_deps is None:
bml_deps = bml_include_dependencies(bml_path)
update_bml_includes_cache(bml_path, bml_deps)
yield {
'name': basename(bml_path),
'actions': [f"python {bml2html_path} {bml_path}"],
'file_dep': [bml_path] + bml_deps,
'targets': [html_output_path(bml_path)],
'title': title_with_actions
}
def task_bmlcss():
"""Copy the bml CSS style sheet to this directory."""
css_basename = "bml.css"
src_css_file = Path(join(bml_tools_dir, css_basename))
def copy_file() -> None:
# OS neutral compared to running a shell command
copyfile(src_css_file, css_basename)
return {
'actions': [copy_file],
'file_dep': [src_css_file],
'targets': [css_basename],
'title': title_with_actions
}
def task_publish_main_bidding():
"""Copy the main bidding html and css document to the web server root."""
src_file = "bidding-system.html"
dst_file = f"W:/{src_file}"
css_file = "bml.css"
dst_css = f"W:/{css_file}"
def copy_file(dependencies, targets) -> None:
copyfile(dependencies[0], targets[0])
for src, dst in [(src_file, dst_file), (css_file, dst_css)]:
yield {
'name': basename(src),
'actions': [copy_file],
'file_dep': [src],
'targets': [dst],
'title': title_with_actions
}
| [((16, 7, 16, 27), 'typing.NewType', 'NewType', ({(16, 15, 16, 21): '"""Path"""', (16, 23, 16, 26): 'str'}, {}), "('Path', str)", False, 'from typing import Iterator, List, NewType, Optional\n'), ((18, 12, 18, 27), 'os.path.expanduser', 'expanduser', ({(18, 23, 18, 26): '"""~"""'}, {}), "('~')", False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, splitext\n'), ((60, 7, 60, 38), 'os.path.exists', 'exists', ({(60, 14, 60, 37): 'bml_includes_cache_file'}, {}), '(bml_includes_cache_file)', False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, splitext\n'), ((75, 27, 75, 40), 'glob.glob', 'glob', ({(75, 32, 75, 39): '"""*.bml"""'}, {}), "('*.bml')", False, 'from glob import glob\n'), ((102, 27, 102, 40), 'glob.glob', 'glob', ({(102, 32, 102, 39): '"""*.bml"""'}, {}), "('*.bml')", False, 'from glob import glob\n'), ((19, 56, 19, 77), 'os.path.join', 'join', ({(19, 61, 19, 65): 'home', (19, 67, 19, 76): '"""dev/bml"""'}, {}), "(home, 'dev/bml')", False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, splitext\n'), ((42, 11, 42, 42), 'os.path.exists', 'exists', ({(42, 18, 42, 41): 'bml_includes_cache_file'}, {}), '(bml_includes_cache_file)', False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, splitext\n'), ((70, 8, 70, 45), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((101, 25, 101, 59), 'os.path.join', 'join', ({(101, 30, 101, 43): 'bml_tools_dir', (101, 45, 101, 58): '"""bml2html.py"""'}, {}), "(bml_tools_dir, 'bml2html.py')", False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, splitext\n'), ((125, 24, 125, 57), 'os.path.join', 'join', ({(125, 29, 125, 42): 'bml_tools_dir', (125, 44, 125, 56): 'css_basename'}, {}), '(bml_tools_dir, css_basename)', False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, splitext\n'), ((129, 8, 129, 44), 'shutil.copyfile', 'copyfile', ({(129, 17, 129, 29): 'src_css_file', (129, 31, 129, 43): 'css_basename'}, {}), '(src_css_file, css_basename)', False, 'from shutil import copyfile\n'), ((147, 8, 147, 45), 'shutil.copyfile', 'copyfile', ({(147, 17, 147, 32): 'dependencies[0]', (147, 34, 147, 44): 'targets[0]'}, {}), '(dependencies[0], targets[0])', False, 'from shutil import copyfile\n'), ((47, 28, 47, 40), 'json.load', 'json.load', ({(47, 38, 47, 39): 'f'}, {}), '(f)', False, 'import json\n'), ((63, 32, 63, 44), 'json.load', 'json.load', ({(63, 42, 63, 43): 'f'}, {}), '(f)', False, 'import json\n'), ((92, 20, 92, 38), 'os.path.basename', 'basename', ({(92, 29, 92, 37): 'bml_path'}, {}), '(bml_path)', False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, splitext\n'), ((114, 20, 114, 38), 'os.path.basename', 'basename', ({(114, 29, 114, 37): 'bml_path'}, {}), '(bml_path)', False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, splitext\n'), ((151, 20, 151, 33), 'os.path.basename', 'basename', ({(151, 29, 151, 32): 'src'}, {}), '(src)', False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, splitext\n'), ((105, 20, 105, 38), 'os.path.splitext', 'splitext', ({(105, 29, 105, 37): 'bml_path'}, {}), '(bml_path)', False, 'from os.path import abspath, basename, dirname, exists, expanduser, join, splitext\n')] |
hustbill/Python-auto | learn/hard-way/EmptyFileError.py | 9f43bc2613a64a373927047ac52d8e90ffe644f8 | class EmptyFileError(Exception):
pass
filenames = ["myfile1", "nonExistent", "emptyFile", "myfile2"]
for file in filenames:
try:
f = open(file, 'r')
line = f.readline()
if line == "":
f.close()
raise EmptyFileError("%s: is empty" % file)
    except IOError as error:
        print("%s: could not be opened: %s" % (file, error.strerror))
    except EmptyFileError as error:
        print(error)
    else:
        print("%s: %s" % (file, f.readline()))
    finally:
        print("Done processing", file)
| [] |
jimconner/digital_sky | plugins/crumbling_in.py | 9427cd19dbd9fb1c82ca12fa8f962532d700c67f | # Crumbling In
# Randomised coloured dots light up from both ends of the strip and close in
# towards the middle, then the pattern is blanked back out from the centre.
import sys, traceback, random
from numpy import array,full
class animation():
def __init__(self,datastore):
self.max_led = datastore.LED_COUNT
self.pos = 0
self.direction=0
self.cols = [ \
[255,0,0,0], \
[0,255,0,0], \
[0,0,255,0], \
[0,0,0,255], \
[255,255,0,0], \
[255,0,255,0], \
[0,255,255,0], \
[0,0,255,64], \
]
self.row=full((self.max_led,4),0)
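    # emit_row() advances one LED per call: while direction == 0 a random colour from
    # self.cols is written at pos and its mirror position, and once the two fronts meet
    # in the middle direction flips to 1 and the same positions are blanked back out.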
def emit_row(self):
try:
if self.pos >= self.max_led/2:
self.direction=1
if self.pos <= 0:
self.direction=0
col=self.cols[random.randint(0,7)]
if self.direction==1:
col=[0,0,0,0]
self.row[self.pos]=col
self.row[(self.max_led-1)-self.pos]=col
if self.direction==0:
self.pos+=1
else:
self.pos-=1
return self.row
except Exception as err:
print(err)
traceback.print_exc(file=sys.stdout)
| [((23, 17, 23, 41), 'numpy.full', 'full', ({(23, 22, 23, 38): '(self.max_led, 4)', (23, 39, 23, 40): '0'}, {}), '((self.max_led, 4), 0)', False, 'from numpy import array, full\n'), ((44, 12, 44, 48), 'traceback.print_exc', 'traceback.print_exc', (), '', False, 'import sys, traceback, random\n'), ((31, 26, 31, 45), 'random.randint', 'random.randint', ({(31, 41, 31, 42): '(0)', (31, 43, 31, 44): '(7)'}, {}), '(0, 7)', False, 'import sys, traceback, random\n')] |
KBIbiopharma/pybleau | pybleau/app/plotting/tests/test_plot_config.py | 5cdfce603ad29af874f74f0f527adc6b4c9066e8 | from __future__ import division
from unittest import skipIf, TestCase
import os
from pandas import DataFrame
import numpy as np
from numpy.testing import assert_array_equal
BACKEND_AVAILABLE = os.environ.get("ETS_TOOLKIT", "qt4") != "null"
if BACKEND_AVAILABLE:
from app_common.apptools.testing_utils import assert_obj_gui_works
from pybleau.app.plotting.plot_config import HeatmapPlotConfigurator, \
HEATMAP_PLOT_TYPE, HistogramPlotConfigurator, HIST_PLOT_TYPE, \
LinePlotConfigurator, BarPlotConfigurator, ScatterPlotConfigurator, \
SCATTER_PLOT_TYPE, CMAP_SCATTER_PLOT_TYPE, LINE_PLOT_TYPE, \
BAR_PLOT_TYPE
LEN = 16
TEST_DF = DataFrame({"a": [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4],
"b": [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4],
"c": [1, 2, 3, 4, 2, 3, 1, 1, 4, 4, 5, 6, 4, 4, 5, 6],
"d": list("ababcabcdabcdeab"),
"e": np.random.randn(LEN),
"f": range(LEN),
# Highly repetitive column to split the entire data into 2
"g": np.array(["0", "1"] * (LEN // 2)),
"h": np.array([0, 1] * (LEN // 2), dtype=bool),
})
class BasePlotConfig(object):
def test_creation_fails_if_no_df(self):
with self.assertRaises(ValueError):
config = self.configurator()
config.to_dict()
def test_bring_up(self):
obj = self.configurator(data_source=TEST_DF)
assert_obj_gui_works(obj)
# Assertion utilities -----------------------------------------------------
def assert_editor_options(self, editor):
editor_options = editor.values
if self.numerical_cols_only:
for col in editor_options:
if col != "index":
self.assertIn(TEST_DF[col].dtype, (np.int64, np.float64))
else:
self.assertEqual(set(editor_options),
set(TEST_DF.columns) | {"index"})
class BaseXYPlotConfig(BasePlotConfig):
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], TEST_DF["a"].values)
self.assertIn("y_arr", config_dict)
assert_array_equal(config_dict["y_arr"], TEST_DF["b"].values)
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
y_editor = view_items[1].content[0].editor
self.assert_editor_options(y_editor)
def test_plot_colored_by_str_col(self):
# Color by a column filled with boolean values
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
self.assertIn(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], dict)
d_values = TEST_DF["d"].unique()
self.assertEqual(set(config_dict["x_arr"].keys()), set(d_values))
for arr in config_dict["x_arr"].values():
self.assertIsInstance(arr, np.ndarray)
# For example:
assert_array_equal(config_dict["x_arr"]["c"], np.array([1, 4, 4]))
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], dict)
self.assertEqual(set(config_dict["y_arr"].keys()), set(d_values))
for arr in config_dict["y_arr"].values():
self.assertIsInstance(arr, np.ndarray)
# For example:
assert_array_equal(config_dict["y_arr"]["c"], np.array([2, 2, 3]))
def test_plot_colored_by_bool_col(self):
# Color by a column filled with boolean values
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="h")
self.assertIn(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], dict)
hue_values = set(TEST_DF["h"])
self.assertEqual(set(config_dict["x_arr"].keys()), hue_values)
assert_array_equal(config_dict["x_arr"][False], TEST_DF["a"][::2])
assert_array_equal(config_dict["x_arr"][True], TEST_DF["a"][1::2])
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], dict)
self.assertEqual(set(config_dict["y_arr"].keys()), hue_values)
assert_array_equal(config_dict["y_arr"][False], TEST_DF["b"][::2])
assert_array_equal(config_dict["y_arr"][True], TEST_DF["b"][1::2])
def test_plot_colored_by_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestScatterPlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = ScatterPlotConfigurator
self.basic_type = SCATTER_PLOT_TYPE
self.numerical_cols_only = True
def test_plot_scatter_colored_by_int_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="c")
self.assertEqual(config.plot_type, CMAP_SCATTER_PLOT_TYPE)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], np.ndarray)
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], np.ndarray)
self.assertIn("z_arr", config_dict)
self.assertIsInstance(config_dict["z_arr"], np.ndarray)
def test_plot_scatter_colored_by_float_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
self.assertEqual(config.plot_type, CMAP_SCATTER_PLOT_TYPE)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], np.ndarray)
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], np.ndarray)
self.assertIn("z_arr", config_dict)
self.assertIsInstance(config_dict["z_arr"], np.ndarray)
def test_style_colorize_by_float_changes_on_color_column_change(self):
""" The dtype of the column to colorize controls colorize_by_float.
"""
# Color by a string:
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
self.assertFalse(config.plot_style.colorize_by_float)
# Color by a float:
config.z_col_name = "e"
self.assertTrue(config.plot_style.colorize_by_float)
def test_scatter_data_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._data_selection_columns()
expected = config._numerical_columns
self.assertCountEqual(columns, expected)
def test_scatter_color_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._color_selection_columns()
expected = [""] + config._available_columns
self.assertCountEqual(columns, expected)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestLinePlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = LinePlotConfigurator
self.basic_type = LINE_PLOT_TYPE
self.numerical_cols_only = True
def test_line_data_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._data_selection_columns()
expected = config._numerical_columns
self.assertCountEqual(columns, expected)
def test_line_color_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._color_selection_columns()
expected = [""] + config._available_columns
self.assertCountEqual(columns, expected)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestBarPlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = BarPlotConfigurator
self.basic_type = BAR_PLOT_TYPE
self.numerical_cols_only = False
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
view_items = config._data_selection_items()
x_editor = view_items[0].content[3].content[0].content[0].editor
self.assert_editor_options(x_editor)
def test_melt_mode_no_effect(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True)
self.assertEqual(config.plot_type, self.basic_type)
# No columns to melt, so no transformation:
self.assertIs(config.data_source, TEST_DF)
self.assertIs(config.transformed_data, TEST_DF)
def test_melt_mode_with_melted_columns(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"])
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"]*LEN+["f"]*LEN)
assert_array_equal(config.x_arr, x_values)
self.assertEqual(config.x_col_name, "variable")
self.assertEqual(len(config.y_arr), 2 * LEN)
self.assertEqual(config.y_col_name, "value")
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], x_values)
self.assertIn("y_arr", config_dict)
self.assertEqual(len(config_dict["y_arr"]), 2 * LEN)
def test_melt_mode_with_melted_columns_and_str_color(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"], z_col_name="g")
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
hue_values = TEST_DF["g"].unique()
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"] * (LEN // 2) + ["f"] * (LEN // 2))
self.assertEqual(set(config.x_arr.keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config.x_arr[key], x_values)
self.assertEqual(config.x_col_name, "variable")
for key in hue_values:
self.assertEqual(len(config.y_arr[key]), LEN)
self.assertEqual(config.y_col_name, "value")
self.assertIn("g", config.transformed_data.columns)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertEqual(set(config_dict["x_arr"].keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config_dict["x_arr"][key], x_values)
self.assertIn("y_arr", config_dict)
for key in hue_values:
self.assertEqual(len(config_dict["y_arr"][key]), LEN)
def test_melt_mode_with_melted_columns_and_bool_color(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"], z_col_name="h")
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
hue_values = TEST_DF["h"].unique()
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"] * (LEN // 2) + ["f"] * (LEN // 2))
self.assertEqual(set(config.x_arr.keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config.x_arr[key], x_values)
self.assertEqual(config.x_col_name, "variable")
for key in hue_values:
self.assertEqual(len(config.y_arr[key]), LEN)
self.assertEqual(config.y_col_name, "value")
self.assertIn("h", config.transformed_data.columns)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertEqual(set(config_dict["x_arr"].keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config_dict["x_arr"][key], x_values)
self.assertIn("y_arr", config_dict)
for key in hue_values:
self.assertEqual(len(config_dict["y_arr"][key]), LEN)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestHistogramPlotConfig(BasePlotConfig, TestCase):
def setUp(self):
self.configurator = HistogramPlotConfigurator
self.basic_type = HIST_PLOT_TYPE
self.numerical_cols_only = True
# Tests -------------------------------------------------------------------
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], TEST_DF["a"].values)
def test_plot_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF,
x_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestHeatmapPlotConfig(BasePlotConfig, TestCase):
def setUp(self):
self.configurator = HeatmapPlotConfigurator
self.basic_type = HEATMAP_PLOT_TYPE
self.numerical_cols_only = True
# Tests -------------------------------------------------------------------
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
def test_plot_colored_by_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
Passing non-numerical
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
y_editor = view_items[1].content[0].editor
self.assert_editor_options(y_editor)
| [((135, 1, 135, 57), 'unittest.skipIf', 'skipIf', ({(135, 8, 135, 29): '(not BACKEND_AVAILABLE)', (135, 31, 135, 56): '"""No UI backend available"""'}, {}), "(not BACKEND_AVAILABLE, 'No UI backend available')", False, 'from unittest import skipIf, TestCase\n'), ((196, 1, 196, 57), 'unittest.skipIf', 'skipIf', ({(196, 8, 196, 29): '(not BACKEND_AVAILABLE)', (196, 31, 196, 56): '"""No UI backend available"""'}, {}), "(not BACKEND_AVAILABLE, 'No UI backend available')", False, 'from unittest import skipIf, TestCase\n'), ((218, 1, 218, 57), 'unittest.skipIf', 'skipIf', ({(218, 8, 218, 29): '(not BACKEND_AVAILABLE)', (218, 31, 218, 56): '"""No UI backend available"""'}, {}), "(not BACKEND_AVAILABLE, 'No UI backend available')", False, 'from unittest import skipIf, TestCase\n'), ((330, 1, 330, 57), 'unittest.skipIf', 'skipIf', ({(330, 8, 330, 29): '(not BACKEND_AVAILABLE)', (330, 31, 330, 56): '"""No UI backend available"""'}, {}), "(not BACKEND_AVAILABLE, 'No UI backend available')", False, 'from unittest import skipIf, TestCase\n'), ((363, 1, 363, 57), 'unittest.skipIf', 'skipIf', ({(363, 8, 363, 29): '(not BACKEND_AVAILABLE)', (363, 31, 363, 56): '"""No UI backend available"""'}, {}), "(not BACKEND_AVAILABLE, 'No UI backend available')", False, 'from unittest import skipIf, TestCase\n'), ((9, 20, 9, 56), 'os.environ.get', 'os.environ.get', ({(9, 35, 9, 48): '"""ETS_TOOLKIT"""', (9, 50, 9, 55): '"""qt4"""'}, {}), "('ETS_TOOLKIT', 'qt4')", False, 'import os\n'), ((25, 26, 25, 46), 'numpy.random.randn', 'np.random.randn', ({(25, 42, 25, 45): 'LEN'}, {}), '(LEN)', True, 'import numpy as np\n'), ((28, 26, 28, 59), 'numpy.array', 'np.array', ({(28, 35, 28, 58): "['0', '1'] * (LEN // 2)"}, {}), "(['0', '1'] * (LEN // 2))", True, 'import numpy as np\n'), ((29, 26, 29, 67), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((41, 8, 41, 33), 'app_common.apptools.testing_utils.assert_obj_gui_works', 'assert_obj_gui_works', ({(41, 29, 41, 32): 'obj'}, {}), '(obj)', False, 'from app_common.apptools.testing_utils import assert_obj_gui_works\n'), ((66, 8, 66, 69), 'numpy.testing.assert_array_equal', 'assert_array_equal', ({(66, 27, 66, 47): "config_dict['x_arr']", (66, 49, 66, 68): "TEST_DF['a'].values"}, {}), "(config_dict['x_arr'], TEST_DF['a'].values)", False, 'from numpy.testing import assert_array_equal\n'), ((68, 8, 68, 69), 'numpy.testing.assert_array_equal', 'assert_array_equal', ({(68, 27, 68, 47): "config_dict['y_arr']", (68, 49, 68, 68): "TEST_DF['b'].values"}, {}), "(config_dict['y_arr'], TEST_DF['b'].values)", False, 'from numpy.testing import assert_array_equal\n'), ((119, 8, 119, 74), 'numpy.testing.assert_array_equal', 'assert_array_equal', ({(119, 27, 119, 54): "config_dict['x_arr'][False]", (119, 56, 119, 73): "TEST_DF['a'][::2]"}, {}), "(config_dict['x_arr'][False], TEST_DF['a'][::2])", False, 'from numpy.testing import assert_array_equal\n'), ((120, 8, 120, 74), 'numpy.testing.assert_array_equal', 'assert_array_equal', ({(120, 27, 120, 53): "config_dict['x_arr'][True]", (120, 55, 120, 73): "TEST_DF['a'][1::2]"}, {}), "(config_dict['x_arr'][True], TEST_DF['a'][1::2])", False, 'from numpy.testing import assert_array_equal\n'), ((125, 8, 125, 74), 'numpy.testing.assert_array_equal', 'assert_array_equal', ({(125, 27, 125, 54): "config_dict['y_arr'][False]", (125, 56, 125, 73): "TEST_DF['b'][::2]"}, {}), "(config_dict['y_arr'][False], TEST_DF['b'][::2])", False, 'from numpy.testing import assert_array_equal\n'), ((126, 8, 126, 74), 'numpy.testing.assert_array_equal', 
'assert_array_equal', ({(126, 27, 126, 53): "config_dict['y_arr'][True]", (126, 55, 126, 73): "TEST_DF['b'][1::2]"}, {}), "(config_dict['y_arr'][True], TEST_DF['b'][1::2])", False, 'from numpy.testing import assert_array_equal\n'), ((249, 19, 249, 48), 'numpy.array', 'np.array', ({(249, 28, 249, 47): "['e'] * LEN + ['f'] * LEN"}, {}), "(['e'] * LEN + ['f'] * LEN)", True, 'import numpy as np\n'), ((250, 8, 250, 50), 'numpy.testing.assert_array_equal', 'assert_array_equal', ({(250, 27, 250, 39): 'config.x_arr', (250, 41, 250, 49): 'x_values'}, {}), '(config.x_arr, x_values)', False, 'from numpy.testing import assert_array_equal\n'), ((259, 8, 259, 58), 'numpy.testing.assert_array_equal', 'assert_array_equal', ({(259, 27, 259, 47): "config_dict['x_arr']", (259, 49, 259, 57): 'x_values'}, {}), "(config_dict['x_arr'], x_values)", False, 'from numpy.testing import assert_array_equal\n'), ((273, 19, 273, 68), 'numpy.array', 'np.array', ({(273, 28, 273, 67): "['e'] * (LEN // 2) + ['f'] * (LEN // 2)"}, {}), "(['e'] * (LEN // 2) + ['f'] * (LEN // 2))", True, 'import numpy as np\n'), ((306, 19, 306, 68), 'numpy.array', 'np.array', ({(306, 28, 306, 67): "['e'] * (LEN // 2) + ['f'] * (LEN // 2)"}, {}), "(['e'] * (LEN // 2) + ['f'] * (LEN // 2))", True, 'import numpy as np\n'), ((346, 8, 346, 69), 'numpy.testing.assert_array_equal', 'assert_array_equal', ({(346, 27, 346, 47): "config_dict['x_arr']", (346, 49, 346, 68): "TEST_DF['a'].values"}, {}), "(config_dict['x_arr'], TEST_DF['a'].values)", False, 'from numpy.testing import assert_array_equal\n'), ((96, 54, 96, 73), 'numpy.array', 'np.array', ({(96, 63, 96, 72): '[1, 4, 4]'}, {}), '([1, 4, 4])', True, 'import numpy as np\n'), ((104, 54, 104, 73), 'numpy.array', 'np.array', ({(104, 63, 104, 72): '[2, 2, 3]'}, {}), '([2, 2, 3])', True, 'import numpy as np\n'), ((276, 12, 276, 59), 'numpy.testing.assert_array_equal', 'assert_array_equal', ({(276, 31, 276, 48): 'config.x_arr[key]', (276, 50, 276, 58): 'x_values'}, {}), '(config.x_arr[key], x_values)', False, 'from numpy.testing import assert_array_equal\n'), ((290, 12, 290, 67), 'numpy.testing.assert_array_equal', 'assert_array_equal', ({(290, 31, 290, 56): "config_dict['x_arr'][key]", (290, 58, 290, 66): 'x_values'}, {}), "(config_dict['x_arr'][key], x_values)", False, 'from numpy.testing import assert_array_equal\n'), ((309, 12, 309, 59), 'numpy.testing.assert_array_equal', 'assert_array_equal', ({(309, 31, 309, 48): 'config.x_arr[key]', (309, 50, 309, 58): 'x_values'}, {}), '(config.x_arr[key], x_values)', False, 'from numpy.testing import assert_array_equal\n'), ((323, 12, 323, 67), 'numpy.testing.assert_array_equal', 'assert_array_equal', ({(323, 31, 323, 56): "config_dict['x_arr'][key]", (323, 58, 323, 66): 'x_values'}, {}), "(config_dict['x_arr'][key], x_values)", False, 'from numpy.testing import assert_array_equal\n')] |
thomasrockhu/bfg9000 | test/integration/languages/test_mixed.py | 1cd1226eab9bed2fc2ec6acccf7864fdcf2ed31a | import os.path
from .. import *
class TestMixed(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'mixed'), *args, **kwargs)
def test_build(self):
self.build(executable('program'))
self.assertOutput([executable('program')], 'hello from c++!\n')
class TestMixedLibrary(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'mixed_library'), *args,
**kwargs)
def test_build(self):
self.build(executable('program'))
self.assertOutput([executable('program')], 'hello, library!\n')
@skip_if('fortran' not in test_features, 'skipping fortran tests')
# XXX: This fails on macOS, probably because of a version mismatch somewhere.
@skip_if(env.host_platform.genus == 'darwin', 'fortran on os x is weird')
class TestMixedFortran(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'mixed_fortran'), *args,
**kwargs)
def test_build(self):
self.build(executable('program'))
self.assertOutput([executable('program')], 'hello from f77!\n')
| [] |
TeamLab/introduction_to_pythoy_TEAMLAB_MOOC | code/7/collections/namedtupe_example.py | ebf1ff02d6a341bfee8695eac478ff8297cb97e4 | from collections import namedtuple
# Basic example
Point = namedtuple('Point', ['x', 'y'])
p = Point(11, y=22)
print(p[0] + p[1])
x, y = p
print(x, y)
print(p.x + p.y)
print(Point(x=11, y=22))
from collections import namedtuple
import csv
f = open("users.csv", "r")
next(f)
reader = csv.reader(f)
student_list = []
for row in reader:
student_list.append(row)
print(row)
print(student_list)
columns = ["user_id", "integration_id", "login_id", "password", "first_name",
"last_name", "full_name", "sortable_name", "short_name",
"email", "status"]
Student = namedtuple('Student', columns)
student_namedtuple_list = []
for row in student_list:
    student = Student(*row)
    student_namedtuple_list.append(student)
print(student_namedtuple_list[0])
print(student_namedtuple_list[0].full_name)
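# A small follow-up sketch (not part of the original lesson): namedtuple
# instances also provide _fields, _asdict() and _replace(), which are handy
# when inspecting or re-exporting the records built above.
first_student = student_namedtuple_list[0]
print(first_student._fields)
print(first_student._asdict())
print(first_student._replace(status="inactive"))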
| [((4, 8, 4, 39), 'collections.namedtuple', 'namedtuple', ({(4, 19, 4, 26): '"""Point"""', (4, 28, 4, 38): "['x', 'y']"}, {}), "('Point', ['x', 'y'])", False, 'from collections import namedtuple\n'), ((17, 9, 17, 22), 'csv.reader', 'csv.reader', ({(17, 20, 17, 21): 'f'}, {}), '(f)', False, 'import csv\n'), ((27, 10, 27, 40), 'collections.namedtuple', 'namedtuple', ({(27, 21, 27, 30): '"""Student"""', (27, 32, 27, 39): 'columns'}, {}), "('Student', columns)", False, 'from collections import namedtuple\n')] |
dotnes/mitmproxy | test/helper_tools/benchtool.py | 5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f | # Profile mitmdump with apachebench and
# yappi (https://code.google.com/p/yappi/)
#
# Requirements:
# - Apache Bench "ab" binary
# - pip install click yappi
from mitmproxy.main import mitmdump
from os import system
from threading import Thread
import time
import yappi
import click
class ApacheBenchThread(Thread):
def __init__(self, concurrency):
self.concurrency = concurrency
super().__init__()
def run(self):
time.sleep(2)
system(
"ab -n 1024 -c {} -X 127.0.0.1:8080 http://example.com/".format(self.concurrency))
@click.command()
@click.option('--profiler', default="none", type=click.Choice(['none', 'yappi']))
@click.option('--clock-type', default="cpu", type=click.Choice(['wall', 'cpu']))
@click.option('--concurrency', default=1, type=click.INT)
def main(profiler, clock_type, concurrency):
outfile = "callgrind.mitmdump-{}-c{}".format(clock_type, concurrency)
a = ApacheBenchThread(concurrency)
a.start()
if profiler == "yappi":
yappi.set_clock_type(clock_type)
yappi.start(addons=True)
print("Start mitmdump...")
mitmdump(["-k", "-q", "-S", "1024example"])
print("mitmdump stopped.")
print("Save profile information...")
if profiler == "yappi":
yappi.stop()
stats = yappi.get_func_stats()
stats.save(outfile, type='callgrind')
print("Done.")
if __name__ == '__main__':
main()
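# Example invocation (a sketch based on the click options defined above; with
# --profiler yappi the profile is saved to
# "callgrind.mitmdump-<clock_type>-c<concurrency>" as built in main(), and can
# be opened with a callgrind viewer such as kcachegrind):
#
#   python benchtool.py --profiler yappi --clock-type wall --concurrency 4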
| [((29, 1, 29, 16), 'click.command', 'click.command', ({}, {}), '()', False, 'import click\n'), ((32, 1, 32, 57), 'click.option', 'click.option', (), '', False, 'import click\n'), ((44, 4, 44, 47), 'mitmproxy.main.mitmdump', 'mitmdump', ({(44, 13, 44, 46): "['-k', '-q', '-S', '1024example']"}, {}), "(['-k', '-q', '-S', '1024example'])", False, 'from mitmproxy.main import mitmdump\n'), ((24, 8, 24, 21), 'time.sleep', 'time.sleep', ({(24, 19, 24, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((40, 8, 40, 40), 'yappi.set_clock_type', 'yappi.set_clock_type', ({(40, 29, 40, 39): 'clock_type'}, {}), '(clock_type)', False, 'import yappi\n'), ((41, 8, 41, 32), 'yappi.start', 'yappi.start', (), '', False, 'import yappi\n'), ((49, 8, 49, 20), 'yappi.stop', 'yappi.stop', ({}, {}), '()', False, 'import yappi\n'), ((50, 16, 50, 38), 'yappi.get_func_stats', 'yappi.get_func_stats', ({}, {}), '()', False, 'import yappi\n'), ((30, 49, 30, 80), 'click.Choice', 'click.Choice', ({(30, 62, 30, 79): "['none', 'yappi']"}, {}), "(['none', 'yappi'])", False, 'import click\n'), ((31, 50, 31, 79), 'click.Choice', 'click.Choice', ({(31, 63, 31, 78): "['wall', 'cpu']"}, {}), "(['wall', 'cpu'])", False, 'import click\n')] |
alexliberzonlab/pivpy | pivpy/graphics.py | c1c984cd669fce6f5c0b6a602d6a51ed3fec5954 | # -*- coding: utf-8 -*-
"""
Various plots
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib.animation import FuncAnimation, FFMpegWriter
import xarray as xr
import os
def quiver(data, arrScale = 25.0, threshold = None, nthArr = 1,
contourLevels = None, colbar = True, logscale = False,
aspectratio='equal', colbar_orient = 'vertical', units = None):
"""
Generates a quiver plot of a 'data' xarray DataArray object (single frame from a dataset)
Inputs:
data - xarray DataArray of the type defined in pivpy, one of the frames in the Dataset
selected by default using .isel(t=0)
threshold - values above the threshold will be set equal to threshold
arrScale - use to change arrow scales
nthArr - use to plot only every nth arrow from the array
contourLevels - use to specify the maximum value (abs) of contour plots
colbar - True/False wether to generate a colorbar or not
logscale - if true then colorbar is on log scale
aspectratio - set auto or equal for the plot's apearence
colbar_orient - 'horizontal' or 'vertical' orientation of the colorbar (if colbar is True)
Outputs:
none
Usage:
        graphics.quiver(data, arrScale = 0.2, threshold = Inf, nthArr = 1)
"""
data = dataset_to_array(data)
x = data.x
y = data.y
u = data.u
v = data.v
if units is not None:
lUnits = units[0] # ['m' 'm' 'mm/s' 'mm/s']
velUnits = units[2]
tUnits = velUnits.split('/')[1] # make it 's' or 'dt'
else:
lUnits, velUnits, tUnits = '', '', ''
if threshold is not None:
data['u'] = xr.where(data['u']>threshold, threshold, data['u'])
data['v'] = xr.where(data['v']>threshold, threshold, data['v'])
S = np.array(np.sqrt(u**2 + v**2))
fig = plt.get_fignums()
if len(fig) == 0: # if no figure is open
fig, ax = plt.subplots() # open a new figure
else:
ax = plt.gca()
if contourLevels is None:
levels = np.linspace(0, np.max(S.flatten()), 30) # default contour levels up to max of S
else:
levels = np.linspace(0, contourLevels, 30)
if logscale:
c = ax.contourf(x,y,S,alpha=0.8,
cmap = plt.get_cmap("Blues"),
                 levels = levels, norm = LogNorm())
else:
c = ax.contourf(x,y,S,alpha=0.8,
cmap = plt.get_cmap("Blues"),
levels=levels)
if colbar:
cbar = plt.colorbar(c, orientation=colbar_orient)
cbar.set_label(r'$\left| \, V \, \right|$ ['+ lUnits +' $\cdot$ '+ tUnits +'$^{-1}$]')
ax.quiver(x[::nthArr],y[::nthArr],
u[::nthArr,::nthArr],v[::nthArr,::nthArr],units='width',
scale = np.max(S*arrScale),headwidth=2)
ax.set_xlabel('x (' + lUnits + ')')
ax.set_ylabel('y (' + lUnits + ')')
ax.set_aspect(aspectratio)
return fig,ax
def histogram(data, normed = False):
"""
this function will plot a normalized histogram of
the velocity data.
Input:
data : xarray DataSet with ['u','v'] attrs['units']
normed : (optional) default is False to present normalized
histogram
"""
u = np.asarray(data.u).flatten()
v = np.asarray(data.v).flatten()
units = data.attrs['units']
f,ax = plt.subplots(2)
    ax[0].hist(u,bins=int(np.sqrt(len(u))*0.5),density=normed)
    ax[0].set_xlabel('u ['+units[2]+']')
    ax[1] = plt.subplot2grid((2,1),(1,0))
    ax[1].hist(v,bins=int(np.sqrt(len(v))*0.5),density=normed)
ax[1].set_xlabel('v ['+units[2]+']')
plt.tight_layout()
return f, ax
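# Minimal usage sketch for histogram() (assumes `data` is a pivpy xarray
# Dataset with 'u' and 'v' variables and a units attribute, e.g. as produced
# by the package's loading routines):
#
#   fig, axes = histogram(data, normed=True)
#   fig.savefig('velocity_histograms.png')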
def contour_plot(data, threshold = None, contourLevels = None,
colbar = True, logscale = False, aspectration='equal', units=None):
""" contourf ajusted for the xarray PIV dataset, creates a
contour map for the data['w'] property.
Input:
data : xarray PIV DataArray, converted automatically using .isel(t=0)
threshold : a threshold value, default is None (no data clipping)
contourLevels : number of contour levels, default is None
colbar : boolean (default is True) show/hide colorbar
logscale : boolean (True is default) create in linear/log scale
aspectration : string, 'equal' is the default
"""
data = dataset_to_array(data)
if units is not None:
lUnits = units[0] # ['m' 'm' 'mm/s' 'mm/s']
# velUnits = units[2]
# tUnits = velUnits.split('/')[1] # make it 's' or 'dt'
else:
# lUnits, velUnits = '', ''
lUnits = ''
f,ax = plt.subplots()
if threshold is not None:
data['w'] = xr.where(data['w']>threshold, threshold, data['w'])
m = np.amax(abs(data['w']))
if contourLevels == None:
levels = np.linspace(-m, m, 30)
else:
levels = np.linspace(-contourLevels, contourLevels, 30)
if logscale:
c = ax.contourf(data.x,data.y,np.abs(data['w']), levels=levels,
                        cmap = plt.get_cmap('RdYlBu'), norm=LogNorm())
else:
c = ax.contourf(data.x,data.y,data['w'], levels=levels,
cmap = plt.get_cmap('RdYlBu'))
plt.xlabel('x [' + lUnits + ']')
plt.ylabel('y [' + lUnits + ']')
if colbar:
cbar = plt.colorbar(c)
cbar.set_label(r'$\omega$ [s$^{-1}$]')
ax.set_aspect(aspectration)
return f,ax
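# Usage sketch for contour_plot() (assumes the dataset already carries a
# scalar 'w' variable, e.g. one produced by data.piv.vec2scal() as done in
# showscal() below):
#
#   fig, ax = contour_plot(data, contourLevels=50, units=data.attrs['units'])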
def showf(data, variables=None, units=None, fig=None):
"""
showf(data, var, units)
Arguments:
data : xarray.DataSet that contains dimensions of t,x,y
and variables u,v and maybe w (scalar)
"""
if variables is None:
xlabel = ' '
ylabel = ' '
else:
xlabel = variables[0]
ylabel = variables[1]
if units is not None:
xlabel += ' ' + units[0]
ylabel += ' ' + units[1]
fig = plt.figure(None if fig is None else fig.number)
for t in data['t']:
d = data.isel(t=t)
plt.quiver(d['x'],d['y'],d['u'],d['v'],d['u']**2 + d['v']**2)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.draw()
plt.pause(0.1)
plt.show()
def showscal(data, property='ken'):
"""
showf(data, var, units)
Arguments:
data : xarray.DataSet that contains dimensions of t,x,y
and a variable w (scalar)
"""
# fig = plt.figure(None if fig is None else fig.number)
# import pdb; pdb.set_trace()
# xlabel = (None if var is None else var[0]) + ' [' + (None if units is None else units[0])+']'
# ylabel = (None if var is None else var[1]) + ' [' + (None if units is None else units[1])+']'
data = data.piv.vec2scal(property=property)
contour_plot(data)
def animate(data, arrowscale=1, savepath=None):
""" animates the quiver plot for the dataset (multiple frames)
Input:
data : xarray PIV type of DataSet
arrowscale : [optional] integer, default is 1
savepath : [optional] path to save the MP4 animation, default is None
Output:
if savepath is None, then only an image display of the animation
if savepath is an existing path, a file named im.mp4 is saved
"""
X, Y = data.x, data.y
U, V = data.u[:,:,0], data.v[:,:,0] # first frame
fig, ax = plt.subplots(1,1)
M = np.sqrt(U**2 + V**2)
Q = ax.quiver(X[::3,::3], Y[::3,::3],
U[::3,::3], V[::3,::3], M[::3,::3],
units='inches', scale=arrowscale)
cb = plt.colorbar(Q)
units = data.attrs['units']
cb.ax.set_ylabel('velocity (' + units[2] + ')')
text = ax.text(0.2,1.05, '1/'+str(len(data.t)), ha='center', va='center',
transform=ax.transAxes)
def update_quiver(num,Q,data,text):
U,V = data.u[:,:,num],data.v[:,:,num]
M = np.sqrt(U[::3,::3]**2 + V[::3,::3]**2)
        Q.set_UVC(U[::3,::3],V[::3,::3],M)
text.set_text(str(num+1)+'/'+str(len(data.t)))
return Q
anim = FuncAnimation(fig, update_quiver, fargs=(Q,data,text),
frames = len(data.t), blit=False)
mywriter = FFMpegWriter()
if savepath:
p = os.getcwd()
os.chdir(savepath)
anim.save('im.mp4', writer=mywriter)
os.chdir(p)
else: anim.save('im.mp4', writer=mywriter)
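# Usage sketch for animate() (requires the ffmpeg binary, since FFMpegWriter
# is used above; 'results' is just a placeholder directory):
#
#   animate(data, arrowscale=2, savepath='results')   # writes results/im.mp4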
def dataset_to_array(data,N=0):
""" converts xarray Dataset to array """
if 't' in data.dims:
print('Warning: function for a single frame, using first frame, supply data.isel(t=N)')
data = data.isel(t=N)
return data | [((56, 10, 56, 27), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((102, 11, 102, 26), 'matplotlib.pyplot.subplots', 'plt.subplots', ({(102, 24, 102, 25): '2'}, {}), '(2)', True, 'import matplotlib.pyplot as plt\n'), ((107, 12, 107, 41), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', ({(107, 29, 107, 34): '(2, 1)', (107, 35, 107, 40): '(1, 0)'}, {}), '((2, 1), (1, 0))', True, 'import matplotlib.pyplot as plt\n'), ((110, 4, 110, 22), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((139, 11, 139, 25), 'matplotlib.pyplot.subplots', 'plt.subplots', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((157, 4, 157, 36), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(157, 15, 157, 35): "('x [' + lUnits + ']')"}, {}), "('x [' + lUnits + ']')", True, 'import matplotlib.pyplot as plt\n'), ((158, 4, 158, 36), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(158, 15, 158, 35): "('y [' + lUnits + ']')"}, {}), "('y [' + lUnits + ']')", True, 'import matplotlib.pyplot as plt\n'), ((186, 10, 186, 57), 'matplotlib.pyplot.figure', 'plt.figure', ({(186, 21, 186, 56): 'None if fig is None else fig.number'}, {}), '(None if fig is None else fig.number)', True, 'import matplotlib.pyplot as plt\n'), ((197, 4, 197, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((230, 14, 230, 31), 'matplotlib.pyplot.subplots', 'plt.subplots', ({(230, 27, 230, 28): '1', (230, 29, 230, 30): '1'}, {}), '(1, 1)', True, 'import matplotlib.pyplot as plt\n'), ((231, 8, 231, 28), 'numpy.sqrt', 'np.sqrt', ({(231, 16, 231, 27): 'U ** 2 + V ** 2'}, {}), '(U ** 2 + V ** 2)', True, 'import numpy as np\n'), ((237, 9, 237, 24), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ({(237, 22, 237, 23): 'Q'}, {}), '(Q)', True, 'import matplotlib.pyplot as plt\n'), ((256, 15, 256, 29), 'matplotlib.animation.FFMpegWriter', 'FFMpegWriter', ({}, {}), '()', False, 'from matplotlib.animation import FuncAnimation, FFMpegWriter\n'), ((51, 20, 51, 71), 'xarray.where', 'xr.where', ({(51, 29, 51, 48): "data['u'] > threshold", (51, 50, 51, 59): 'threshold', (51, 61, 51, 70): "data['u']"}, {}), "(data['u'] > threshold, threshold, data['u'])", True, 'import xarray as xr\n'), ((52, 20, 52, 71), 'xarray.where', 'xr.where', ({(52, 29, 52, 48): "data['v'] > threshold", (52, 50, 52, 59): 'threshold', (52, 61, 52, 70): "data['v']"}, {}), "(data['v'] > threshold, threshold, data['v'])", True, 'import xarray as xr\n'), ((54, 17, 54, 37), 'numpy.sqrt', 'np.sqrt', ({(54, 25, 54, 36): 'u ** 2 + v ** 2'}, {}), '(u ** 2 + v ** 2)', True, 'import numpy as np\n'), ((58, 18, 58, 32), 'matplotlib.pyplot.subplots', 'plt.subplots', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((60, 13, 60, 22), 'matplotlib.pyplot.gca', 'plt.gca', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((65, 17, 65, 50), 'numpy.linspace', 'np.linspace', ({(65, 29, 65, 30): '0', (65, 32, 65, 45): 'contourLevels', (65, 47, 65, 49): '30'}, {}), '(0, contourLevels, 30)', True, 'import numpy as np\n'), ((76, 15, 76, 57), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (), '', True, 'import matplotlib.pyplot as plt\n'), ((142, 20, 142, 71), 'xarray.where', 'xr.where', ({(142, 29, 142, 48): "data['w'] > threshold", (142, 50, 142, 59): 'threshold', (142, 61, 142, 70): "data['w']"}, {}), "(data['w'] > threshold, threshold, data['w'])", True, 'import xarray as xr\n'), ((146, 17, 146, 
39), 'numpy.linspace', 'np.linspace', ({(146, 29, 146, 31): '-m', (146, 33, 146, 34): 'm', (146, 36, 146, 38): '30'}, {}), '(-m, m, 30)', True, 'import numpy as np\n'), ((148, 17, 148, 63), 'numpy.linspace', 'np.linspace', ({(148, 29, 148, 43): '-contourLevels', (148, 45, 148, 58): 'contourLevels', (148, 60, 148, 62): '30'}, {}), '(-contourLevels, contourLevels, 30)', True, 'import numpy as np\n'), ((160, 15, 160, 30), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ({(160, 28, 160, 29): 'c'}, {}), '(c)', True, 'import matplotlib.pyplot as plt\n'), ((191, 8, 191, 69), 'matplotlib.pyplot.quiver', 'plt.quiver', ({(191, 19, 191, 25): "d['x']", (191, 26, 191, 32): "d['y']", (191, 33, 191, 39): "d['u']", (191, 40, 191, 46): "d['v']", (191, 47, 191, 68): "(d['u'] ** 2 + d['v'] ** 2)"}, {}), "(d['x'], d['y'], d['u'], d['v'], d['u'] ** 2 + d['v'] ** 2)", True, 'import matplotlib.pyplot as plt\n'), ((192, 8, 192, 26), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(192, 19, 192, 25): 'xlabel'}, {}), '(xlabel)', True, 'import matplotlib.pyplot as plt\n'), ((193, 8, 193, 26), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(193, 19, 193, 25): 'ylabel'}, {}), '(ylabel)', True, 'import matplotlib.pyplot as plt\n'), ((194, 8, 194, 18), 'matplotlib.pyplot.draw', 'plt.draw', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((195, 8, 195, 22), 'matplotlib.pyplot.pause', 'plt.pause', ({(195, 18, 195, 21): '(0.1)'}, {}), '(0.1)', True, 'import matplotlib.pyplot as plt\n'), ((249, 12, 249, 50), 'numpy.sqrt', 'np.sqrt', ({(249, 20, 249, 49): 'U[::3, ::3] ** 2 + V[::3, ::3] ** 2'}, {}), '(U[::3, ::3] ** 2 + V[::3, ::3] ** 2)', True, 'import numpy as np\n'), ((258, 12, 258, 23), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((259, 8, 259, 26), 'os.chdir', 'os.chdir', ({(259, 17, 259, 25): 'savepath'}, {}), '(savepath)', False, 'import os\n'), ((261, 8, 261, 19), 'os.chdir', 'os.chdir', ({(261, 17, 261, 18): 'p'}, {}), '(p)', False, 'import os\n'), ((81, 23, 81, 41), 'numpy.max', 'np.max', ({(81, 30, 81, 40): '(S * arrScale)'}, {}), '(S * arrScale)', True, 'import numpy as np\n'), ((98, 8, 98, 26), 'numpy.asarray', 'np.asarray', ({(98, 19, 98, 25): 'data.u'}, {}), '(data.u)', True, 'import numpy as np\n'), ((99, 8, 99, 26), 'numpy.asarray', 'np.asarray', ({(99, 19, 99, 25): 'data.v'}, {}), '(data.v)', True, 'import numpy as np\n'), ((151, 38, 151, 55), 'numpy.abs', 'np.abs', ({(151, 45, 151, 54): "data['w']"}, {}), "(data['w'])", True, 'import numpy as np\n'), ((69, 24, 69, 45), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', ({(69, 37, 69, 44): '"""Blues"""'}, {}), "('Blues')", True, 'import matplotlib.pyplot as plt\n'), ((70, 41, 70, 61), 'matplotlib.pyplot.colors.LogNorm', 'plt.colors.LogNorm', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((73, 24, 73, 45), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', ({(73, 37, 73, 44): '"""Blues"""'}, {}), "('Blues')", True, 'import matplotlib.pyplot as plt\n'), ((152, 24, 152, 46), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', ({(152, 37, 152, 45): '"""RdYlBu"""'}, {}), "('RdYlBu')", True, 'import matplotlib.pyplot as plt\n'), ((152, 53, 152, 73), 'matplotlib.pyplot.colors.LogNorm', 'plt.colors.LogNorm', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((155, 24, 155, 46), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', ({(155, 37, 155, 45): '"""RdYlBu"""'}, {}), "('RdYlBu')", True, 'import matplotlib.pyplot as plt\n')] |
BostonCrayfish/mmsegmentation | configs/my_config/vit_base_aspp.py | e8b87242b877bfe0c32ea2630c2fd08977d7dd4b | # model settings
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='pretrain/vit_base_patch16_224.pth',
backbone=dict(
type='VisionTransformer',
img_size=(224, 224),
patch_size=16,
in_channels=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
# out_indices=(2, 5, 8, 11),
qkv_bias=True,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.0,
with_cls_token=True,
norm_cfg=dict(type='LN', eps=1e-6),
act_cfg=dict(type='GELU'),
norm_eval=False,
interpolate_mode='bicubic'),
neck=None,
decode_head=dict(
type='ASPPHead',
in_channels=768,
# in_index=3,
channels=512,
dilations=(1, 6, 12, 18),
dropout_ratio=0.1,
num_classes=21,
contrast=True,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=None,
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole')) # yapf: disable | [] |
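# How a config like this is typically consumed (a sketch; the checkpoint path
# is a placeholder): either through the mmsegmentation training script, e.g.
#   python tools/train.py configs/my_config/vit_base_aspp.py
# or programmatically via mmseg.apis.init_segmentor(config_file, checkpoint).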
smolar/tripleo-ansible | tripleo_ansible/ansible_plugins/modules/podman_container.py | 7bd37f019870c032bea71f22b305832932d81424 | #!/usr/bin/python
# Copyright (c) 2019 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# flake8: noqa: E501
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
from distutils.version import LooseVersion
import yaml
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
module: podman_container
author:
- "Sagi Shnaidman (@sshnaidm)"
version_added: '2.9'
short_description: Manage podman containers
notes: []
description:
- Start, stop, restart and manage Podman containers
requirements:
- "Podman installed on host"
options:
name:
description:
- Name of the container
required: True
type: str
executable:
description:
- Path to C(podman) executable if it is not in the C($PATH) on the
machine running C(podman)
default: 'podman'
type: str
state:
description:
- I(absent) - A container matching the specified name will be stopped and
removed.
- I(present) - Asserts the existence of a container matching the name and
any provided configuration parameters. If no container matches the
name, a container will be created. If a container matches the name but
the provided configuration does not match, the container will be
updated, if it can be. If it cannot be updated, it will be removed and
re-created with the requested config. Image version will be taken into
account when comparing configuration. Use the recreate option to force
the re-creation of the matching container.
- I(started) - Asserts there is a running container matching the name and
any provided configuration. If no container matches the name, a
container will be created and started. Use recreate to always re-create
a matching container, even if it is running. Use force_restart to force
a matching container to be stopped and restarted.
- I(stopped) - Asserts that the container is first I(present), and then
if the container is running moves it to a stopped state.
type: str
default: started
choices:
- absent
- present
- stopped
- started
image:
description:
- Repository path (or image name) and tag used to create the container.
If an image is not found, the image will be pulled from the registry.
If no tag is included, C(latest) will be used.
- Can also be an image ID. If this is the case, the image is assumed to
be available locally.
type: str
annotation:
description:
- Add an annotation to the container. The format is key value, multiple
times.
type: dict
authfile:
description:
- Path of the authentication file. Default is
``${XDG_RUNTIME_DIR}/containers/auth.json``
(Not available for remote commands) You can also override the default
path of the authentication file by setting the ``REGISTRY_AUTH_FILE``
environment variable. ``export REGISTRY_AUTH_FILE=path``
type: path
blkio_weight:
description:
- Block IO weight (relative weight) accepts a weight value between 10 and
1000
type: int
blkio_weight_device:
description:
- Block IO weight (relative device weight, format DEVICE_NAME[:]WEIGHT).
type: dict
cap_add:
description:
- List of capabilities to add to the container.
type: list
elements: str
cap_drop:
description:
- List of capabilities to drop from the container.
type: list
elements: str
cgroup_parent:
description:
- Path to cgroups under which the cgroup for the container will be
created.
If the path is not absolute, the path is considered to be relative to
the cgroups path of the init process. Cgroups will be created if they
do not already exist.
type: path
cgroupns:
description:
      - Set the cgroup namespace mode for the container.
type: str
cgroups:
description:
- Determines whether the container will create CGroups.
        Valid values are enabled and disabled, with the default being enabled.
The disabled option will force the container to not create CGroups,
and thus conflicts with CGroup options cgroupns and cgroup-parent.
type: str
choices:
- default
- disabled
cidfile:
description:
- Write the container ID to the file
type: path
cmd_args:
description:
    - Any additional command options you want to pass to the podman command,
cmd_args - ['--other-param', 'value']
Be aware module doesn't support idempotency if this is set.
type: list
elements: str
conmon_pidfile:
description:
- Write the pid of the conmon process to a file.
        conmon runs in a separate process from Podman,
so this is necessary when using systemd to restart Podman containers.
type: path
command:
description:
- Override command of container. Can be a string or a list.
type: raw
cpu_period:
description:
- Limit the CPU real-time period in microseconds
type: int
cpu_rt_period:
description:
- Limit the CPU real-time period in microseconds.
      Limit the container's Real Time CPU usage. This flag tells the kernel to
restrict the container's Real Time CPU usage to the period you specify.
type: int
cpu_rt_runtime:
description:
- Limit the CPU real-time runtime in microseconds.
This flag tells the kernel to limit the amount of time in a given CPU
period Real Time tasks may consume.
type: int
cpu_shares:
description:
- CPU shares (relative weight)
type: int
cpus:
description:
- Number of CPUs. The default is 0.0 which means no limit.
type: str
cpuset_cpus:
description:
- CPUs in which to allow execution (0-3, 0,1)
type: str
cpuset_mems:
description:
- Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only
effective on NUMA systems.
type: str
detach:
description:
- Run container in detach mode
type: bool
default: True
debug:
description:
- Return additional information which can be helpful for investigations.
type: bool
default: False
detach_keys:
description:
- Override the key sequence for detaching a container. Format is a single
character or ctrl-value
type: str
device:
description:
- Add a host device to the container.
The format is <device-on-host>[:<device-on-container>][:<permissions>]
(e.g. device /dev/sdc:/dev/xvdc:rwm)
type: list
elements: str
device_read_bps:
description:
- Limit read rate (bytes per second) from a device
(e.g. device-read-bps /dev/sda:1mb)
type: list
device_read_iops:
description:
- Limit read rate (IO per second) from a device
(e.g. device-read-iops /dev/sda:1000)
type: list
device_write_bps:
description:
- Limit write rate (bytes per second) to a device
(e.g. device-write-bps /dev/sda:1mb)
type: list
device_write_iops:
description:
- Limit write rate (IO per second) to a device
(e.g. device-write-iops /dev/sda:1000)
type: list
dns:
description:
- Set custom DNS servers
type: list
elements: str
dns_option:
description:
- Set custom DNS options
type: str
dns_search:
description:
- Set custom DNS search domains (Use dns_search with '' if you don't wish
to set the search domain)
type: str
entrypoint:
description:
- Overwrite the default ENTRYPOINT of the image
type: str
env:
description:
- Set environment variables.
This option allows you to specify arbitrary environment variables that
are available for the process that will be launched inside of the
container.
type: dict
env_file:
description:
- Read in a line delimited file of environment variables
type: path
env_host:
description:
- Use all current host environment variables in container.
Defaults to false.
type: bool
etc_hosts:
description:
- Dict of host-to-IP mappings, where each host name is a key in the
dictionary. Each host name will be added to the container's
``/etc/hosts`` file.
type: dict
aliases:
- add_hosts
expose:
description:
- Expose a port, or a range of ports (e.g. expose "3300-3310") to set up
port redirection on the host system.
type: list
elements: str
aliases:
- exposed
- exposed_ports
force_restart:
description:
- Force restart of container.
type: bool
default: False
aliases:
- restart
gidmap:
description:
- Run the container in a new user namespace using the supplied mapping.
type: str
group_add:
description:
- Add additional groups to run as
type: list
healthcheck:
description:
- Set or alter a healthcheck command for a container.
type: str
healthcheck_interval:
description:
- Set an interval for the healthchecks
(a value of disable results in no automatic timer setup)
(default "30s")
type: str
healthcheck_retries:
description:
- The number of retries allowed before a healthcheck is considered to be
unhealthy. The default value is 3.
type: int
healthcheck_start_period:
description:
- The initialization time needed for a container to bootstrap.
The value can be expressed in time format like 2m3s. The default value
is 0s
type: str
healthcheck_timeout:
description:
- The maximum time allowed to complete the healthcheck before an interval
is considered failed. Like start-period, the value can be expressed in
a time format such as 1m22s. The default value is 30s
type: str
hostname:
description:
- Container host name. Sets the container host name that is available
inside the container.
type: str
http_proxy:
description:
- By default proxy environment variables are passed into the container if
set for the podman process. This can be disabled by setting the
http_proxy option to false. The environment variables passed in
include http_proxy, https_proxy, ftp_proxy, no_proxy, and also the
upper case versions of those.
Defaults to true
type: bool
image_volume:
description:
- Tells podman how to handle the builtin image volumes.
The options are bind, tmpfs, or ignore (default bind)
type: str
choices:
- 'bind'
- 'tmpfs'
- 'ignore'
image_strict:
description:
- Whether to compare images in idempotency by taking into account a full
name with registry and namespaces.
type: bool
default: False
init:
description:
- Run an init inside the container that forwards signals and reaps
processes.
type: str
init_path:
description:
- Path to the container-init binary.
type: str
interactive:
description:
      - Keep STDIN open even if not attached. The default is false.
type: bool
ip:
description:
- Specify a static IP address for the container, for example
'10.88.64.128'.
Can only be used if no additional CNI networks to join were specified
via 'network:', and if the container is not joining another container's
network namespace via 'network container:<name|id>'.
The address must be within the default CNI network's pool
(default 10.88.0.0/16).
type: str
ipc:
description:
- Default is to create a private IPC namespace (POSIX SysV IPC) for the
container
type: str
kernel_memory:
description:
- Kernel memory limit
(format <number>[<unit>], where unit = b, k, m or g)
Note - idempotency is supported for integers only.
type: str
label:
description:
- Add metadata to a container, pass dictionary of label names and values
type: dict
label_file:
description:
- Read in a line delimited file of labels
type: str
log_driver:
description:
- Logging driver. Used to set the log driver for the container.
For example log_driver "k8s-file".
type: str
choices:
- k8s-file
- journald
- json-file
log_opt:
description:
- Logging driver specific options. Used to set the path to the container
log file. For example log_opt
"path=/var/log/container/mycontainer.json"
type: str
aliases:
- log_options
memory:
description:
- Memory limit (format 10k, where unit = b, k, m or g)
Note - idempotency is supported for integers only.
type: str
memory_reservation:
description:
- Memory soft limit (format 100m, where unit = b, k, m or g)
Note - idempotency is supported for integers only.
type: str
memory_swap:
description:
- A limit value equal to memory plus swap. Must be used with the -m
(--memory) flag.
The swap LIMIT should always be larger than -m (--memory) value.
By default, the swap LIMIT will be set to double the value of --memory
Note - idempotency is supported for integers only.
type: str
memory_swappiness:
description:
- Tune a container's memory swappiness behavior. Accepts an integer
between 0 and 100.
type: int
mount:
description:
- Attach a filesystem mount to the container. bind or tmpfs
For example mount
"type=bind,source=/path/on/host,destination=/path/in/container"
type: str
network:
description:
- Set the Network mode for the container
* bridge create a network stack on the default bridge
* none no networking
* container:<name|id> reuse another container's network stack
* host use the podman host network stack.
* <network-name>|<network-id> connect to a user-defined network
* ns:<path> path to a network namespace to join
* slirp4netns use slirp4netns to create a user network stack.
This is the default for rootless containers
type: list
elements: str
aliases:
- net
no_hosts:
description:
- Do not create /etc/hosts for the container
Default is false.
type: bool
oom_kill_disable:
description:
- Whether to disable OOM Killer for the container or not.
Default is false.
type: bool
oom_score_adj:
description:
- Tune the host's OOM preferences for containers (accepts -1000 to 1000)
type: int
pid:
description:
- Set the PID mode for the container
type: str
pids_limit:
description:
- Tune the container's pids limit. Set -1 to have unlimited pids for the
container.
type: str
pod:
description:
- Run container in an existing pod.
        If you want podman to make the pod for you, prefix the pod name
with "new:"
type: str
privileged:
description:
- Give extended privileges to this container. The default is false.
type: bool
publish:
description:
- Publish a container's port, or range of ports, to the host.
Format - ip:hostPort:containerPort | ip::containerPort |
hostPort:containerPort | containerPort
type: list
elements: str
aliases:
- ports
- published
- published_ports
publish_all:
description:
- Publish all exposed ports to random ports on the host interfaces. The
default is false.
type: bool
read_only:
description:
- Mount the container's root filesystem as read only. Default is false
type: bool
read_only_tmpfs:
description:
- If container is running in --read-only mode, then mount a read-write
tmpfs on /run, /tmp, and /var/tmp. The default is true
type: bool
recreate:
description:
- Use with present and started states to force the re-creation of an
existing container.
type: bool
default: False
restart_policy:
description:
- Restart policy to follow when containers exit.
Restart policy will not take effect if a container is stopped via the
podman kill or podman stop commands. Valid values are
* no - Do not restart containers on exit
* on-failure[:max_retries] - Restart containers when they exit with a
non-0 exit code, retrying indefinitely
or until the optional max_retries count is hit
* always - Restart containers when they exit, regardless of status,
retrying indefinitely
type: str
rm:
description:
- Automatically remove the container when it exits. The default is false.
type: bool
aliases:
- remove
rootfs:
description:
- If true, the first argument refers to an exploded container on the file
        system. The default is false.
type: bool
security_opt:
description:
- Security Options. For example security_opt "seccomp=unconfined"
type: list
elements: str
shm_size:
description:
- Size of /dev/shm. The format is <number><unit>. number must be greater
than 0.
Unit is optional and can be b (bytes), k (kilobytes), m(megabytes), or
g (gigabytes).
If you omit the unit, the system uses bytes. If you omit the size
entirely, the system uses 64m
type: str
sig_proxy:
description:
- Proxy signals sent to the podman run command to the container process.
SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is true.
type: bool
stop_signal:
description:
- Signal to stop a container. Default is SIGTERM.
type: int
stop_timeout:
description:
- Timeout (in seconds) to stop a container. Default is 10.
type: int
subgidname:
description:
- Run the container in a new user namespace using the map with 'name' in
the /etc/subgid file.
type: str
subuidname:
description:
- Run the container in a new user namespace using the map with 'name' in
the /etc/subuid file.
type: str
sysctl:
description:
- Configure namespaced kernel parameters at runtime
type: dict
systemd:
description:
- Run container in systemd mode. The default is true.
type: bool
tmpfs:
description:
- Create a tmpfs mount. For example tmpfs
"/tmp" "rw,size=787448k,mode=1777"
type: dict
tty:
description:
- Allocate a pseudo-TTY. The default is false.
type: bool
uidmap:
description:
- Run the container in a new user namespace using the supplied mapping.
type: list
ulimit:
description:
- Ulimit options
type: list
user:
description:
- Sets the username or UID used and optionally the groupname or GID for
the specified command.
type: str
userns:
description:
- Set the user namespace mode for the container.
It defaults to the PODMAN_USERNS environment variable.
An empty value means user namespaces are disabled.
type: str
uts:
description:
- Set the UTS mode for the container
type: str
volume:
description:
- Create a bind mount. If you specify, volume /HOST-DIR:/CONTAINER-DIR,
podman bind mounts /HOST-DIR in the host to /CONTAINER-DIR in the
podman container.
type: list
elements: str
aliases:
- volumes
volumes_from:
description:
- Mount volumes from the specified container(s).
type: list
elements: str
workdir:
description:
- Working directory inside the container.
The default working directory for running binaries within a container
is the root directory (/).
type: str
"""
EXAMPLES = """
- name: Run container
podman_container:
name: container
image: quay.io/bitnami/wildfly
state: started
- name: Create a data container
podman_container:
name: mydata
image: busybox
volume:
- /tmp/data
- name: Re-create a redis container
podman_container:
name: myredis
image: redis
command: redis-server --appendonly yes
state: present
recreate: yes
expose:
- 6379
volumes_from:
- mydata
- name: Restart a container
podman_container:
name: myapplication
image: redis
state: started
restart: yes
etc_hosts:
other: "127.0.0.1"
restart_policy: "no"
device: "/dev/sda:/dev/xvda:rwm"
ports:
- "8080:9000"
- "127.0.0.1:8081:9001/udp"
env:
SECRET_KEY: "ssssh"
BOOLEAN_KEY: "yes"
- name: Container present
podman_container:
name: mycontainer
state: present
image: ubuntu:14.04
command: "sleep 1d"
- name: Stop a container
podman_container:
name: mycontainer
state: stopped
- name: Start 4 load-balanced containers
podman_container:
name: "container{{ item }}"
recreate: yes
image: someuser/anotherappimage
command: sleep 1d
with_sequence: count=4
- name: remove container
podman_container:
name: ohno
state: absent
- name: Writing output
podman_container:
name: myservice
image: busybox
log_options: path=/var/log/container/mycontainer.json
log_driver: k8s-file
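
# An additional illustrative task (not taken from the upstream docs): it only
# combines options documented above; adjust names, image and limits to your
# environment.
- name: Run a memory-limited container with a healthcheck
  podman_container:
    name: webapp
    image: nginx:alpine
    state: started
    memory: 512m
    restart_policy: on-failure:3
    healthcheck: "curl -f http://localhost/ || exit 1"
    healthcheck_interval: 30s
    ports:
      - "8080:80"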
"""
RETURN = """
container:
description:
- Facts representing the current state of the container. Matches the
podman inspection output.
- Note that facts are part of the registered vars since Ansible 2.8. For
compatibility reasons, the facts
are also accessible directly as C(podman_container). Note that the
returned fact will be removed in Ansible 2.12.
- Empty if C(state) is I(absent).
returned: always
type: dict
sample: '{
"AppArmorProfile": "",
"Args": [
"sh"
],
"BoundingCaps": [
"CAP_CHOWN",
...
],
"Config": {
"Annotations": {
"io.kubernetes.cri-o.ContainerType": "sandbox",
"io.kubernetes.cri-o.TTY": "false"
},
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"sh"
],
"Domainname": "",
"Entrypoint": "",
"Env": [
"PATH=/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm",
"HOSTNAME=",
"container=podman"
],
"Hostname": "",
"Image": "docker.io/library/busybox:latest",
"Labels": null,
"OpenStdin": false,
"StdinOnce": false,
"StopSignal": 15,
"Tty": false,
"User": {
"gid": 0,
"uid": 0
},
"Volumes": null,
"WorkingDir": "/"
},
"ConmonPidFile": "...",
"Created": "2019-06-17T19:13:09.873858307+03:00",
"Dependencies": [],
"Driver": "overlay",
"EffectiveCaps": [
"CAP_CHOWN",
...
],
"ExecIDs": [],
"ExitCommand": [
"/usr/bin/podman",
"--root",
...
],
"GraphDriver": {
...
},
"HostConfig": {
...
},
"HostnamePath": "...",
"HostsPath": "...",
"ID": "...",
"Image": "...",
"ImageName": "docker.io/library/busybox:latest",
"IsInfra": false,
"LogPath": "/tmp/container/mycontainer.json",
"MountLabel": "system_u:object_r:container_file_t:s0:c282,c782",
"Mounts": [
...
],
"Name": "myservice",
"Namespace": "",
"NetworkSettings": {
"Bridge": "",
...
},
"Path": "sh",
"ProcessLabel": "system_u:system_r:container_t:s0:c282,c782",
"ResolvConfPath": "...",
"RestartCount": 0,
"Rootfs": "",
"State": {
"Dead": false,
"Error": "",
"ExitCode": 0,
"FinishedAt": "2019-06-17T19:13:10.157518963+03:00",
"Healthcheck": {
"FailingStreak": 0,
"Log": null,
"Status": ""
},
"OOMKilled": false,
"OciVersion": "1.0.1-dev",
"Paused": false,
"Pid": 4083,
"Restarting": false,
"Running": false,
"StartedAt": "2019-06-17T19:13:10.152479729+03:00",
"Status": "exited"
},
"StaticDir": "..."
...
}'
"""
class PodmanModuleParams:
"""Creates list of arguments for podman CLI command.
Arguments:
action {str} -- action type from 'run', 'stop', 'create', 'delete',
'start'
params {dict} -- dictionary of module parameters
"""
def __init__(self, action, params, podman_version, module):
self.params = params
self.action = action
self.podman_version = podman_version
self.module = module
def construct_command_from_params(self):
"""Create a podman command from given module parameters.
Returns:
list -- list of byte strings for Popen command
"""
if self.action in ['start', 'stop', 'delete']:
return self.start_stop_delete()
if self.action in ['create', 'run']:
cmd = [self.action, '--name', self.params['name']]
all_param_methods = [func for func in dir(self)
if callable(getattr(self, func))
and func.startswith("addparam")]
params_set = (i for i in self.params if self.params[i] is not None)
for param in params_set:
func_name = "_".join(["addparam", param])
if func_name in all_param_methods:
cmd = getattr(self, func_name)(cmd)
cmd.append(self.params['image'])
if self.params['command']:
if isinstance(self.params['command'], list):
cmd += self.params['command']
else:
cmd += self.params['command'].split()
return [to_bytes(i, errors='surrogate_or_strict') for i in cmd]
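    # Illustration (a sketch, not executed): with action='run' and params such
    # as {'name': 'web', 'image': 'nginx', 'command': 'sleep 1d',
    # 'detach': True}, the list built above corresponds roughly to
    #   run --name web --detach=True nginx sleep 1d
    # with each addparam_* method contributing its flags whenever the matching
    # module parameter is not None.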
def start_stop_delete(self):
if self.action in ['stop', 'start']:
cmd = [self.action, self.params['name']]
return [to_bytes(i, errors='surrogate_or_strict') for i in cmd]
if self.action == 'delete':
cmd = ['rm', '-f', self.params['name']]
return [to_bytes(i, errors='surrogate_or_strict') for i in cmd]
def check_version(self, param, minv=None, maxv=None):
if minv and LooseVersion(minv) > LooseVersion(
self.podman_version):
self.module.fail_json(msg="Parameter %s is supported from podman "
"version %s only! Current version is %s" % (
param, minv, self.podman_version))
if maxv and LooseVersion(maxv) < LooseVersion(
self.podman_version):
self.module.fail_json(msg="Parameter %s is supported till podman "
"version %s only! Current version is %s" % (
                                          param, maxv, self.podman_version))
def addparam_annotation(self, c):
for annotate in self.params['annotation'].items():
c += ['--annotation', '='.join(annotate)]
return c
def addparam_authfile(self, c):
return c + ['--authfile', self.params['authfile']]
def addparam_blkio_weight(self, c):
return c + ['--blkio-weight', self.params['blkio_weight']]
def addparam_blkio_weight_device(self, c):
for blkio in self.params['blkio_weight_device'].items():
c += ['--blkio-weight-device', ':'.join(blkio)]
return c
def addparam_cap_add(self, c):
for cap_add in self.params['cap_add']:
c += ['--cap-add', cap_add]
return c
def addparam_cap_drop(self, c):
for cap_drop in self.params['cap_drop']:
c += ['--cap-drop', cap_drop]
return c
def addparam_cgroups(self, c):
self.check_version('--cgroups', minv='1.6.0')
return c + ['--cgroups=%s' % self.params['cgroups']]
def addparam_cgroupns(self, c):
self.check_version('--cgroupns', minv='1.6.2')
return c + ['--cgroupns=%s' % self.params['cgroupns']]
def addparam_cgroup_parent(self, c):
return c + ['--cgroup-parent', self.params['cgroup_parent']]
def addparam_cidfile(self, c):
return c + ['--cidfile', self.params['cidfile']]
def addparam_conmon_pidfile(self, c):
return c + ['--conmon-pidfile', self.params['conmon_pidfile']]
def addparam_cpu_period(self, c):
return c + ['--cpu-period', self.params['cpu_period']]
def addparam_cpu_rt_period(self, c):
return c + ['--cpu-rt-period', self.params['cpu_rt_period']]
def addparam_cpu_rt_runtime(self, c):
return c + ['--cpu-rt-runtime', self.params['cpu_rt_runtime']]
def addparam_cpu_shares(self, c):
return c + ['--cpu-shares', self.params['cpu_shares']]
def addparam_cpus(self, c):
return c + ['--cpus', self.params['cpus']]
def addparam_cpuset_cpus(self, c):
return c + ['--cpuset-cpus', self.params['cpuset_cpus']]
def addparam_cpuset_mems(self, c):
return c + ['--cpuset-mems', self.params['cpuset_mems']]
def addparam_detach(self, c):
return c + ['--detach=%s' % self.params['detach']]
def addparam_detach_keys(self, c):
return c + ['--detach-keys', self.params['detach_keys']]
def addparam_device(self, c):
for dev in self.params['device']:
c += ['--device', dev]
return c
def addparam_device_read_bps(self, c):
for dev in self.params['device_read_bps']:
c += ['--device-read-bps', dev]
return c
def addparam_device_read_iops(self, c):
for dev in self.params['device_read_iops']:
c += ['--device-read-iops', dev]
return c
def addparam_device_write_bps(self, c):
for dev in self.params['device_write_bps']:
c += ['--device-write-bps', dev]
return c
def addparam_device_write_iops(self, c):
for dev in self.params['device_write_iops']:
c += ['--device-write-iops', dev]
return c
def addparam_dns(self, c):
return c + ['--dns', ','.join(self.params['dns'])]
def addparam_dns_option(self, c):
return c + ['--dns-option', self.params['dns_option']]
def addparam_dns_search(self, c):
return c + ['--dns-search', self.params['dns_search']]
def addparam_entrypoint(self, c):
return c + ['--entrypoint', self.params['entrypoint']]
def addparam_env(self, c):
for env_value in self.params['env'].items():
c += ['--env',
b"=".join([to_bytes(k, errors='surrogate_or_strict')
for k in env_value])]
return c
def addparam_env_file(self, c):
return c + ['--env-file', self.params['env_file']]
def addparam_env_host(self, c):
self.check_version('--env-host', minv='1.5.0')
return c + ['--env-host=%s' % self.params['env_host']]
def addparam_etc_hosts(self, c):
for host_ip in self.params['etc_hosts'].items():
c += ['--add-host', ':'.join(host_ip)]
return c
def addparam_expose(self, c):
for exp in self.params['expose']:
c += ['--expose', exp]
return c
def addparam_gidmap(self, c):
return c + ['--gidmap', self.params['gidmap']]
def addparam_group_add(self, c):
for g in self.params['group_add']:
c += ['--group-add', g]
return c
def addparam_healthcheck(self, c):
return c + ['--healthcheck-command', self.params['healthcheck']]
def addparam_healthcheck_interval(self, c):
return c + ['--healthcheck-interval',
self.params['healthcheck_interval']]
def addparam_healthcheck_retries(self, c):
return c + ['--healthcheck-retries',
self.params['healthcheck_retries']]
def addparam_healthcheck_start_period(self, c):
return c + ['--healthcheck-start-period',
self.params['healthcheck_start_period']]
def addparam_healthcheck_timeout(self, c):
return c + ['--healthcheck-timeout',
self.params['healthcheck_timeout']]
def addparam_hostname(self, c):
return c + ['--hostname', self.params['hostname']]
def addparam_http_proxy(self, c):
return c + ['--http-proxy=%s' % self.params['http_proxy']]
def addparam_image_volume(self, c):
return c + ['--image-volume', self.params['image_volume']]
def addparam_init(self, c):
return c + ['--init', self.params['init']]
def addparam_init_path(self, c):
return c + ['--init-path', self.params['init_path']]
def addparam_interactive(self, c):
return c + ['--interactive=%s' % self.params['interactive']]
def addparam_ip(self, c):
return c + ['--ip', self.params['ip']]
def addparam_ipc(self, c):
return c + ['--ipc', self.params['ipc']]
def addparam_kernel_memory(self, c):
return c + ['--kernel-memory', self.params['kernel_memory']]
def addparam_label(self, c):
for label in self.params['label'].items():
c += ['--label', b'='.join([to_bytes(l, errors='surrogate_or_strict')
for l in label])]
return c
def addparam_label_file(self, c):
return c + ['--label-file', self.params['label_file']]
def addparam_log_driver(self, c):
return c + ['--log-driver', self.params['log_driver']]
def addparam_log_opt(self, c):
return c + ['--log-opt', self.params['log_opt']]
def addparam_memory(self, c):
return c + ['--memory', self.params['memory']]
def addparam_memory_reservation(self, c):
return c + ['--memory-reservation', self.params['memory_reservation']]
def addparam_memory_swap(self, c):
return c + ['--memory-swap', self.params['memory_swap']]
def addparam_memory_swappiness(self, c):
return c + ['--memory-swappiness', self.params['memory_swappiness']]
def addparam_mount(self, c):
return c + ['--mount', self.params['mount']]
def addparam_network(self, c):
return c + ['--network', ",".join(self.params['network'])]
def addparam_no_hosts(self, c):
return c + ['--no-hosts=%s' % self.params['no_hosts']]
def addparam_oom_kill_disable(self, c):
return c + ['--oom-kill-disable=%s' % self.params['oom_kill_disable']]
def addparam_oom_score_adj(self, c):
return c + ['--oom-score-adj', self.params['oom_score_adj']]
def addparam_pid(self, c):
return c + ['--pid', self.params['pid']]
def addparam_pids_limit(self, c):
return c + ['--pids-limit', self.params['pids_limit']]
def addparam_pod(self, c):
return c + ['--pod', self.params['pod']]
def addparam_privileged(self, c):
return c + ['--privileged=%s' % self.params['privileged']]
def addparam_publish(self, c):
for pub in self.params['publish']:
c += ['--publish', pub]
return c
def addparam_publish_all(self, c):
return c + ['--publish-all=%s' % self.params['publish_all']]
def addparam_read_only(self, c):
return c + ['--read-only=%s' % self.params['read_only']]
def addparam_read_only_tmpfs(self, c):
return c + ['--read-only-tmpfs=%s' % self.params['read_only_tmpfs']]
def addparam_restart_policy(self, c):
return c + ['--restart=%s' % self.params['restart_policy']]
def addparam_rm(self, c):
if self.params['rm']:
c += ['--rm']
return c
def addparam_rootfs(self, c):
return c + ['--rootfs=%s' % self.params['rootfs']]
def addparam_security_opt(self, c):
for secopt in self.params['security_opt']:
c += ['--security-opt', secopt]
return c
def addparam_shm_size(self, c):
return c + ['--shm-size', self.params['shm_size']]
def addparam_sig_proxy(self, c):
return c + ['--sig-proxy=%s' % self.params['sig_proxy']]
def addparam_stop_signal(self, c):
return c + ['--stop-signal', self.params['stop_signal']]
def addparam_stop_timeout(self, c):
return c + ['--stop-timeout', self.params['stop_timeout']]
def addparam_subgidname(self, c):
return c + ['--subgidname', self.params['subgidname']]
def addparam_subuidname(self, c):
return c + ['--subuidname', self.params['subuidname']]
def addparam_sysctl(self, c):
for sysctl in self.params['sysctl'].items():
c += ['--sysctl',
b"=".join([to_bytes(k, errors='surrogate_or_strict')
for k in sysctl])]
return c
def addparam_systemd(self, c):
return c + ['--systemd=%s' % self.params['systemd']]
def addparam_tmpfs(self, c):
for tmpfs in self.params['tmpfs'].items():
c += ['--tmpfs', ':'.join(tmpfs)]
return c
def addparam_tty(self, c):
return c + ['--tty=%s' % self.params['tty']]
def addparam_uidmap(self, c):
for uidmap in self.params['uidmap']:
c += ['--uidmap', uidmap]
return c
def addparam_ulimit(self, c):
for u in self.params['ulimit']:
c += ['--ulimit', u]
return c
def addparam_user(self, c):
return c + ['--user', self.params['user']]
def addparam_userns(self, c):
return c + ['--userns', self.params['userns']]
def addparam_uts(self, c):
return c + ['--uts', self.params['uts']]
def addparam_volume(self, c):
for vol in self.params['volume']:
if vol:
c += ['--volume', vol]
return c
def addparam_volumes_from(self, c):
for vol in self.params['volumes_from']:
c += ['--volumes-from', vol]
return c
def addparam_workdir(self, c):
return c + ['--workdir', self.params['workdir']]
# Add your own args for podman command
def addparam_cmd_args(self, c):
return c + self.params['cmd_args']
class PodmanDefaults:
def __init__(self, module, podman_version):
self.module = module
self.version = podman_version
self.defaults = {
"blkio_weight": 0,
"cgroups": "default",
"cgroup_parent": "",
"cidfile": "",
"cpus": 0.0,
"cpu_shares": 0,
"cpu_quota": 0,
"cpu_period": 0,
"cpu_rt_runtime": 0,
"cpu_rt_period": 0,
"cpuset_cpus": "",
"cpuset_mems": "",
"detach": True,
"device": [],
"env_host": False,
"etc_hosts": {},
"group_add": [],
"healthcheck": "",
"ipc": "",
"kernelmemory": "0",
"log_driver": "k8s-file",
"memory": "0",
"memory_swap": "0",
"memory_reservation": "0",
# "memory_swappiness": -1,
"no_hosts": False,
# libpod issue with networks in inspection
"network": ["default"],
"oom_score_adj": 0,
"pid": "",
"privileged": False,
"rm": False,
"security_opt": [],
"stop_signal": 15,
"tty": False,
"user": "",
"uts": "",
"volume": [],
"workdir": "/",
}
def default_dict(self):
# make here any changes to self.defaults related to podman version
return self.defaults
class PodmanContainerDiff:
def __init__(self, module, info, podman_version):
self.module = module
self.version = podman_version
self.default_dict = None
self.info = yaml.safe_load(json.dumps(info).lower())
self.params = self.defaultize()
self.diff = {'before': {}, 'after': {}}
self.non_idempotent = {
'env_file',
'env_host',
"ulimit", # Defaults depend on user and platform, impossible to guess
}
def defaultize(self):
params_with_defaults = {}
self.default_dict = PodmanDefaults(
self.module, self.version).default_dict()
for p in self.module.params:
if self.module.params[p] is None and p in self.default_dict:
params_with_defaults[p] = self.default_dict[p]
else:
params_with_defaults[p] = self.module.params[p]
return params_with_defaults
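    # Record the before/after values in the diff dict and report whether the value changed.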
def _diff_update_and_compare(self, param_name, before, after):
if before != after:
self.diff['before'].update({param_name: before})
self.diff['after'].update({param_name: after})
return True
return False
def diffparam_annotation(self):
before = self.info['config']['annotations'] or {}
after = before.copy()
if self.module.params['annotation'] is not None:
after.update(self.params['annotation'])
return self._diff_update_and_compare('annotation', before, after)
def diffparam_env_host(self):
        # It's impossible to get this from inspect, so recreate it if not default
before = False
after = self.params['env_host']
return self._diff_update_and_compare('env_host', before, after)
def diffparam_blkio_weight(self):
before = self.info['hostconfig']['blkioweight']
after = self.params['blkio_weight']
return self._diff_update_and_compare('blkio_weight', before, after)
def diffparam_blkio_weight_device(self):
before = self.info['hostconfig']['blkioweightdevice']
if before == [] and self.module.params['blkio_weight_device'] is None:
after = []
else:
after = self.params['blkio_weight_device']
return self._diff_update_and_compare('blkio_weight_device', before, after)
def diffparam_cap_add(self):
before = self.info['effectivecaps'] or []
after = []
if self.module.params['cap_add'] is not None:
after += ["cap_" + i.lower()
for i in self.module.params['cap_add']]
after += before
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('cap_add', before, after)
def diffparam_cap_drop(self):
before = self.info['effectivecaps'] or []
after = before[:]
if self.module.params['cap_drop'] is not None:
for c in ["cap_" + i.lower() for i in self.module.params['cap_drop']]:
if c in after:
after.remove(c)
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('cap_drop', before, after)
def diffparam_cgroup_parent(self):
before = self.info['hostconfig']['cgroupparent']
after = self.params['cgroup_parent']
return self._diff_update_and_compare('cgroup_parent', before, after)
def diffparam_cgroups(self):
# Cgroups output is not supported in all versions
if 'cgroups' in self.info['hostconfig']:
before = self.info['hostconfig']['cgroups']
after = self.params['cgroups']
return self._diff_update_and_compare('cgroups', before, after)
return False
def diffparam_cidfile(self):
before = self.info['hostconfig']['containeridfile']
after = self.params['cidfile']
return self._diff_update_and_compare('cidfile', before, after)
def diffparam_command(self):
# TODO(sshnaidm): to inspect image to get the default command
if self.module.params['command'] is not None:
before = self.info['config']['cmd']
after = self.params['command']
if isinstance(after, str):
after = [i.lower() for i in after.split()]
elif isinstance(after, list):
after = [i.lower() for i in after]
return self._diff_update_and_compare('command', before, after)
return False
def diffparam_conmon_pidfile(self):
before = self.info['conmonpidfile']
if self.module.params['conmon_pidfile'] is None:
after = before
else:
after = self.params['conmon_pidfile']
return self._diff_update_and_compare('conmon_pidfile', before, after)
def diffparam_cpu_period(self):
before = self.info['hostconfig']['cpuperiod']
after = self.params['cpu_period']
return self._diff_update_and_compare('cpu_period', before, after)
def diffparam_cpu_rt_period(self):
before = self.info['hostconfig']['cpurealtimeperiod']
after = self.params['cpu_rt_period']
return self._diff_update_and_compare('cpu_rt_period', before, after)
def diffparam_cpu_rt_runtime(self):
before = self.info['hostconfig']['cpurealtimeruntime']
after = self.params['cpu_rt_runtime']
return self._diff_update_and_compare('cpu_rt_runtime', before, after)
def diffparam_cpu_shares(self):
before = self.info['hostconfig']['cpushares']
after = self.params['cpu_shares']
return self._diff_update_and_compare('cpu_shares', before, after)
def diffparam_cpus(self):
before = int(self.info['hostconfig']['nanocpus']) / 1000000000
after = self.params['cpus']
return self._diff_update_and_compare('cpus', before, after)
def diffparam_cpuset_cpus(self):
before = self.info['hostconfig']['cpusetcpus']
after = self.params['cpuset_cpus']
return self._diff_update_and_compare('cpuset_cpus', before, after)
def diffparam_cpuset_mems(self):
before = self.info['hostconfig']['cpusetmems']
after = self.params['cpuset_mems']
return self._diff_update_and_compare('cpuset_mems', before, after)
def diffparam_device(self):
before = [":".join([i['pathonhost'], i['pathincontainer']])
for i in self.info['hostconfig']['devices']]
after = [":".join(i.split(":")[:2]) for i in self.params['device']]
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('devices', before, after)
def diffparam_device_read_bps(self):
before = self.info['hostconfig']['blkiodevicereadbps'] or []
before = ["%s:%s" % (i['path'], i['rate']) for i in before]
after = self.params['device_read_bps'] or []
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('device_read_bps', before, after)
def diffparam_device_read_iops(self):
before = self.info['hostconfig']['blkiodevicereadiops'] or []
before = ["%s:%s" % (i['path'], i['rate']) for i in before]
after = self.params['device_read_iops'] or []
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('device_read_iops', before, after)
def diffparam_device_write_bps(self):
before = self.info['hostconfig']['blkiodevicewritebps'] or []
before = ["%s:%s" % (i['path'], i['rate']) for i in before]
after = self.params['device_write_bps'] or []
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('device_write_bps', before, after)
def diffparam_device_write_iops(self):
before = self.info['hostconfig']['blkiodevicewriteiops'] or []
before = ["%s:%s" % (i['path'], i['rate']) for i in before]
after = self.params['device_write_iops'] or []
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('device_write_iops', before, after)
# Limited idempotency, it can't guess default values
def diffparam_env(self):
env_before = self.info['config']['env'] or {}
before = {i.split("=")[0]: i.split("=")[1] for i in env_before}
after = before.copy()
if self.params['env']:
after.update({
str(k).lower(): str(v).lower()
for k, v in self.params['env'].items()
})
return self._diff_update_and_compare('env', before, after)
def diffparam_etc_hosts(self):
if self.info['hostconfig']['extrahosts']:
before = dict([i.split(":") for i in self.info['hostconfig']['extrahosts']])
else:
before = {}
after = self.params['etc_hosts']
return self._diff_update_and_compare('etc_hosts', before, after)
def diffparam_group_add(self):
before = self.info['hostconfig']['groupadd']
after = self.params['group_add']
return self._diff_update_and_compare('group_add', before, after)
# Healthcheck is only defined in container config if a healthcheck
# was configured; otherwise the config key isn't part of the config.
def diffparam_healthcheck(self):
if 'healthcheck' in self.info['config']:
# the "test" key is a list of 2 items where the first one is
# "CMD-SHELL" and the second one is the actual healthcheck command.
before = self.info['config']['healthcheck']['test'][1]
else:
before = ''
after = self.params['healthcheck'] or before
return self._diff_update_and_compare('healthcheck', before, after)
    # Because the hostname is randomly generated, this parameter has partial idempotency only.
def diffparam_hostname(self):
before = self.info['config']['hostname']
after = self.params['hostname'] or before
return self._diff_update_and_compare('hostname', before, after)
def diffparam_image(self):
# TODO(sshnaidm): for strict image compare mode use SHAs
before = self.info['config']['image']
after = self.params['image']
mode = self.params['image_strict']
if mode is None or not mode:
            # In an idempotency 'lite mode', assume all images from different registries are the same
before = before.replace(":latest", "")
after = after.replace(":latest", "")
before = before.split("/")[-1]
after = after.split("/")[-1]
return self._diff_update_and_compare('image', before, after)
def diffparam_ipc(self):
before = self.info['hostconfig']['ipcmode']
after = self.params['ipc']
return self._diff_update_and_compare('ipc', before, after)
def diffparam_label(self):
before = self.info['config']['labels'] or {}
after = before.copy()
if self.params['label']:
after.update({
str(k).lower(): str(v).lower()
for k, v in self.params['label'].items()
})
return self._diff_update_and_compare('label', before, after)
def diffparam_log_driver(self):
before = self.info['hostconfig']['logconfig']['type']
after = self.params['log_driver']
return self._diff_update_and_compare('log_driver', before, after)
# Parameter has limited idempotency, unable to guess the default log_path
def diffparam_log_opt(self):
before = self.info['logpath']
if self.module.params['log_opt'] in [None, '']:
after = before
else:
after = self.params['log_opt'].split("=")[1]
return self._diff_update_and_compare('log_opt', before, after)
def diffparam_memory(self):
before = str(self.info['hostconfig']['memory'])
after = self.params['memory']
return self._diff_update_and_compare('memory', before, after)
def diffparam_memory_swap(self):
        # By default it's twice the memory parameter
before = str(self.info['hostconfig']['memoryswap'])
after = self.params['memory_swap']
if (self.module.params['memory_swap'] is None
and self.params['memory'] != 0
and self.params['memory'].isdigit()):
after = str(int(self.params['memory']) * 2)
return self._diff_update_and_compare('memory_swap', before, after)
def diffparam_memory_reservation(self):
before = str(self.info['hostconfig']['memoryreservation'])
after = self.params['memory_reservation']
return self._diff_update_and_compare('memory_reservation', before, after)
def diffparam_network(self):
before = [self.info['hostconfig']['networkmode']]
after = self.params['network']
return self._diff_update_and_compare('network', before, after)
def diffparam_no_hosts(self):
before = not bool(self.info['hostspath'])
after = self.params['no_hosts']
if self.params['network'] == ['none']:
after = True
return self._diff_update_and_compare('no_hosts', before, after)
def diffparam_oom_score_adj(self):
before = self.info['hostconfig']['oomscoreadj']
after = self.params['oom_score_adj']
return self._diff_update_and_compare('oom_score_adj', before, after)
def diffparam_privileged(self):
before = self.info['hostconfig']['privileged']
after = self.params['privileged']
return self._diff_update_and_compare('privileged', before, after)
def diffparam_pid(self):
before = self.info['hostconfig']['pidmode']
after = self.params['pid']
return self._diff_update_and_compare('pid', before, after)
def diffparam_rm(self):
before = self.info['hostconfig']['autoremove']
after = self.params['rm']
return self._diff_update_and_compare('rm', before, after)
def diffparam_security_opt(self):
before = self.info['hostconfig']['securityopt']
after = self.params['security_opt']
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('security_opt', before, after)
def diffparam_stop_signal(self):
before = self.info['config']['stopsignal']
after = self.params['stop_signal']
return self._diff_update_and_compare('stop_signal', before, after)
def diffparam_tty(self):
before = self.info['config']['tty']
after = self.params['tty']
return self._diff_update_and_compare('tty', before, after)
def diffparam_user(self):
before = self.info['config']['user']
if self.module.params['user'] is None and before:
after = before
else:
after = self.params['user']
return self._diff_update_and_compare('user', before, after)
def diffparam_uts(self):
before = self.info['hostconfig']['utsmode']
after = self.params['uts']
return self._diff_update_and_compare('uts', before, after)
def diffparam_volume(self):
before = self.info['mounts']
if before:
volumes = []
for m in before:
if m['type'] == 'volume':
volumes.append([m['name'], m['destination']])
else:
volumes.append([m['source'], m['destination']])
before = [":".join(v) for v in volumes]
# Ignore volumes option for idempotency
after = [":".join(v.split(":")[:2]) for v in self.params['volume']]
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('volume', before, after)
def diffparam_volumes_from(self):
before = self.info['hostconfig']['volumesfrom'] or []
after = self.params['volumes_from'] or []
return self._diff_update_and_compare('volumes_from', before, after)
def diffparam_workdir(self):
before = self.info['config']['workingdir']
after = self.params['workdir']
return self._diff_update_and_compare('workdir', before, after)
def is_different(self):
diff_func_list = [func for func in dir(self)
if callable(getattr(self, func)) and func.startswith(
"diffparam")]
fail_fast = not bool(self.module._diff)
different = False
for func_name in diff_func_list:
dff_func = getattr(self, func_name)
if dff_func():
if fail_fast:
return True
else:
different = True
# Check non idempotent parameters
for p in self.non_idempotent:
if self.module.params[p] is not None and self.module.params[p] not in [{}, [], '']:
different = True
return different
def ensure_image_exists(module, image):
"""If image is passed, ensure it exists, if not - pull it or fail.
Arguments:
module {obj} -- ansible module object
image {str} -- name of image
Returns:
list -- list of image actions - if it pulled or nothing was done
"""
image_actions = []
module_exec = module.params['executable']
if not image:
return image_actions
rc, out, err = module.run_command([module_exec, 'image', 'exists', image])
if rc == 0:
return image_actions
rc, out, err = module.run_command([module_exec, 'image', 'pull', image])
if rc != 0:
module.fail_json(msg="Can't pull image %s" % image, stdout=out,
stderr=err)
image_actions.append("pulled image %s" % image)
return image_actions
class PodmanContainer:
"""Perform container tasks.
    Manages a podman container: inspects it and checks its current state
"""
def __init__(self, module, name):
"""Initialize PodmanContainer class.
Arguments:
module {obj} -- ansible module object
name {str} -- name of container
"""
super(PodmanContainer, self).__init__()
self.module = module
self.name = name
self.stdout, self.stderr = '', ''
self.info = self.get_info()
self.version = self._get_podman_version()
self.diff = {}
self.actions = []
@property
def exists(self):
"""Check if container exists."""
return bool(self.info != {})
@property
def different(self):
"""Check if container is different."""
diffcheck = PodmanContainerDiff(self.module, self.info, self.version)
is_different = diffcheck.is_different()
diffs = diffcheck.diff
if self.module._diff and is_different and diffs['before'] and diffs['after']:
self.diff['before'] = "\n".join(
["%s - %s" % (k, v) for k, v in sorted(
diffs['before'].items())]) + "\n"
self.diff['after'] = "\n".join(
["%s - %s" % (k, v) for k, v in sorted(
diffs['after'].items())]) + "\n"
return is_different
@property
def running(self):
"""Return True if container is running now."""
return self.exists and self.info['State']['Running']
@property
def stopped(self):
"""Return True if container exists and is not running now."""
return self.exists and not self.info['State']['Running']
def get_info(self):
"""Inspect container and gather info about it."""
rc, out, err = self.module.run_command(
[self.module.params['executable'], b'container', b'inspect', self.name])
return json.loads(out)[0] if rc == 0 else {}
def _get_podman_version(self):
rc, out, err = self.module.run_command(
[self.module.params['executable'], b'--version'])
if rc != 0 or not out or "version" not in out:
self.module.fail_json(msg="%s run failed!" % self.module.params['executable'])
return out.split("version")[1].strip()
def _perform_action(self, action):
"""Perform action with container.
Arguments:
action {str} -- action to perform - start, create, stop, run,
delete
"""
b_command = PodmanModuleParams(action,
self.module.params,
self.version,
self.module,
).construct_command_from_params()
full_cmd = " ".join([self.module.params['executable']]
+ [to_native(i) for i in b_command])
self.module.log("PODMAN-CONTAINER-DEBUG: %s" % full_cmd)
self.actions.append(full_cmd)
if not self.module.check_mode:
rc, out, err = self.module.run_command(
[self.module.params['executable'], b'container'] + b_command,
expand_user_and_vars=False)
self.stdout = out
self.stderr = err
if rc != 0:
self.module.fail_json(
msg="Can't %s container %s" % (action, self.name),
stdout=out, stderr=err)
def run(self):
"""Run the container."""
self._perform_action('run')
def delete(self):
"""Delete the container."""
self._perform_action('delete')
def stop(self):
"""Stop the container."""
self._perform_action('stop')
def start(self):
"""Start the container."""
self._perform_action('start')
def create(self):
"""Create the container."""
self._perform_action('create')
def recreate(self):
"""Recreate the container."""
self.delete()
self.run()
def restart(self):
"""Restart the container."""
self.stop()
self.run()
class PodmanManager:
"""Module manager class.
    Defines, based on the module parameters, which actions should be applied to the container
"""
def __init__(self, module):
"""Initialize PodmanManager class.
Arguments:
module {obj} -- ansible module object
"""
super(PodmanManager, self).__init__()
self.module = module
self.results = {
'changed': False,
'actions': [],
'container': {},
}
self.name = self.module.params['name']
self.executable = \
self.module.get_bin_path(self.module.params['executable'],
required=True)
self.image = self.module.params['image']
image_actions = ensure_image_exists(self.module, self.image)
self.results['actions'] += image_actions
self.state = self.module.params['state']
self.restart = self.module.params['force_restart']
self.recreate = self.module.params['recreate']
self.container = PodmanContainer(self.module, self.name)
def update_container_result(self, changed=True):
"""Inspect the current container, update results with last info, exit.
Keyword Arguments:
changed {bool} -- whether any action was performed
(default: {True})
"""
facts = self.container.get_info() if changed else self.container.info
out, err = self.container.stdout, self.container.stderr
self.results.update({'changed': changed, 'container': facts,
'podman_actions': self.container.actions},
stdout=out, stderr=err)
if self.container.diff:
self.results.update({'diff': self.container.diff})
if self.module.params['debug']:
self.results.update({'podman_version': self.container.version})
self.module.exit_json(**self.results)
def make_started(self):
"""Run actions if desired state is 'started'."""
if self.container.running and \
(self.container.different or self.recreate):
self.container.recreate()
self.results['actions'].append('recreated %s' %
self.container.name)
self.update_container_result()
elif self.container.running and not self.container.different:
if self.restart:
self.container.restart()
self.results['actions'].append('restarted %s' %
self.container.name)
self.update_container_result()
self.update_container_result(changed=False)
elif not self.container.exists:
self.container.run()
self.results['actions'].append('started %s' % self.container.name)
self.update_container_result()
elif self.container.stopped and self.container.different:
self.container.recreate()
self.results['actions'].append('recreated %s' %
self.container.name)
self.update_container_result()
elif self.container.stopped and not self.container.different:
self.container.start()
self.results['actions'].append('started %s' % self.container.name)
self.update_container_result()
def make_stopped(self):
"""Run actions if desired state is 'stopped'."""
if not self.container.exists and not self.image:
self.module.fail_json(msg='Cannot create container when image'
' is not specified!')
if not self.container.exists:
self.container.create()
self.results['actions'].append('created %s' % self.container.name)
self.update_container_result()
if self.container.stopped:
self.update_container_result(changed=False)
elif self.container.running:
self.container.stop()
self.results['actions'].append('stopped %s' % self.container.name)
self.update_container_result()
def make_absent(self):
"""Run actions if desired state is 'absent'."""
if not self.container.exists:
self.results.update({'changed': False})
elif self.container.exists:
self.container.delete()
self.results['actions'].append('deleted %s' % self.container.name)
self.results.update({'changed': True})
self.results.update({'container': {},
'podman_actions': self.container.actions})
self.module.exit_json(**self.results)
def execute(self):
"""Execute the desired action according to map of actions & states."""
states_map = {
'present': self.make_started,
'started': self.make_started,
'absent': self.make_absent,
'stopped': self.make_stopped
}
process_action = states_map[self.state]
process_action()
self.module.fail_json(msg="Unexpected logic error happened, "
"please contact maintainers ASAP!")
def main():
module = AnsibleModule(
argument_spec=yaml.safe_load(DOCUMENTATION)['options'],
mutually_exclusive=(
['no_hosts', 'etc_hosts'],
),
supports_check_mode=True,
)
# work on input vars
if module.params['state'] in ['started', 'present'] and \
not module.params['image']:
module.fail_json(msg="State '%s' required image to be configured!" %
module.params['state'])
PodmanManager(module).execute()
if __name__ == '__main__':
main()
| [((894, 20, 894, 61), 'ansible.module_utils._text.to_bytes', 'to_bytes', (), '', False, 'from ansible.module_utils._text import to_bytes, to_native\n'), ((900, 20, 900, 61), 'ansible.module_utils._text.to_bytes', 'to_bytes', (), '', False, 'from ansible.module_utils._text import to_bytes, to_native\n'), ((904, 20, 904, 61), 'ansible.module_utils._text.to_bytes', 'to_bytes', (), '', False, 'from ansible.module_utils._text import to_bytes, to_native\n'), ((907, 20, 907, 38), 'distutils.version.LooseVersion', 'LooseVersion', ({(907, 33, 907, 37): 'minv'}, {}), '(minv)', False, 'from distutils.version import LooseVersion\n'), ((907, 41, 908, 36), 'distutils.version.LooseVersion', 'LooseVersion', ({(908, 16, 908, 35): 'self.podman_version'}, {}), '(self.podman_version)', False, 'from distutils.version import LooseVersion\n'), ((912, 20, 912, 38), 'distutils.version.LooseVersion', 'LooseVersion', ({(912, 33, 912, 37): 'maxv'}, {}), '(maxv)', False, 'from distutils.version import LooseVersion\n'), ((912, 41, 913, 36), 'distutils.version.LooseVersion', 'LooseVersion', ({(913, 16, 913, 35): 'self.podman_version'}, {}), '(self.podman_version)', False, 'from distutils.version import LooseVersion\n'), ((1791, 15, 1791, 30), 'json.loads', 'json.loads', ({(1791, 26, 1791, 29): 'out'}, {}), '(out)', False, 'import json\n'), ((1982, 22, 1982, 51), 'yaml.safe_load', 'yaml.safe_load', ({(1982, 37, 1982, 50): 'DOCUMENTATION'}, {}), '(DOCUMENTATION)', False, 'import yaml\n'), ((1317, 35, 1317, 51), 'json.dumps', 'json.dumps', ({(1317, 46, 1317, 50): 'info'}, {}), '(info)', False, 'import json\n'), ((1813, 31, 1813, 43), 'ansible.module_utils._text.to_native', 'to_native', ({(1813, 41, 1813, 42): 'i'}, {}), '(i)', False, 'from ansible.module_utils._text import to_bytes, to_native\n'), ((1028, 29, 1028, 70), 'ansible.module_utils._text.to_bytes', 'to_bytes', (), '', False, 'from ansible.module_utils._text import to_bytes, to_native\n'), ((1105, 40, 1105, 81), 'ansible.module_utils._text.to_bytes', 'to_bytes', (), '', False, 'from ansible.module_utils._text import to_bytes, to_native\n'), ((1208, 29, 1208, 70), 'ansible.module_utils._text.to_bytes', 'to_bytes', (), '', False, 'from ansible.module_utils._text import to_bytes, to_native\n')] |
UdoGi/dark-matter | setup.py | 3d49e89fa5e81f83144119f6216c5774176d203b | #!/usr/bin/env python
from setuptools import setup
# Modified from http://stackoverflow.com/questions/2058802/
# how-can-i-get-the-version-defined-in-setup-py-setuptools-in-my-package
def version():
import os
import re
init = os.path.join('dark', '__init__.py')
with open(init) as fp:
initData = fp.read()
match = re.search(r"^__version__ = ['\"]([^'\"]+)['\"]",
initData, re.M)
if match:
return match.group(1)
else:
raise RuntimeError('Unable to find version string in %r.' % init)
# Explicitly list bin scripts to be installed, seeing as I have a few local
# bin files that are not (yet) part of the distribution.
scripts = [
'bin/aa-info.py',
'bin/aa-to-dna.py',
'bin/aa-to-properties.py',
'bin/adaptor-distances.py',
'bin/alignment-panel-civ.py',
'bin/alignments-per-read.py',
'bin/bit-score-to-e-value.py',
'bin/cat-json-blast-records.py',
'bin/check-fasta-json-blast-consistency.py',
'bin/codon-distance.py',
'bin/compare-consensuses.py',
'bin/compare-sequences.py',
'bin/convert-blast-xml-to-json.py',
'bin/convert-diamond-to-json.py',
'bin/convert-diamond-to-sam.py',
'bin/convert-sam-to-fastq.sh',
'bin/create-newick-relabeling-output.py',
'bin/dark-matter-version.py',
'bin/describe-protein-database.py',
'bin/dna-to-aa.py',
'bin/download-genbank.sh',
'bin/e-value-to-bit-score.py',
'bin/extract-ORFs.py',
'bin/fasta-base-indices.py',
'bin/fasta-count.py',
'bin/fasta-diff.sh',
'bin/fasta-identity-table.py',
'bin/fasta-ids.py',
'bin/fasta-join.py',
'bin/fasta-lengths.py',
'bin/fasta-sequences.py',
'bin/fasta-sort.py',
'bin/fasta-split-by-id.py',
'bin/fasta-subset.py',
'bin/fasta-subtraction.py',
'bin/fasta-to-phylip.py',
'bin/fasta-variable-sites.py',
'bin/filter-fasta-by-complexity.py',
'bin/filter-fasta-by-taxonomy.py',
'bin/filter-fasta.py',
'bin/filter-hits-to-fasta.py',
'bin/filter-reads-alignments.py',
'bin/filter-sam.py',
'bin/find-hits.py',
'bin/format-fasta.py',
'bin/genome-protein-summary.py',
'bin/get-features.py',
'bin/get-hosts.py',
'bin/get-reads.py',
'bin/get-taxonomy.py',
'bin/graph-evalues.py',
'bin/local-align.py',
'bin/make-consensus.py',
'bin/make-fasta-database.py',
'bin/make-protein-database.py',
'bin/ncbi-fetch-id.py',
'bin/newick-to-ascii.py',
'bin/noninteractive-alignment-panel.py',
'bin/parse-genbank-flat-file.py',
'bin/position-summary.py',
'bin/pre-commit.sh',
'bin/print-blast-xml-for-derek.py',
'bin/print-blast-xml.py',
'bin/print-read-lengths.py',
'bin/proteins-to-pathogens.py',
'bin/proteins-to-pathogens-civ.py',
'bin/randomize-fasta.py',
'bin/read-blast-json.py',
'bin/read-blast-xml.py',
'bin/relabel-newick-tree.py',
'bin/run-bwa.py',
'bin/run-bowtie2.py',
'bin/sam-coverage.py',
'bin/sam-coverage-depth.py',
'bin/sam-to-fasta-alignment.py',
'bin/sam-reference-read-counts.py',
'bin/sam-references.py',
'bin/sff-to-fastq.py',
'bin/split-fasta-by-adaptors.py',
'bin/subset-protein-database.py',
'bin/summarize-fasta-bases.py',
'bin/summarize-reads.py',
'bin/trim-primers.py',
'bin/trim-reads.py',
'bin/write-htcondor-job-spec.py',
]
setup(name='dark-matter',
version=version(),
packages=['dark', 'dark.blast', 'dark.diamond', 'dark.civ'],
url='https://github.com/acorg/dark-matter',
download_url='https://github.com/acorg/dark-matter',
author='Terry Jones, Barbara Muehlemann, Tali Veith, Sophie Mathias',
author_email='[email protected]',
keywords=['virus discovery'],
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='MIT',
description='Python classes for working with genetic sequence data',
scripts=scripts,
install_requires=[
'biopython>=1.71',
'bz2file>=0.98',
'Cython>=0.29.16',
'ipython>=3.1.0',
'matplotlib>=1.4.3',
'mysql-connector-python==8.0.11',
'numpy>=1.14.2',
'pysam>=0.15.2',
'pyfaidx>=0.4.8.4',
'pyzmq>=14.3.1',
'requests>=2.18.4',
'cachetools>=3.1.0',
'simplejson>=3.5.3',
'six>=1.11.0',
])
| [((12, 11, 12, 46), 'os.path.join', 'os.path.join', ({(12, 24, 12, 30): '"""dark"""', (12, 32, 12, 45): '"""__init__.py"""'}, {}), "('dark', '__init__.py')", False, 'import os\n'), ((15, 12, 16, 37), 're.search', 're.search', ({(15, 22, 15, 59): '"""^__version__ = [\'\\\\"]([^\'\\\\"]+)[\'\\\\"]"""', (16, 22, 16, 30): 'initData', (16, 32, 16, 36): 're.M'}, {}), '(\'^__version__ = [\\\'\\\\"]([^\\\'\\\\"]+)[\\\'\\\\"]\', initData, re.M)', False, 'import re\n')] |
FilipBali/VirtualPortfolio-WebApplication | authenticationApp/templatetags/timetags.py | 9236509205e37c2c682b7b2f518f5794a94fd178 | # ======================================================================================================================
# Fakulta informacnich technologii VUT v Brne
# Bachelor thesis
# Author: Filip Bali (xbalif00)
# License: MIT
# ======================================================================================================================
from django import template
import datetime
import time
from portfolioApp.models import NotificationEvent
register = template.Library()
import pandas as pd
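# print_timestamp expects an epoch timestamp in milliseconds and renders it as a YYYY-MM-DD date string.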
def print_timestamp(timestamp):
    return time.strftime('%Y-%m-%d', time.gmtime(timestamp/1000.0))
def print_timestamp_analysis(timestamp):
return str(timestamp.year) + '-' + str(timestamp.month) +'-' + str(timestamp.day)
def print_timestamp_notifications(timestamp):
return str(timestamp.year) + '-' + str(timestamp.month) +'-' + str(timestamp.day)
def print_notification_text(type):
if type == 1:
return 'At a price change equal/above/below'
elif type == 2:
return 'Percentage increase current price'
elif type == 3:
return 'Percentage decrease current price'
def print_symbol_notifications(notification_id):
object = NotificationEvent.objects.get(id=notification_id)
symbol = str(object.company.symbol)
return symbol
def print_type_notifications(notification_type):
if notification_type == 1:
return 'Interday'
elif notification_type == 2:
return 'Intraday'
register.filter(print_timestamp)
register.filter(print_timestamp_analysis)
register.filter(print_timestamp_notifications)
register.filter(print_notification_text)
register.filter(print_symbol_notifications)
register.filter(print_type_notifications) | [((13, 11, 13, 29), 'django.template.Library', 'template.Library', ({}, {}), '()', False, 'from django import template\n'), ((35, 13, 35, 62), 'portfolioApp.models.NotificationEvent.objects.get', 'NotificationEvent.objects.get', (), '', False, 'from portfolioApp.models import NotificationEvent\n'), ((18, 60, 18, 89), 'time.gmtime', 'time.gmtime', ({(18, 72, 18, 88): '(timestamp / 1000.0)'}, {}), '(timestamp / 1000.0)', False, 'import time\n')] |
donatoaz/pycfmodel | pycfmodel/model/resources/properties/policy.py | 1586e290b67d2347493dd4a77d2b0c8ee6c0936b | from pycfmodel.model.resources.properties.policy_document import PolicyDocument
from pycfmodel.model.resources.properties.property import Property
from pycfmodel.model.types import Resolvable, ResolvableStr
class Policy(Property):
"""
Contains information about an attached policy.
Properties:
- PolicyDocument: A [policy document][pycfmodel.model.resources.properties.policy_document.PolicyDocument] object.
- PolicyName: The friendly name (not ARN) identifying the policy.
"""
PolicyName: ResolvableStr
PolicyDocument: Resolvable[PolicyDocument]
| [] |
mrahim/stacked-learn | stlearn/__init__.py | b04b49f65f06de7f5b59ba4139b0f78f8d66d94a | from .stacking import StackingClassifier, stack_features
from .multitask import MultiTaskEstimator
| [] |
utsavm9/wasm-micro-runtime | samples/workload/XNNPACK/toolchain/emscripten_toolchain_config.bzl | 0960e82db2be30b741f5c83e7a57ea9056b2ab59 | # Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"feature",
"flag_group",
"flag_set",
"tool_path",
)
all_compile_actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
]
all_link_actions = [
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]
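# Toolchain rule implementation: wires the emsdk tool paths (emcc/emar/em++) and the default
# -O3/-msimd128 compile and link flags into the cc toolchain config.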
def _impl(ctx):
tool_paths = [
tool_path(
name = "gcc",
path = "/opt/emsdk/upstream/emscripten/emcc",
),
tool_path(
name = "ld",
path = "/opt/emsdk/upstream/emscripten/emcc",
),
tool_path(
name = "ar",
path = "/opt/emsdk/upstream/emscripten/emar",
),
tool_path(
name = "cpp",
path = "/opt/emsdk/upstream/emscripten/em++",
),
tool_path(
name = "gcov",
path = "/bin/false",
),
tool_path(
name = "nm",
path = "/bin/false",
),
tool_path(
name = "objdump",
path = "/bin/false",
),
tool_path(
name = "strip",
path = "/bin/false",
),
]
features = [ # NEW
feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_compile_actions,
flag_groups = ([
flag_group(
flags = [
"-O3",
"-msimd128",
"-s",
"USE_PTHREADS=0",
"-s",
"ERROR_ON_UNDEFINED_SYMBOLS=0",
"-s",
"STANDALONE_WASM=1",
],
),
]),
),
],
),
feature(
name = "default_linker_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = ([
flag_group(
flags = [
"-O3",
"-msimd128",
"-s",
"USE_PTHREADS=0",
"-s",
"ERROR_ON_UNDEFINED_SYMBOLS=0",
"-s",
"STANDALONE_WASM=1",
"-Wl,--export=__heap_base",
"-Wl,--export=__data_end",
],
),
]),
),
],
),
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features, # NEW
cxx_builtin_include_directories = [
"/opt/emsdk/upstream/emscripten/system/include/libcxx",
"/opt/emsdk/upstream/emscripten/system/lib/libcxxabi/include",
"/opt/emsdk/upstream/emscripten/system/include",
"/opt/emsdk/upstream/emscripten/system/include/libc",
"/opt/emsdk/upstream/emscripten/system/lib/libc/musl/arch/emscripten",
"/opt/emsdk/upstream/lib/clang/12.0.0/include/",
],
toolchain_identifier = "wasm-emsdk",
host_system_name = "i686-unknown-linux-gnu",
target_system_name = "wasm32-unknown-emscripten",
target_cpu = "wasm32",
target_libc = "unknown",
compiler = "emsdk",
abi_version = "unknown",
abi_libc_version = "unknown",
tool_paths = tool_paths,
)
emsdk_toolchain_config = rule(
implementation = _impl,
attrs = {},
provides = [CcToolchainConfigInfo],
)
| [] |
toplenboren/safezone | cloud_storages/gdrive/gdrive.py | eafad765ed7cd6f6b7607ac07e75fd843d32ee07 | from __future__ import print_function
import json
from typing import List
from functools import lru_cache
from cloud_storages.http_shortcuts import *
from database.database import Database
from models.models import StorageMetaInfo, Resource, Size
from cloud_storages.storage import Storage
from cloud_storages.gdrive.client_config import GOOGLE_DRIVE_CONFIG, SCOPES
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
GOOGLE_DRIVE_DB_KEY = 'google'
class GDriveStorage(Storage):
def __init__(self, token):
self.token = token
@lru_cache(maxsize=None)
def _get_folder_id_by_name(self, name: str) -> str:
"""
Google drive has a quirk - you can't really use normal os-like paths - first you need to get an ID of the folder
        This function searches for folders with the specified name and returns the ID of the first match
"""
response = get_with_OAuth(
f"https://www.googleapis.com/drive/v3/files",
params={
'fields': '*',
'q': f"name = '{name}' and mimeType = 'application/vnd.google-apps.folder'"
},
token=self.token
)
if response.status_code == 200:
response_as_json = response.json()
try:
result = response_as_json['files'][0]['id']
return result
except IndexError as e:
raise ValueError(f"Something went wrong with GD: Error: {e}")
else:
raise ValueError(f"Something went wrong with GD: Response: "
f"{str(response.status_code)} — {response.json()}")
@classmethod
# todo (toplenboren) remove database argument dependency :(
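    # Load cached credentials from the local DB; refresh them if expired, otherwise run the OAuth flow, then store the new token.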
def auth(cls, db: Database):
creds = None
creds_from_db = db.get(GOOGLE_DRIVE_DB_KEY)
if creds_from_db:
creds = Credentials.from_authorized_user_info(json.loads(creds_from_db), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_config(GOOGLE_DRIVE_CONFIG, SCOPES)
creds = flow.run_local_server(port=0)
db.set(GOOGLE_DRIVE_DB_KEY, creds.token)
@classmethod
def _deserialize_resource(cls, json: dict) -> Resource or None:
"""
        Tries to parse a Google Drive file metadata dict into a Resource object
:param json:
:return:
"""
try:
is_file = True
if 'folder' in json['mimeType']:
is_file = False
# You don't have pathes in google drive, instead -- you have an id
path = json['id']
except KeyError:
return None
res = Resource(is_file, path)
res.size = Size(json.get('size'), 'b') if json.get('size') else None
res.name = json.get('name')
res.url = json.get('webContentLink')
res.updated = json.get('modifiedTime')
res.md5 = json.get('md5Checksum')
return res
def list_resources_on_path(self, remote_path: str) -> List[Resource]:
"""
List all items in directory
        :param remote_path: name of the remote folder whose contents should be listed
"""
folder_id = self._get_folder_id_by_name(remote_path)
response = get_with_OAuth(
f"https://www.googleapis.com/drive/v3/files",
params={
'fields': '*',
'q': f"'{folder_id}' in parents"
},
token=self.token
)
if response.status_code == 200:
result = []
response_as_json = response.json()
files = response_as_json['files']
for resource in files:
res: Resource or None = self._deserialize_resource(resource)
if res is not None:
result.append(res)
return result
else:
raise ValueError(f"Something went wrong with YD: Response: "
f"{str(response.status_code)} — {response.json()['message']}")
def get_meta_info(self) -> StorageMetaInfo:
response = get_with_OAuth('https://www.googleapis.com/drive/v3/about?fields=*', token=self.token)
if response.status_code == 200:
response_read = response.json()
used_space = response_read.get('storageQuota', {}).get('usage')
total_space = response_read.get('storageQuota', {}).get('limit')
return StorageMetaInfo(int(used_space), int(total_space))
else:
raise ValueError(f"Something went wrong with GD: Response: "
f"{str(response.status_code)} — {response.json()['message']}")
def create_path(self, remote_path: List[str]) -> None:
"""
Creates the remote path on yandex disk
"""
print(f'[{__name__}] Trying to create directory {"/".join(remote_path)} on remote...')
dir_to_create = []
for dir in remote_path:
dir_to_create.append(dir)
path_to_create = '/'.join(dir_to_create)
response = put_with_OAuth(f'https://cloud-api.yandex.net/v1/disk/resources?path={path_to_create}',
token=self.token)
if 199 < response.status_code < 401:
print(f'[{__name__}] Created directory {path_to_create}')
continue
elif response.status_code == 409 and 'уже существует' in response.json().get('message', ''):
continue
return
def save_resource_to_path(self, resource: Resource, remote_path: str, overwrite: bool, _rec_call:bool = False) -> Resource or None:
"""
Put an Item to the directory
:param resource: resource on the local fs
:param remote_path: string, path to resource on remote fs
:param _rec_call: bool, a system parameter, whether or not this function was called as a recursive call
:return: saved resource or raises exception
"""
upload_successful_flag = False
response = get_with_OAuth(
f'https://cloud-api.yandex.net/v1/disk/resources/upload?path={remote_path}&overwrite=${overwrite}',
token=self.token
)
if response.status_code == 200:
response_read = response.json()
upload_link = response_read['href']
with open(resource.path, 'rb') as f:
files = f
response = put_with_OAuth(upload_link, data=files)
if 199 < response.status_code < 401:
upload_successful_flag = True
response = get_with_OAuth(f'https://cloud-api.yandex.net/v1/disk/resources?path={remote_path}',
token=self.token)
resource_metainfo = self._deserialize_resource(response.json())
if 199 < response.status_code < 401:
return resource_metainfo
elif upload_successful_flag:
return resource
# This dir is not present in the storage
# We use _rec_call to tell that the next call was made as recursive call, so we don't cause SO
elif response.status_code == 409 and not _rec_call:
# We don't need to create a folder with the name equal to the filename, so we do [:-1]
self.create_path(remote_path.split('/')[:-1])
return self.save_resource_to_path(resource, remote_path, overwrite, _rec_call=True)
raise ValueError(f"Something went wrong with YD: Response: "
f"{str(response.status_code)} — {response.json().get('message', '')}")
def download_resource(self, remote_path, local_path) -> str:
response = get_with_OAuth(
f'https://cloud-api.yandex.net/v1/disk/resources/download?path={remote_path}',
token=self.token
)
if response.status_code == 200:
response_read = response.json()
dl_url = response_read.get('href')
else:
raise ValueError(f"[{__name__}] Something went wrong with YD: Response: "
f"{str(response.status_code)} — {response.json()['message']}")
file = requests.get(dl_url)
open(local_path, 'wb').write(file.content)
return local_path
def main():
storage = GDriveStorage(None)
db = Database('../storage.db')
storage.auth(db)
authed_storage = GDriveStorage(json.loads(db.get(GOOGLE_DRIVE_DB_KEY))['token'])
result = authed_storage.list_resources_on_path('savezone')
print(result)
if __name__ == '__main__':
main() | [((25, 5, 25, 28), 'functools.lru_cache', 'lru_cache', (), '', False, 'from functools import lru_cache\n'), ((216, 9, 216, 34), 'database.database.Database', 'Database', ({(216, 18, 216, 33): '"""../storage.db"""'}, {}), "('../storage.db')", False, 'from database.database import Database\n'), ((82, 14, 82, 37), 'models.models.Resource', 'Resource', ({(82, 23, 82, 30): 'is_file', (82, 32, 82, 36): 'path'}, {}), '(is_file, path)', False, 'from models.models import StorageMetaInfo, Resource, Size\n'), ((84, 19, 84, 35), 'json.get', 'json.get', ({(84, 28, 84, 34): '"""name"""'}, {}), "('name')", False, 'import json\n'), ((85, 18, 85, 44), 'json.get', 'json.get', ({(85, 27, 85, 43): '"""webContentLink"""'}, {}), "('webContentLink')", False, 'import json\n'), ((86, 22, 86, 46), 'json.get', 'json.get', ({(86, 31, 86, 45): '"""modifiedTime"""'}, {}), "('modifiedTime')", False, 'import json\n'), ((87, 18, 87, 41), 'json.get', 'json.get', ({(87, 27, 87, 40): '"""md5Checksum"""'}, {}), "('md5Checksum')", False, 'import json\n'), ((83, 50, 83, 66), 'json.get', 'json.get', ({(83, 59, 83, 65): '"""size"""'}, {}), "('size')", False, 'import json\n'), ((58, 58, 58, 83), 'json.loads', 'json.loads', ({(58, 69, 58, 82): 'creds_from_db'}, {}), '(creds_from_db)', False, 'import json\n'), ((63, 23, 63, 87), 'google_auth_oauthlib.flow.InstalledAppFlow.from_client_config', 'InstalledAppFlow.from_client_config', ({(63, 59, 63, 78): 'GOOGLE_DRIVE_CONFIG', (63, 80, 63, 86): 'SCOPES'}, {}), '(GOOGLE_DRIVE_CONFIG, SCOPES)', False, 'from google_auth_oauthlib.flow import InstalledAppFlow\n'), ((83, 24, 83, 40), 'json.get', 'json.get', ({(83, 33, 83, 39): '"""size"""'}, {}), "('size')", False, 'import json\n'), ((61, 30, 61, 39), 'google.auth.transport.requests.Request', 'Request', ({}, {}), '()', False, 'from google.auth.transport.requests import Request\n')] |
darkestmidnight/fedcodeathon2018 | index/urls.py | 2cac972b6eaebd7bfc47c02aade36b0f4a6869ab | from django.urls import re_path, include
from . import views
app_name='logged'
# url mappings for the webapp.
urlpatterns = [
re_path(r'^$', views.logged_count, name="logged_count"),
re_path(r'^loggedusers/', views.logged, name="logged_users"),
re_path(r'^settings/', views.user_settings, name="update_info"),
re_path(r'^administrators/', views.post_alert, name="post_alert"),
re_path(r'^alerts/$', views.list_alert, name="list_alert"),
re_path(r'^alerts/(?P<slug>[\w-]+)/$', views.view_alert, name="view_alert"),
re_path(r'^display/', views.display, name="display"),
re_path(r'^doorselection/', views.doors_election, name="door_selecttion")
] | [((8, 4, 8, 59), 'django.urls.re_path', 're_path', (), '', False, 'from django.urls import re_path, include\n'), ((9, 4, 9, 64), 'django.urls.re_path', 're_path', (), '', False, 'from django.urls import re_path, include\n'), ((10, 4, 10, 67), 'django.urls.re_path', 're_path', (), '', False, 'from django.urls import re_path, include\n'), ((11, 4, 11, 69), 'django.urls.re_path', 're_path', (), '', False, 'from django.urls import re_path, include\n'), ((12, 4, 12, 62), 'django.urls.re_path', 're_path', (), '', False, 'from django.urls import re_path, include\n'), ((13, 4, 13, 79), 'django.urls.re_path', 're_path', (), '', False, 'from django.urls import re_path, include\n'), ((14, 4, 14, 56), 'django.urls.re_path', 're_path', (), '', False, 'from django.urls import re_path, include\n'), ((15, 4, 15, 77), 'django.urls.re_path', 're_path', (), '', False, 'from django.urls import re_path, include\n')] |
uw-it-aca/scout | scout/dao/item.py | be787378c216f1fb172d68914a550a91c62bc264 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from scout.dao.space import get_spots_by_filter, _get_spot_filters, \
_get_extended_info_by_key
import copy
def get_item_by_id(item_id):
spot = get_spots_by_filter([
('item:id', item_id),
('extended_info:app_type', 'tech')
])
if spot:
spot = _filter_spot_items(item_id, spot[0])
return spot
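# Attach the item matching item_id to the spot (as spot.item) and return the spot.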
def _filter_spot_items(item_id, spot):
for item in spot.items:
if item.item_id == item_id:
spot.item = item
return spot
def add_item_info(spot):
for item in spot.items:
item.model = _get_extended_info_by_key("i_model",
item.extended_info)
item.brand = _get_extended_info_by_key("i_brand",
item.extended_info)
item.checkout_period = _get_extended_info_by_key(
"i_checkout_period",
item.extended_info
)
item.reservation_notes = _get_extended_info_by_key(
"i_reservation_notes",
item.extended_info
)
item.is_active = _get_extended_info_by_key(
"i_is_active",
item.extended_info
)
item.quantity = _get_extended_info_by_key(
"i_quantity",
item.extended_info
)
item.description = _get_extended_info_by_key(
"i_description",
item.extended_info
)
item.reserve_url = _get_extended_info_by_key(
"i_reserve_url",
item.extended_info
)
item.manual_url = _get_extended_info_by_key(
"i_manual_url",
item.extended_info
)
item.owner = _get_extended_info_by_key(
"i_owner",
item.extended_info
)
item.is_stf = _get_extended_info_by_key(
"i_is_stf",
item.extended_info
)
item.cte_type_id = _get_extended_info_by_key(
"cte_type_id",
item.extended_info
)
return spot
def get_filtered_items(spots, request):
parameter_list = _get_spot_filters(request)
brand = []
subcategory = []
is_active = False
for param in parameter_list:
if param[0] == "item:extended_info:i_brand":
brand.append(param[1])
elif param[0] == "item:subcategory":
subcategory.append(param[1])
elif param[0] == "item:extended_info:i_is_active":
is_active = True
new_spots = []
for spot in spots:
new_spot = copy.deepcopy(spot)
new_spot.items = []
for item in spot.items:
if is_active and not item.is_active:
continue
if len(subcategory) > 0 and item.subcategory not in subcategory:
continue
if len(brand) > 0 and item.brand not in brand:
continue
new_spot.items.append(item)
new_spots.append(new_spot)
return new_spots
def get_item_count(spots):
item_count = 0
for spot in spots:
item_count += len(spot.items)
return item_count
| [((10, 11, 13, 6), 'scout.dao.space.get_spots_by_filter', 'get_spots_by_filter', ({(10, 31, 13, 5): "[('item:id', item_id), ('extended_info:app_type', 'tech')]"}, {}), "([('item:id', item_id), ('extended_info:app_type', 'tech')])", False, 'from scout.dao.space import get_spots_by_filter, _get_spot_filters, _get_extended_info_by_key\n'), ((77, 21, 77, 47), 'scout.dao.space._get_spot_filters', '_get_spot_filters', ({(77, 39, 77, 46): 'request'}, {}), '(request)', False, 'from scout.dao.space import get_spots_by_filter, _get_spot_filters, _get_extended_info_by_key\n'), ((28, 21, 29, 66), 'scout.dao.space._get_extended_info_by_key', '_get_extended_info_by_key', ({(28, 47, 28, 56): '"""i_model"""', (29, 47, 29, 65): 'item.extended_info'}, {}), "('i_model', item.extended_info)", False, 'from scout.dao.space import get_spots_by_filter, _get_spot_filters, _get_extended_info_by_key\n'), ((30, 21, 31, 66), 'scout.dao.space._get_extended_info_by_key', '_get_extended_info_by_key', ({(30, 47, 30, 56): '"""i_brand"""', (31, 47, 31, 65): 'item.extended_info'}, {}), "('i_brand', item.extended_info)", False, 'from scout.dao.space import get_spots_by_filter, _get_spot_filters, _get_extended_info_by_key\n'), ((32, 31, 35, 9), 'scout.dao.space._get_extended_info_by_key', '_get_extended_info_by_key', ({(33, 12, 33, 31): '"""i_checkout_period"""', (34, 12, 34, 30): 'item.extended_info'}, {}), "('i_checkout_period', item.extended_info)", False, 'from scout.dao.space import get_spots_by_filter, _get_spot_filters, _get_extended_info_by_key\n'), ((36, 33, 39, 9), 'scout.dao.space._get_extended_info_by_key', '_get_extended_info_by_key', ({(37, 12, 37, 33): '"""i_reservation_notes"""', (38, 12, 38, 30): 'item.extended_info'}, {}), "('i_reservation_notes', item.extended_info)", False, 'from scout.dao.space import get_spots_by_filter, _get_spot_filters, _get_extended_info_by_key\n'), ((40, 25, 43, 9), 'scout.dao.space._get_extended_info_by_key', '_get_extended_info_by_key', ({(41, 12, 41, 25): '"""i_is_active"""', (42, 12, 42, 30): 'item.extended_info'}, {}), "('i_is_active', item.extended_info)", False, 'from scout.dao.space import get_spots_by_filter, _get_spot_filters, _get_extended_info_by_key\n'), ((44, 24, 47, 9), 'scout.dao.space._get_extended_info_by_key', '_get_extended_info_by_key', ({(45, 12, 45, 24): '"""i_quantity"""', (46, 12, 46, 30): 'item.extended_info'}, {}), "('i_quantity', item.extended_info)", False, 'from scout.dao.space import get_spots_by_filter, _get_spot_filters, _get_extended_info_by_key\n'), ((48, 27, 51, 9), 'scout.dao.space._get_extended_info_by_key', '_get_extended_info_by_key', ({(49, 12, 49, 27): '"""i_description"""', (50, 12, 50, 30): 'item.extended_info'}, {}), "('i_description', item.extended_info)", False, 'from scout.dao.space import get_spots_by_filter, _get_spot_filters, _get_extended_info_by_key\n'), ((52, 27, 55, 9), 'scout.dao.space._get_extended_info_by_key', '_get_extended_info_by_key', ({(53, 12, 53, 27): '"""i_reserve_url"""', (54, 12, 54, 30): 'item.extended_info'}, {}), "('i_reserve_url', item.extended_info)", False, 'from scout.dao.space import get_spots_by_filter, _get_spot_filters, _get_extended_info_by_key\n'), ((56, 26, 59, 9), 'scout.dao.space._get_extended_info_by_key', '_get_extended_info_by_key', ({(57, 12, 57, 26): '"""i_manual_url"""', (58, 12, 58, 30): 'item.extended_info'}, {}), "('i_manual_url', item.extended_info)", False, 'from scout.dao.space import get_spots_by_filter, _get_spot_filters, _get_extended_info_by_key\n'), ((60, 21, 63, 9), 
'scout.dao.space._get_extended_info_by_key', '_get_extended_info_by_key', ({(61, 12, 61, 21): '"""i_owner"""', (62, 12, 62, 30): 'item.extended_info'}, {}), "('i_owner', item.extended_info)", False, 'from scout.dao.space import get_spots_by_filter, _get_spot_filters, _get_extended_info_by_key\n'), ((64, 22, 67, 9), 'scout.dao.space._get_extended_info_by_key', '_get_extended_info_by_key', ({(65, 12, 65, 22): '"""i_is_stf"""', (66, 12, 66, 30): 'item.extended_info'}, {}), "('i_is_stf', item.extended_info)", False, 'from scout.dao.space import get_spots_by_filter, _get_spot_filters, _get_extended_info_by_key\n'), ((68, 27, 71, 9), 'scout.dao.space._get_extended_info_by_key', '_get_extended_info_by_key', ({(69, 12, 69, 25): '"""cte_type_id"""', (70, 12, 70, 30): 'item.extended_info'}, {}), "('cte_type_id', item.extended_info)", False, 'from scout.dao.space import get_spots_by_filter, _get_spot_filters, _get_extended_info_by_key\n'), ((92, 19, 92, 38), 'copy.deepcopy', 'copy.deepcopy', ({(92, 33, 92, 37): 'spot'}, {}), '(spot)', False, 'import copy\n')] |
umeboshi2/juriscraper | juriscraper/opinions/united_states/state/minnctapp.py | 16abceb3747947593841b1c2708de84dcc85c59d | #Scraper for Minnesota Court of Appeals Published Opinions
#CourtID: minnctapp
#Court Short Name: MN
#Author: mlr
#Date: 2016-06-03
from juriscraper.opinions.united_states.state import minn
class Site(minn.Site):
# Only subclasses minn for the _download method.
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.court_filters = ['/ctapun/', '/ctappub/']
| [] |
JosephMontoya-TRI/monty | monty/os/__init__.py | facef1776c7d05c941191a32a0b93f986a9761dd | from __future__ import absolute_import
import os
import errno
from contextlib import contextmanager
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = '[email protected]'
__date__ = '1/24/14'
@contextmanager
def cd(path):
"""
A Fabric-inspired cd context that temporarily changes directory for
performing some tasks, and returns to the original working directory
afterwards. E.g.,
with cd("/my/path/"):
do_something()
Args:
path: Path to cd to.
"""
cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(cwd)
def makedirs_p(path, **kwargs):
"""
Wrapper for os.makedirs that does not raise an exception if the directory already exists, in the fashion of
"mkdir -p" command. The check is performed in a thread-safe way
Args:
path: path of the directory to create
kwargs: standard kwargs for os.makedirs
"""
try:
os.makedirs(path, **kwargs)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise | [((29, 10, 29, 21), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((30, 4, 30, 18), 'os.chdir', 'os.chdir', ({(30, 13, 30, 17): 'path'}, {}), '(path)', False, 'import os\n'), ((34, 8, 34, 21), 'os.chdir', 'os.chdir', ({(34, 17, 34, 20): 'cwd'}, {}), '(cwd)', False, 'import os\n'), ((48, 8, 48, 35), 'os.makedirs', 'os.makedirs', ({(48, 20, 48, 24): 'path'}, {}), '(path, **kwargs)', False, 'import os\n'), ((50, 41, 50, 60), 'os.path.isdir', 'os.path.isdir', ({(50, 55, 50, 59): 'path'}, {}), '(path)', False, 'import os\n')] |
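A minimal usage sketch of the two helpers above, assuming only that the package is importable as monty.os (as the repo path suggests); the scratch directory below is a made-up illustration:

import os
import tempfile

from monty.os import cd, makedirs_p

# hypothetical scratch location used only for this illustration
scratch = os.path.join(tempfile.gettempdir(), "monty_demo", "runs")

makedirs_p(scratch)   # creates the nested directories
makedirs_p(scratch)   # calling it again is a no-op instead of an error

with cd(scratch):
    # temporarily working inside the scratch directory
    open("notes.txt", "w").close()

print(os.getcwd())      # back in the original working directory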
FrancisMudavanhu/cookiecutter-data-science | {{ cookiecutter.repo_name }}/tests/test_environment.py | be766817a7399ccd714bf03d085609985fa7313a | import sys
REQUIRED_PYTHON = "python3"
required_major = 3
def main():
system_major = sys.version_info.major
if system_major != required_major:
raise TypeError(
f"This project requires Python {required_major}."
f" Found: Python {sys.version}")
else:
print(">>> Development environment passes all tests!")
if __name__ == '__main__':
main()
| [] |
siagholami/aws-documentation | documents/aws-doc-sdk-examples/python/example_code/kda/kda-python-datagenerator-stockticker.py | 2d06ee9011f3192b2ff38c09f04e01f1ea9e0191 | # snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[kda-python-datagenerator-stockticker.py demonstrates how to generate sample data for Amazon Kinesis Data Analytics SQL applications.]
# snippet-service:[kinesisanalytics]
# snippet-keyword:[Python]
# snippet-sourcesyntax:[python]
# snippet-keyword:[Amazon Kinesis Data Analytics]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-01-29]
# snippet-sourceauthor:[fletpatr (AWS)]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# snippet-start:[kinesisanalytics.python.datagenerator.stockticker]
import json
import boto3
import random
import datetime
kinesis = boto3.client('kinesis')
def getReferrer():
data = {}
now = datetime.datetime.now()
str_now = now.isoformat()
data['EVENT_TIME'] = str_now
data['TICKER'] = random.choice(['AAPL', 'AMZN', 'MSFT', 'INTC', 'TBV'])
price = random.random() * 100
data['PRICE'] = round(price, 2)
return data
while True:
data = json.dumps(getReferrer())
print(data)
kinesis.put_record(
StreamName="ExampleInputStream",
Data=data,
PartitionKey="partitionkey")
# snippet-end:[kinesisanalytics.python.datagenerator.stockticker]
| [((31, 10, 31, 33), 'boto3.client', 'boto3.client', ({(31, 23, 31, 32): '"""kinesis"""'}, {}), "('kinesis')", False, 'import boto3\n'), ((34, 10, 34, 33), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((37, 21, 37, 75), 'random.choice', 'random.choice', ({(37, 35, 37, 74): "['AAPL', 'AMZN', 'MSFT', 'INTC', 'TBV']"}, {}), "(['AAPL', 'AMZN', 'MSFT', 'INTC', 'TBV'])", False, 'import random\n'), ((38, 12, 38, 27), 'random.random', 'random.random', ({}, {}), '()', False, 'import random\n')] |
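The generator above only writes records; a hedged sketch of reading a few of them back with the standard boto3 shard-iterator flow follows. The stream name is taken from the script; everything else (single shard, TRIM_HORIZON start) is an assumption for illustration.

import json
import boto3

kinesis = boto3.client('kinesis')

# assume the stream used by the generator has at least one shard
stream = kinesis.describe_stream(StreamName="ExampleInputStream")
shard_id = stream['StreamDescription']['Shards'][0]['ShardId']

iterator = kinesis.get_shard_iterator(
    StreamName="ExampleInputStream",
    ShardId=shard_id,
    ShardIteratorType="TRIM_HORIZON")['ShardIterator']

# pull a small batch and decode the JSON payloads written above
for record in kinesis.get_records(ShardIterator=iterator, Limit=10)['Records']:
    payload = json.loads(record['Data'])
    print(payload['TICKER'], payload['PRICE'])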
alexespejo/project-argus | backend/app.py | 53a6a8b1790906044bffbd2db156322938b62da9 | import face_recognition
from flask import Flask, request, redirect, Response
import camera
import firestore as db
# You can change this to any folder on your system
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
app = Flask(__name__)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def detect_faces_in_image(name, access, file_stream):
# Load the uploaded image filed
img = face_recognition.load_image_file(file_stream)
# Get face encodings for any faces in the uploaded image
unknown_face_encodings = face_recognition.face_encodings(img)[0].tolist()
db.add_member(name, access, unknown_face_encodings)
return ('', 204)
@app.route('/')
def root():
return ('', 204)
@app.route('/upload', methods=['GET', 'POST'])
def upload_image():
db.encoding.update()
name = request.form.get("name")
access = request.form.get("access")
access = int(access)
if request.method == 'POST':
if 'file' not in request.files:
return redirect(request.url)
file = request.files['file']
if file.filename == '':
return redirect(request.url)
if file and allowed_file(file.filename):
return detect_faces_in_image(name, access, file)
return redirect('/video_feed')
@app.route('/update', methods=['GET', 'POST'])
def update():
db.encoding.update()
member = request.form.get("updateMember")
changeName = request.form.get("changeName")
changeAccess = request.form.get("changeAccess")
if changeAccess == None:
changeAccess = ""
db.update_member(member, changeName, changeAccess)
return ('', 204)
@app.route('/configuration', methods=['GET', 'POST'])
def config():
db.config_camera_interval(int(request.form.get('cameraDuration')))
return('', 204)
@app.route('/members')
def members():
print(type(db.encoding.get_names()))
return str(db.encoding.get_names())
@app.route('/video_feed')
def video_feed():
print('CAMERA RUN')
return Response(camera.gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/recent_person')
def recent_person():
return db.history_log.get_most_recent_member()
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5001, debug=True)
| [((9, 6, 9, 21), 'flask.Flask', 'Flask', ({(9, 12, 9, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask, request, redirect, Response\n'), ((19, 10, 19, 55), 'face_recognition.load_image_file', 'face_recognition.load_image_file', ({(19, 43, 19, 54): 'file_stream'}, {}), '(file_stream)', False, 'import face_recognition\n'), ((23, 4, 23, 55), 'firestore.add_member', 'db.add_member', ({(23, 18, 23, 22): 'name', (23, 24, 23, 30): 'access', (23, 32, 23, 54): 'unknown_face_encodings'}, {}), '(name, access, unknown_face_encodings)', True, 'import firestore as db\n'), ((34, 4, 34, 24), 'firestore.encoding.update', 'db.encoding.update', ({}, {}), '()', True, 'import firestore as db\n'), ((35, 11, 35, 35), 'flask.request.form.get', 'request.form.get', ({(35, 28, 35, 34): '"""name"""'}, {}), "('name')", False, 'from flask import Flask, request, redirect, Response\n'), ((36, 13, 36, 39), 'flask.request.form.get', 'request.form.get', ({(36, 30, 36, 38): '"""access"""'}, {}), "('access')", False, 'from flask import Flask, request, redirect, Response\n'), ((48, 11, 48, 34), 'flask.redirect', 'redirect', ({(48, 20, 48, 33): '"""/video_feed"""'}, {}), "('/video_feed')", False, 'from flask import Flask, request, redirect, Response\n'), ((53, 4, 53, 24), 'firestore.encoding.update', 'db.encoding.update', ({}, {}), '()', True, 'import firestore as db\n'), ((54, 13, 54, 45), 'flask.request.form.get', 'request.form.get', ({(54, 30, 54, 44): '"""updateMember"""'}, {}), "('updateMember')", False, 'from flask import Flask, request, redirect, Response\n'), ((55, 17, 55, 47), 'flask.request.form.get', 'request.form.get', ({(55, 34, 55, 46): '"""changeName"""'}, {}), "('changeName')", False, 'from flask import Flask, request, redirect, Response\n'), ((56, 19, 56, 51), 'flask.request.form.get', 'request.form.get', ({(56, 36, 56, 50): '"""changeAccess"""'}, {}), "('changeAccess')", False, 'from flask import Flask, request, redirect, Response\n'), ((59, 4, 59, 54), 'firestore.update_member', 'db.update_member', ({(59, 21, 59, 27): 'member', (59, 29, 59, 39): 'changeName', (59, 41, 59, 53): 'changeAccess'}, {}), '(member, changeName, changeAccess)', True, 'import firestore as db\n'), ((84, 11, 84, 50), 'firestore.history_log.get_most_recent_member', 'db.history_log.get_most_recent_member', ({}, {}), '()', True, 'import firestore as db\n'), ((73, 15, 73, 38), 'firestore.encoding.get_names', 'db.encoding.get_names', ({}, {}), '()', True, 'import firestore as db\n'), ((79, 20, 79, 39), 'camera.gen_frames', 'camera.gen_frames', ({}, {}), '()', False, 'import camera\n'), ((40, 19, 40, 40), 'flask.redirect', 'redirect', ({(40, 28, 40, 39): 'request.url'}, {}), '(request.url)', False, 'from flask import Flask, request, redirect, Response\n'), ((44, 19, 44, 40), 'flask.redirect', 'redirect', ({(44, 28, 44, 39): 'request.url'}, {}), '(request.url)', False, 'from flask import Flask, request, redirect, Response\n'), ((66, 34, 66, 68), 'flask.request.form.get', 'request.form.get', ({(66, 51, 66, 67): '"""cameraDuration"""'}, {}), "('cameraDuration')", False, 'from flask import Flask, request, redirect, Response\n'), ((72, 15, 72, 38), 'firestore.encoding.get_names', 'db.encoding.get_names', ({}, {}), '()', True, 'import firestore as db\n'), ((21, 29, 21, 65), 'face_recognition.face_encodings', 'face_recognition.face_encodings', ({(21, 61, 21, 64): 'img'}, {}), '(img)', False, 'import face_recognition\n')] |
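A hedged client-side sketch for the /upload route above, using only the form fields the handler reads (name, access, file). The host, file name, and member details are assumptions; port 5001 comes from app.run.

import requests

# hypothetical local deployment and sample photo
with open("member_photo.jpg", "rb") as photo:
    resp = requests.post(
        "http://localhost:5001/upload",
        data={"name": "Alex", "access": "1"},  # read via request.form.get
        files={"file": photo})                 # read via request.files['file']

print(resp.status_code)  # 204 once the face encoding has been stored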
fishial/Object-Detection-Model | module/classification_package/src/utils.py | 4792f65ea785156a8e240d9cdbbc0c9d013ea0bb | import numpy as np
import logging
import numbers
import torch
import math
import json
import sys
from torch.optim.lr_scheduler import LambdaLR
from torchvision.transforms.functional import pad
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class ConstantLRSchedule(LambdaLR):
""" Constant learning rate schedule.
"""
def __init__(self, optimizer, last_epoch=-1):
super(ConstantLRSchedule, self).__init__(optimizer, lambda _: 1.0, last_epoch=last_epoch)
class WarmupConstantSchedule(LambdaLR):
""" Linear warmup and then constant.
Linearly increases learning rate schedule from 0 to 1 over `warmup_steps` training steps.
Keeps learning rate schedule equal to 1. after warmup_steps.
"""
def __init__(self, optimizer, warmup_steps, last_epoch=-1):
self.warmup_steps = warmup_steps
super(WarmupConstantSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
return 1.
class WarmupLinearSchedule(LambdaLR):
""" Linear warmup and then linear decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Linearly decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps.
"""
def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
super(WarmupLinearSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1, self.warmup_steps))
return max(0.0, float(self.t_total - step) / float(max(1.0, self.t_total - self.warmup_steps)))
class WarmupCosineSchedule(LambdaLR):
""" Linear warmup and then cosine decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps following a cosine curve.
If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup.
"""
def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
self.cycles = cycles
super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
# progress after warmup
progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
return max(0.0, 0.5 * (1. + math.cos(math.pi * float(self.cycles) * 2.0 * progress)))
def get_padding(image):
w, h = image.size
max_wh = np.max([w, h])
h_padding = (max_wh - w) / 2
v_padding = (max_wh - h) / 2
l_pad = h_padding if h_padding % 1 == 0 else h_padding + 0.5
t_pad = v_padding if v_padding % 1 == 0 else v_padding + 0.5
r_pad = h_padding if h_padding % 1 == 0 else h_padding - 0.5
b_pad = v_padding if v_padding % 1 == 0 else v_padding - 0.5
padding = (int(l_pad), int(t_pad), int(r_pad), int(b_pad))
return padding
class NewPad(object):
def __init__(self, fill=0, padding_mode='constant'):
assert isinstance(fill, (numbers.Number, str, tuple))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
self.fill = fill
self.padding_mode = padding_mode
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be padded.
Returns:
PIL Image: Padded image.
"""
return pad(img, get_padding(img), self.fill, self.padding_mode)
def __repr__(self):
        return self.__class__.__name__ + '(fill={0}, padding_mode={1})'. \
            format(self.fill, self.padding_mode)
def find_device():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
return device
def read_json(data):
with open(data) as f:
return json.load(f)
def save_json(data, path):
with open(path, 'w', encoding='utf-8') as f:
json.dump(data, f)
def setup_logger():
logger = logging.getLogger('train')
logger.setLevel(logging.INFO)
if len(logger.handlers) == 0:
formatter = logging.Formatter('%(asctime)s | %(message)s')
ch = logging.StreamHandler(stream=sys.stdout)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
def adjust_learning_rate(optimizer, epoch, lr):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def save_checkpoint(model, path):
torch.save(model.state_dict(), path)
def reverse_norm_image(image):
MEAN = torch.tensor([0.485, 0.456, 0.406])
STD = torch.tensor([0.229, 0.224, 0.225])
reverse_image = image * STD[:, None, None] + MEAN[:, None, None]
return reverse_image.permute(1, 2, 0).cpu().numpy() | [((92, 13, 92, 27), 'numpy.max', 'np.max', ({(92, 20, 92, 26): '[w, h]'}, {}), '([w, h])', True, 'import numpy as np\n'), ((142, 13, 142, 39), 'logging.getLogger', 'logging.getLogger', ({(142, 31, 142, 38): '"""train"""'}, {}), "('train')", False, 'import logging\n'), ((164, 11, 164, 46), 'torch.tensor', 'torch.tensor', ({(164, 24, 164, 45): '[0.485, 0.456, 0.406]'}, {}), '([0.485, 0.456, 0.406])', False, 'import torch\n'), ((165, 10, 165, 45), 'torch.tensor', 'torch.tensor', ({(165, 23, 165, 44): '[0.229, 0.224, 0.225]'}, {}), '([0.229, 0.224, 0.225])', False, 'import torch\n'), ((133, 15, 133, 27), 'json.load', 'json.load', ({(133, 25, 133, 26): 'f'}, {}), '(f)', False, 'import json\n'), ((138, 8, 138, 26), 'json.dump', 'json.dump', ({(138, 18, 138, 22): 'data', (138, 24, 138, 25): 'f'}, {}), '(data, f)', False, 'import json\n'), ((145, 20, 145, 66), 'logging.Formatter', 'logging.Formatter', ({(145, 38, 145, 65): '"""%(asctime)s | %(message)s"""'}, {}), "('%(asctime)s | %(message)s')", False, 'import logging\n'), ((146, 13, 146, 53), 'logging.StreamHandler', 'logging.StreamHandler', (), '', False, 'import logging\n'), ((127, 38, 127, 63), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n')] |
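Because the schedules above subclass LambdaLR, they plug into an ordinary PyTorch training loop. A minimal sketch, with a toy model and made-up step counts:

import torch

model = torch.nn.Linear(16, 2)                        # stand-in model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# linear warmup for the first 100 steps, cosine decay until step 1000
scheduler = WarmupCosineSchedule(optimizer, warmup_steps=100, t_total=1000)

for step in range(1000):
    optimizer.zero_grad()
    loss = model(torch.randn(8, 16)).sum()
    loss.backward()
    optimizer.step()
    scheduler.step()   # base lr is multiplied by lr_lambda(step)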
L-Net-1992/mlflow | tests/pylint_plugins/test_assert_raises_without_msg.py | a90574dbb730935c815ff41a0660b9a823b81630 | import pytest
from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable
pytestmark = skip_if_pylint_unavailable()
@pytest.fixture(scope="module")
def test_case():
import pylint.testutils
from pylint_plugins import AssertRaisesWithoutMsg
class TestAssertRaisesWithoutMsg(pylint.testutils.CheckerTestCase):
CHECKER_CLASS = AssertRaisesWithoutMsg
test_case = TestAssertRaisesWithoutMsg()
test_case.setup_method()
return test_case
def test_assert_raises_without_msg(test_case):
node = extract_node("self.assertRaises(Exception)")
with test_case.assertAddsMessages(create_message(test_case.CHECKER_CLASS.name, node)):
test_case.walk(node)
node = extract_node("self.assertRaises(Exception, msg='test')")
with test_case.assertNoMessages():
test_case.walk(node)
node = extract_node("pandas.assertRaises(Exception)")
with test_case.assertNoMessages():
test_case.walk(node)
| [((5, 13, 5, 41), 'tests.pylint_plugins.utils.skip_if_pylint_unavailable', 'skip_if_pylint_unavailable', ({}, {}), '()', False, 'from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable\n'), ((8, 1, 8, 31), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((22, 11, 22, 55), 'tests.pylint_plugins.utils.extract_node', 'extract_node', ({(22, 24, 22, 54): '"""self.assertRaises(Exception)"""'}, {}), "('self.assertRaises(Exception)')", False, 'from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable\n'), ((26, 11, 26, 67), 'tests.pylint_plugins.utils.extract_node', 'extract_node', ({(26, 24, 26, 66): '"""self.assertRaises(Exception, msg=\'test\')"""'}, {}), '("self.assertRaises(Exception, msg=\'test\')")', False, 'from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable\n'), ((30, 11, 30, 57), 'tests.pylint_plugins.utils.extract_node', 'extract_node', ({(30, 24, 30, 56): '"""pandas.assertRaises(Exception)"""'}, {}), "('pandas.assertRaises(Exception)')", False, 'from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable\n'), ((23, 38, 23, 88), 'tests.pylint_plugins.utils.create_message', 'create_message', ({(23, 53, 23, 81): 'test_case.CHECKER_CLASS.name', (23, 83, 23, 87): 'node'}, {}), '(test_case.CHECKER_CLASS.name, node)', False, 'from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable\n')] |
AV321/SVPackage | SVassembly/plot_bcs_across_bkpts.py | c9c625af7f5047ddb43ae79f8beb2ce9aadf7697 | import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors
import csv
from scipy.stats import mode
import math as m
import os
import collections
#set working directory
#os.chdir("/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr")
#bkpt_name = "1"
#example: plot_bcs_bkpt("1", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr")
def plot_bcs_bkpt(bkpt_name, infolder, outfolder):
if infolder[-1] != '/':
infolder = infolder + '/'
file_1 = infolder + bkpt_name + "_1.bc_windows.txt"
file_2 = infolder + bkpt_name + "_2.bc_windows.txt"
file_hap = infolder + bkpt_name + "_hap_bcs.txt"
df_1 = pd.read_table(file_1)
df_2 = pd.read_table(file_2)
hap_bcs = pd.read_table(file_hap)
# bkpt_name = "1"
# file_1 = bkpt_name + "_1.bc_windows.txt"
# file_2 = bkpt_name + "_2.bc_windows.txt"
# file_hap = bkpt_name + "_hap_bcs.txt"
# #sort barcodes by where they map (lowest coordinate to highest)
# #read in data frames
# df_1 = pd.read_table(file_1)
# df_2 = pd.read_table(file_2)
# hap_bcs = pd.read_table(file_hap)
hap_bcs = hap_bcs.transpose()
bcs_hap_dict = {}
for key in df_1.keys():
if key != "chrom" and key != "window_start" and key != "window_end":
key = key[:-2]
bcs_hap_dict[key] = 'unassigned'
for key, values in hap_bcs.iteritems():
if values[0] != 'bcs':
hap = values[1]
bcs_hap_dict[values[0]] = hap
df_1 = df_1.sort_values('window_start')
df_2 = df_2.sort_values('window_start')
chrom_1 = df_1.at[0, 'chrom']
chrom_2 = df_2.at[0, 'chrom']
x_values_1_1 = []
x_values_1_2 = []
x_values_1_unassigned = []
y_values_1_1 = []
y_values_1_2 = []
y_values_1_unassigned = []
x_values_2_1 = []
x_values_2_2 = []
x_values_2_unassigned = []
y_values_2_1 = []
y_values_2_2 = []
y_values_2_unassigned = []
i1 = 0
window_start_arr1 = df_1['window_start']
for name, values in df_1.iteritems(): #go through columns (so each barcode)
if name != "chrom" and name != "window_start" and name != "window_end":
i1 += 1
name = name[:-2]
hap = bcs_hap_dict[name]
#print type(hap) int
for indx, window in values.iteritems():
if window != 0:
if hap == 1:
y_values_1_1.append(i1)
x_values_1_1.append(window_start_arr1[indx])
elif hap == 2:
y_values_1_2.append(i1)
x_values_1_2.append(window_start_arr1[indx])
else:
y_values_1_unassigned.append(i1)
x_values_1_unassigned.append(window_start_arr1[indx])
i2 = 0
window_start_arr2 = df_2['window_start']
for name, values in df_2.iteritems():
if name != "chrom" and name != "window_start" and name != "window_end":
i2 += 1
name = name[:-2]
hap = bcs_hap_dict[name]
for indx, window in values.iteritems():
if window != 0:
if hap == 1:
y_values_2_1.append(i2)
x_values_2_1.append(window_start_arr2[indx])
elif hap == 2:
y_values_2_2.append(i2)
x_values_2_2.append(window_start_arr2[indx])
elif hap == 'unassigned':
y_values_2_unassigned.append(i2)
x_values_2_unassigned.append(window_start_arr2[indx])
fig = plt.figure()
figL = fig.add_subplot(121)
figL.scatter(x_values_1_1, y_values_1_1, s=0.2, color='b') #this doesn't seem to contain anything
figL.scatter(x_values_1_2, y_values_1_2, s=0.2, color='r') #same
figL.scatter(x_values_1_unassigned, y_values_1_unassigned, s=0.2, color='g')
figL.set_title("")
figL.set_xlabel("chr %d (Mb)" %chrom_1)
figL.set_ylabel("SV-specific barcode")
figR = fig.add_subplot(122)
figR.scatter(x_values_2_1, y_values_2_1, s=0.2, color='b') #same
figR.scatter(x_values_2_2, y_values_2_2, s=0.2, color='r') #same
figR.scatter(x_values_2_unassigned, y_values_2_unassigned, s=0.2, color='g')
figR.set_title("")
figR.set_xlabel("chr %d (Mb)" %chrom_2)
figR.set_ylabel("")
brkpt1 = min(df_1['window_start']) + ((max(df_1['window_end']) - min(df_1['window_start']))/2)
brkpt2 = min(df_2['window_start']) + ((max(df_2['window_end']) - min(df_2['window_start']))/2)
figL.axvline(x=brkpt1, linewidth=1, color = 'black')
figR.axvline(x=brkpt2, linewidth=1, color = 'black')
path = outfolder + 'bcs_bkpt_map'
plt.savefig(path)
| [((4, 0, 4, 21), 'matplotlib.use', 'matplotlib.use', ({(4, 15, 4, 20): '"""Agg"""'}, {}), "('Agg')", False, 'import matplotlib\n'), ((26, 11, 26, 32), 'pandas.read_table', 'pd.read_table', ({(26, 25, 26, 31): 'file_1'}, {}), '(file_1)', True, 'import pandas as pd\n'), ((27, 11, 27, 32), 'pandas.read_table', 'pd.read_table', ({(27, 25, 27, 31): 'file_2'}, {}), '(file_2)', True, 'import pandas as pd\n'), ((28, 14, 28, 37), 'pandas.read_table', 'pd.read_table', ({(28, 28, 28, 36): 'file_hap'}, {}), '(file_hap)', True, 'import pandas as pd\n'), ((116, 10, 116, 22), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((140, 4, 140, 21), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(140, 16, 140, 20): 'path'}, {}), '(path)', True, 'import matplotlib.pyplot as plt\n')] |
ChidinmaKO/Chobe-bitesofpy | bites/bite029.py | 2f933e6c8877a37d1ce7ef54ea22169fc67417d3 | def get_index_different_char(chars):
alnum = []
not_alnum = []
for index, char in enumerate(chars):
if str(char).isalnum():
alnum.append(index)
else:
not_alnum.append(index)
result = alnum[0] if len(alnum) < len(not_alnum) else not_alnum[0]
return result
# tests
def test_wrong_char():
inputs = (
['A', 'f', '.', 'Q', 2],
['.', '{', ' ^', '%', 'a'],
[1, '=', 3, 4, 5, 'A', 'b', 'a', 'b', 'c'],
['=', '=', '', '/', '/', 9, ':', ';', '?', '¡'],
list(range(1,9)) + ['}'] + list('abcde'), # noqa E231
)
expected = [2, 4, 1, 5, 8]
for arg, exp in zip(inputs, expected):
err = f'get_index_different_char({arg}) should return index {exp}'
assert get_index_different_char(arg) == exp, err | [] |
derlin/SwigSpot_Schwyzertuutsch-Spotting | language-detection-webapp/blueprints/langid.py | f38c8243ff34c6e512cadab5e4f51b08dacc16c6 | import logging
from flask import Blueprint
from flask import Flask, render_template, request, flash
from flask_wtf import FlaskForm
from wtforms import StringField, validators, SelectField, BooleanField
from wtforms.fields.html5 import IntegerRangeField
from wtforms.widgets import TextArea
import langid
from utils.utils import templated
blueprint_langid = Blueprint('langid', __name__)
class UrlForm(FlaskForm):
url = StringField(
'URL',
        validators=[validators.DataRequired(), validators.URL(message='Sorry, this is not a valid URL.')])
wMin = IntegerRangeField(
'Min. words',
default=5,
validators=[validators.DataRequired(), validators.NumberRange(min=1, max=20)])
extractor_class = SelectField(
'Extractor',
default=langid.EXTRACTORS[0],
choices=[(i, i) for i in langid.EXTRACTORS],
validators=[validators.DataRequired()])
model_class = SelectField(
'Model',
default=langid.MODELS[0],
choices=[(i, i) for i in langid.MODELS],
validators=[validators.DataRequired()])
return_raw = BooleanField(
'Display raw sentences',
default=False
)
class TextForm(FlaskForm):
text = StringField(
'Text',
widget=TextArea(),
validators=[validators.DataRequired()])
model_class = SelectField(
'Model',
default=langid.MODELS[0],
choices=[(i, i) for i in langid.MODELS],
validators=[validators.DataRequired()])
@blueprint_langid.route('/', methods=['GET', 'POST'])
@templated('index.html')
def crawl():
form = UrlForm(request.form)
if request.method == 'GET':
return dict(form=form)
elif not form.validate():
for f, errs in form.errors.items():
flash("%s: %s" % (f, "<br>".join(errs)), 'danger')
return dict(form=form)
try:
results = langid.mixed_sentences_from_urls(
form.url.data.strip(), extractor_name=form.extractor_class.data, model=form.model_class.data,
with_proba=True, min_words=form.wMin.data, return_raw=form.return_raw.data)
except Exception as e:
flash('Something went wrong %s' % e, 'danger')
logging.exception(e)
return dict(form=form)
return dict(form=form, results=results, labels=langid.DEFAULT_LABELS)
@blueprint_langid.route('/text', methods=['GET', 'POST'])
@templated('langid.html')
def predict_text():
form = TextForm(request.form)
if request.method == 'GET':
return dict(form=form)
elif not form.validate():
for f, errs in form.errors.items():
flash("%s: %s" % (f, "<br>".join(errs)), 'danger')
return dict(form=form)
results = [[r] for r in langid.lang_of_text(
form.text.data, model=form.model_class.data, with_proba=True)]
return dict(form=form, results=results, labels=langid.DEFAULT_LABELS)
| [((14, 19, 14, 48), 'flask.Blueprint', 'Blueprint', ({(14, 29, 14, 37): '"""langid"""', (14, 39, 14, 47): '__name__'}, {}), "('langid', __name__)", False, 'from flask import Blueprint\n'), ((60, 1, 60, 24), 'utils.utils.templated', 'templated', ({(60, 11, 60, 23): '"""index.html"""'}, {}), "('index.html')", False, 'from utils.utils import templated\n'), ((83, 1, 83, 25), 'utils.utils.templated', 'templated', ({(83, 11, 83, 24): '"""langid.html"""'}, {}), "('langid.html')", False, 'from utils.utils import templated\n'), ((39, 17, 42, 5), 'wtforms.BooleanField', 'BooleanField', (), '', False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((49, 15, 49, 25), 'wtforms.widgets.TextArea', 'TextArea', ({}, {}), '()', False, 'from wtforms.widgets import TextArea\n'), ((76, 8, 76, 54), 'flask.flash', 'flash', ({(76, 14, 76, 43): "('Something went wrong %s' % e)", (76, 45, 76, 53): '"""danger"""'}, {}), "('Something went wrong %s' % e, 'danger')", False, 'from flask import Flask, render_template, request, flash\n'), ((77, 8, 77, 28), 'logging.exception', 'logging.exception', ({(77, 26, 77, 27): 'e'}, {}), '(e)', False, 'import logging\n'), ((95, 28, 96, 69), 'langid.lang_of_text', 'langid.lang_of_text', (), '', False, 'import langid\n'), ((20, 20, 20, 45), 'wtforms.validators.DataRequired', 'validators.DataRequired', ({}, {}), '()', False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((20, 47, 20, 104), 'wtforms.validators.URL', 'validators.URL', (), '', False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((25, 20, 25, 45), 'wtforms.validators.DataRequired', 'validators.DataRequired', ({}, {}), '()', False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((25, 47, 25, 84), 'wtforms.validators.NumberRange', 'validators.NumberRange', (), '', False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((31, 20, 31, 45), 'wtforms.validators.DataRequired', 'validators.DataRequired', ({}, {}), '()', False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((37, 20, 37, 45), 'wtforms.validators.DataRequired', 'validators.DataRequired', ({}, {}), '()', False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((50, 20, 50, 45), 'wtforms.validators.DataRequired', 'validators.DataRequired', ({}, {}), '()', False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((56, 20, 56, 45), 'wtforms.validators.DataRequired', 'validators.DataRequired', ({}, {}), '()', False, 'from wtforms import StringField, validators, SelectField, BooleanField\n')] |
kehw/spack | var/spack/repos/builtin/packages/r-xts/package.py | 4f49b1a9301447a8cf880c99820cad65e5c2d7e3 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RXts(RPackage):
"""Provide for uniform handling of R's different time-based data classes by
extending zoo, maximizing native format information preservation and
allowing for user level customization and extension, while simplifying
cross-class interoperability."""
homepage = "http://r-forge.r-project.org/projects/xts/"
url = "https://cloud.r-project.org/src/contrib/xts_0.11-2.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/xts"
version('0.11-2', sha256='12772f6a66aab5b84b0665c470f11a3d8d8a992955c027261cfe8e6077ee13b8')
version('0.9-7', sha256='f11f7cb98f4b92b7f6632a2151257914130880c267736ef5a264b5dc2dfb7098')
depends_on('[email protected]:', type=('build', 'run'))
| [] |
threefoldtech/threebot_prebuilt | sandbox/lib/jumpscale/Jumpscale/core/BASECLASSES/JSConfigsBCDB.py | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | # Copyright (C) July 2018: TF TECH NV in Belgium see https://www.threefold.tech/
# In case TF TECH NV ceases to exist (e.g. because of bankruptcy)
# then Incubaid NV also in Belgium will get the Copyright & Authorship for all changes made since July 2018
# and the license will automatically become Apache v2 for all code related to Jumpscale & DigitalMe
# This file is part of jumpscale at <https://github.com/threefoldtech>.
# jumpscale is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jumpscale is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License v3 for more details.
#
# You should have received a copy of the GNU General Public License
# along with jumpscale or jumpscale derived works. If not, see <http://www.gnu.org/licenses/>.
# LICENSE END
from Jumpscale import j
from .JSConfigBCDBBase import JSConfigBCDBBase
class JSConfigsBCDB(JSConfigBCDBBase):
def _childclass_selector(self, jsxobject, **kwargs):
"""
allow custom implementation of which child class to use
:return:
"""
return self.__class__._CHILDCLASS
def new(self, name, jsxobject=None, autosave=True, **kwargs):
"""
it it exists will delete if first when delete == True
:param name:
:param jsxobject:
:param autosave: sets the autosave argument on the data and also saves the object before the function returns. If set to False, you need to explicitly save the object.
:param kwargs:
:return:
"""
if self.exists(name=name):
raise j.exceptions.Base("cannot do new object, exists")
jsconfig = self._new(name=name, jsxobject=jsxobject, autosave=autosave, **kwargs)
self._check(jsconfig)
return jsconfig
def _check_children(self):
if not self._cache_use:
assert self._children == {}
def _check(self, jsconfig):
if jsconfig._id is None:
# model has never been saved no check required yet
return
# lets do some tests (maybe in future can be removed, but for now the safe bet)
assert jsconfig._id > 0
mother_id = jsconfig._mother_id_get()
if mother_id:
assert jsconfig.mother_id == mother_id
assert jsconfig._model.schema._md5 == self._model.schema._md5
def _new(self, name, jsxobject=None, autosave=True, **kwargs):
"""
:param name: for the CONFIG item (is a unique name for the service, client, ...)
:param jsxobject: you can right away specify the jsxobject
:param kwargs: the data elements which will be given to JSXObject underneith (given to constructor)
:return: the service
"""
kwargs_to_class = {}
if not jsxobject:
if kwargs:
kwargs_to_obj_new = {}
props = [i.name for i in self._model.schema.properties]
for key, val in kwargs.items():
if key in props:
kwargs_to_obj_new[key] = val
else:
kwargs_to_class[key] = val
jsxobject = self._model.new(data=kwargs_to_obj_new)
else:
jsxobject = self._model.new()
jsxobject.name = name
# means we need to remember the parent id
mother_id = self._mother_id_get()
if mother_id:
if jsxobject.mother_id != mother_id:
jsxobject.mother_id = mother_id
jsconfig_klass = self._childclass_selector(jsxobject=jsxobject)
jsconfig = jsconfig_klass(parent=self, jsxobject=jsxobject, **kwargs_to_class)
jsconfig._triggers_call(jsconfig, "new")
jsconfig._autosave = autosave
self._children[name] = jsconfig
if autosave:
self._children[name].save()
jsxobject._autosave = autosave
return self._children[name]
def get(self, name="main", id=None, needexist=False, autosave=True, reload=False, **kwargs):
"""
:param name: of the object
"""
# will reload if needed (not in self._children)
rc, jsconfig = self._get(name=name, id=id, die=needexist, reload=reload)
if not jsconfig:
self._log_debug("NEW OBJ:%s:%s" % (name, self._classname))
jsconfig = self._new(name=name, autosave=autosave, **kwargs)
else:
# check that the stored values correspond with kwargs given
# means comes from the database
if not jsconfig._data._model.schema._md5 == jsconfig._model.schema._md5:
# means data came from DB and schema is not same as config mgmt class
j.shell()
changed = False
jsconfig._data._autosave = False
for key, val in kwargs.items():
if not getattr(jsconfig, key) == val:
changed = True
setattr(jsconfig, key, val)
if changed and autosave:
try:
jsconfig.save()
except Exception as e:
print("CHECK WHY ERROR")
j.shell()
jsconfig._autosave = autosave
# lets do some tests (maybe in future can be removed, but for now the safe bet)
self._check(jsconfig)
jsconfig._triggers_call(jsconfig, "get")
return jsconfig
def _get(self, name="main", id=None, die=True, reload=False, autosave=True):
if id:
obj = self._model.get(id)
name = obj.name
return 1, self._new(name, obj)
obj = self._validate_child(name)
if obj:
if reload:
obj.load()
return 1, obj
self._log_debug("get child:'%s'from '%s'" % (name, self._classname))
# new = False
res = self.find(name=name)
if len(res) < 1:
if not die:
return 3, None
raise j.exceptions.Base(
"Did not find instance for:%s, name searched for:%s" % (self.__class__._location, name)
)
elif len(res) > 1:
raise j.exceptions.Base(
"Found more than 1 service for :%s, name searched for:%s" % (self.__class__._location, name)
)
else:
jsxconfig = res[0]
jsxconfig._autosave = autosave
return 2, jsxconfig
def reset(self):
"""
        will destroy all data in the DB, be careful
:return:
"""
self._log_debug("reset all data")
for item in self.find():
try:
item.delete()
except Exception as e:
j.shell()
if not self._mother_id_get():
self._model.index.destroy()
def _children_names_get(self, filter=None):
condition = False
Item = self._model.index.sql
mother_id = self._mother_id_get()
if mother_id:
condition = Item.mother_id == mother_id
if filter and filter != "*":
condition = Item.name.startswith(filter) and condition if condition else Item.name.startswith(filter)
if condition:
res = [i.name for i in Item.select().where(condition)]
else:
res = [i.name for i in Item.select()]
if len(res) > 50:
return []
return res
def find(self, reload=False, **kwargs):
"""
:param kwargs: e.g. color="red",...
:return: list of the config objects
"""
res = []
ids_done = []
for key, item in list(self._children.items()):
match = True
for key, val in kwargs.items():
if item._hasattr(key):
if val != getattr(item, key):
match = False
else:
match = False
if match:
if reload:
item.load()
res.append(item)
if item.id not in ids_done:
ids_done.append(item.id)
kwargs = self._kwargs_update(kwargs)
# this is more efficient no need to go to backend stor if the objects are already in mem
ids = self._model.find_ids(**kwargs)
for id in ids:
if id not in ids_done:
item = self.get(id=id, reload=reload, autosave=False)
res.append(item)
return res
def _kwargs_update(self, kwargs):
mother_id = self._mother_id_get()
if mother_id:
kwargs["mother_id"] = mother_id
return kwargs
def count(self, **kwargs):
"""
:param kwargs: e.g. color="red",...
:return: list of the config objects
"""
kwargs = self._kwargs_update(kwargs)
# TODO do proper count query
return len(list(self._model.find_ids(**kwargs)))
def _findData(self, **kwargs):
"""
:param kwargs: e.g. color="red",...
:return: list of the data objects (the data of the model)
"""
kwargs = self._kwargs_update(kwargs)
return self._model.find(**kwargs)
def save(self):
for item in self._children_get():
if item._hasattr("save"):
item.save()
def delete(self, name=None):
"""
:param name:
:return:
"""
self._delete(name=name)
def _delete(self, name=None):
if name:
_, child = self._get(name=name, die=False)
if child:
return child.delete()
else:
return self.reset()
if not name and self._parent:
if self._classname in self._parent._children:
if not isinstance(self._parent, j.baseclasses.factory):
# only delete when not a factory means is a custom class we're building
del self._parent._children[self._data.name]
def exists(self, name="main"):
"""
:param name: of the object
"""
obj = self._validate_child(name)
if obj:
return True
# will only use the index
return self.count(name=name) == 1
def _children_get(self, filter=None):
"""
:param filter: is '' then will show all, if None will ignore _
when * at end it will be considered a prefix
when * at start it will be considered a end of line filter (endswith)
when R as first char its considered to be a regex
everything else is a full match
:return:
"""
# TODO implement filter properly
x = []
for _, item in self._children.items():
x.append(item)
x = self._filter(filter=filter, llist=x, nameonly=False)
# be smarter in how we use the index
for item in self.find():
if item not in x:
x.append(item)
return x
def __str__(self):
return "jsxconfigobj:collection:%s" % self._model.schema.url
| [((43, 18, 43, 67), 'Jumpscale.j.exceptions.Base', 'j.exceptions.Base', ({(43, 36, 43, 66): '"""cannot do new object, exists"""'}, {}), "('cannot do new object, exists')", False, 'from Jumpscale import j\n'), ((163, 18, 165, 13), 'Jumpscale.j.exceptions.Base', 'j.exceptions.Base', ({(164, 16, 164, 103): "('Did not find instance for:%s, name searched for:%s' % (self.__class__.\n _location, name))"}, {}), "('Did not find instance for:%s, name searched for:%s' % (\n self.__class__._location, name))", False, 'from Jumpscale import j\n'), ((119, 16, 119, 25), 'Jumpscale.j.shell', 'j.shell', ({}, {}), '()', False, 'from Jumpscale import j\n'), ((168, 18, 170, 13), 'Jumpscale.j.exceptions.Base', 'j.exceptions.Base', ({(169, 16, 169, 108): "('Found more than 1 service for :%s, name searched for:%s' % (self.\n __class__._location, name))"}, {}), "('Found more than 1 service for :%s, name searched for:%s' %\n (self.__class__._location, name))", False, 'from Jumpscale import j\n'), ((188, 16, 188, 25), 'Jumpscale.j.shell', 'j.shell', ({}, {}), '()', False, 'from Jumpscale import j\n'), ((131, 20, 131, 29), 'Jumpscale.j.shell', 'j.shell', ({}, {}), '()', False, 'from Jumpscale import j\n')] |
holderekt/regression-tree | source/tree.py | 130fe07262faea8681159092718310d9aefe9889 | import utils as utl
import error_measures as err
# Regression Tree Node
class Node:
def __init__(self, parent, node_id, index=None, value=None, examples=None, prediction=0):
self.index = index
self.id = node_id
self.prediction = prediction
self.value = value
self.parent = parent
self.examples = examples
self.right = None
self.left = None
self.ssr = 0
self.leaves = 0
self.ssr_as_root = 0
def is_leaf(self):
if(self.right == None and self.left == None):
return True
return False
def leafs_id(self):
if(not self.is_leaf()):
return self._leafs_search(self.left) + self._leafs_search(self.right)
return [1]
def n_leafs(self):
return len(self.leafs_id())
def _leafs_search(self, node):
if node.is_leaf():
return [node.id]
return self._leafs_search(node.left) + self._leafs_search(node.right)
def __str__(self):
return str(self.id)
# Regression Tree
class Regression_Tree:
def __init__(self, y_train, root):
self.y = y_train
self.root = root
# Generate Prediction given a test example
def predict(self, example, deleted=[]):
current_node = self.root
while(not current_node.is_leaf() and ((current_node in deleted) == False)):
if(example[current_node.index] <= current_node.value):
current_node = current_node.left
else:
current_node = current_node.right
return current_node.prediction
# Generate Sum Square Residuals of a given node on training data
def node_ssr(self, node):
ssr = 0
for example in node.examples:
ssr = ssr + pow((self.y[example] - node.prediction) , 2)
return ssr
def leafs_id(self):
return self.root.leafs_id()
def n_leafs(self):
return len(self.leafs_id())
def __str__(self):
return self._print(self.root)
def print_leaf(self, node):
if(node.is_leaf()):
print(len(node.examples))
else:
self.print_leaf(node.left)
self.print_leaf(node.right)
def _print(self, node):
node_id = str(node.id)
r_string = node_id + " " + str(node.ssr)
if(not node.is_leaf()):
r_string = r_string + "\nLeft : " + node_id + "\n" + self._print(node.left)
r_string = r_string + "\nRight: " + node_id + "\n" + self._print(node.right)
return r_string
| [] |
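To make the traversal in predict() and the SSR bookkeeping concrete, here is a small hand-built stump; the ids, split value, and training targets are illustrative assumptions:

# split on feature 0 at 5.0; y values are indexed by the example ids stored in each node
y_train = [1.0, 1.2, 9.8, 10.0]

root = Node(parent=None, node_id=1, index=0, value=5.0, examples=[0, 1, 2, 3])
root.left = Node(parent=root, node_id=2, examples=[0, 1], prediction=1.1)   # x[0] <= 5.0
root.right = Node(parent=root, node_id=3, examples=[2, 3], prediction=9.9)  # x[0] > 5.0

tree = Regression_Tree(y_train, root)

print(tree.predict([2.0]))         # 1.1 (goes left)
print(tree.predict([7.5]))         # 9.9 (goes right)
print(tree.leafs_id())             # [2, 3]
print(tree.node_ssr(root.left))    # (1.0-1.1)^2 + (1.2-1.1)^2 ~ 0.02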
ninaamorim/sentiment-analysis-2018-president-election | src/site/config.py | a5c12f1b659186edbc2dfa916bc82a2cfa2dd67f | from starlette.applications import Starlette
from starlette.middleware.gzip import GZipMiddleware
from starlette.middleware.cors import CORSMiddleware
from starlette.staticfiles import StaticFiles
app = Starlette(debug=False, template_directory='src/site/templates')
app.add_middleware(GZipMiddleware, minimum_size=500)
app.add_middleware(CORSMiddleware, allow_origins=['*'])
app.mount('/static', StaticFiles(directory='src/site/media'), name='static')
| [((6, 6, 6, 69), 'starlette.applications.Starlette', 'Starlette', (), '', False, 'from starlette.applications import Starlette\n'), ((9, 21, 9, 60), 'starlette.staticfiles.StaticFiles', 'StaticFiles', (), '', False, 'from starlette.staticfiles import StaticFiles\n')] |
fqzhou/LoadBalanceControl-RL | loadbalanceRL/lib/__init__.py | 689eec3b3b27e121aa45d2793e411f1863f6fc0b | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Contains core logic for Rainman2
"""
__author__ = 'Ari Saha ([email protected]), Mingyang Liu([email protected])'
__date__ = 'Wednesday, February 14th 2018, 11:42:09 am'
| [] |
openforcefield/bespoke-f | openff/bespokefit/__init__.py | 27b072bd09610dc8209429118d739e1f453edd61 | """
BespokeFit
Creating bespoke parameters for individual molecules.
"""
import logging
import sys
from ._version import get_versions
versions = get_versions()
__version__ = versions["version"]
__git_revision__ = versions["full-revisionid"]
del get_versions, versions
# Silence verbose messages when running the CLI otherwise you can't read the output
# without seeing tens of 'Unable to load AmberTools' or don't import simtk warnings...
if sys.argv[0].endswith("openff-bespoke"):
from openff.bespokefit.utilities.logging import DeprecationWarningFilter
# if "openff-bespoke"
logging.getLogger("openff.toolkit").setLevel(logging.ERROR)
logging.getLogger().addFilter(DeprecationWarningFilter())
| [((24, 34, 24, 60), 'openff.bespokefit.utilities.logging.DeprecationWarningFilter', 'DeprecationWarningFilter', ({}, {}), '()', False, 'from openff.bespokefit.utilities.logging import DeprecationWarningFilter\n'), ((23, 4, 23, 39), 'logging.getLogger', 'logging.getLogger', ({(23, 22, 23, 38): '"""openff.toolkit"""'}, {}), "('openff.toolkit')", False, 'import logging\n'), ((24, 4, 24, 23), 'logging.getLogger', 'logging.getLogger', ({}, {}), '()', False, 'import logging\n')] |
greenwoodms/TRANSFORM-Library | TRANSFORM/Resources/python/2006LUT_to_SDF.py | dc152d4f0298d3f18385f2ea33645d87d7812915 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 03 11:06:37 2018
@author: vmg
"""
import sdf
import numpy as np
# Load 2006 LUT for interpolation
# 2006 Groeneveld Look-Up Table as presented in
# "2006 CHF Look-Up Table", Nuclear Engineering and Design 237, pp. 190-1922.
# This file requires the file 2006LUTdata.txt
# Pressure range [MPa] from 2006 LUT, convert to [Pa]
P = np.array((0.10,0.30,0.50,1.0,2.0,3.0,5.0,7.0,10.0,12.0,14.0,16.0,18.0,20.0,21.0))*1e6
# Mass Flux range [kg/m^2-s] from 2006 .LUT.
G = np.array((0.,50.,100.,300.,500.,750.,1000.,1500.,2000.,2500.,3000.,3500.,4000.,4500.,5000.,5500.,6000.,6500.,7000.,7500.,8000.))
# Quality range from 2006 LUT
x = np.array((-0.50,-0.40,-0.30,-0.20,-0.15,-0.10,-0.05,0.00,0.05,0.10,0.15,0.20,0.25,0.30,0.35,0.40,0.45,0.50,0.60,0.70,0.80,0.90,1.00))
# Critical heat flux [kW/m^2] from 2006 LUT, convert to [W/m^2]
q_raw=np.loadtxt('../Data/2006LUTdata.txt')*1e3
# Convert the imported array into a (MxNxQ) where:
# M is number of mass flux divisions
# N is number of quality divisions
# Q is number of pressure divisions
lenG = len(G)
lenx = len(x)
lenP = len(P)
q = np.zeros((lenG,lenx,lenP))
for i in range(lenG):
    for j in range(lenx):
        for k in range(lenP):
q[i,j,k] = q_raw[i + k*lenG,j]
# Create the datasets:
ds_G = sdf.Dataset('G', data=G, unit='kg/(m2.s)', is_scale=True, display_name='Mass Flux')
ds_x = sdf.Dataset('x', data=x, unit='1', is_scale=True, display_name='Quality')
ds_P = sdf.Dataset('P', data=P, unit='Pa', is_scale=True, display_name='Pressure')
ds_q = sdf.Dataset('q', data=q, unit='W/m2', scales=[ds_G,ds_x,ds_P])
# Create the root group and write the file:
g = sdf.Group('/', comment='2006 CHF LUT', datasets=[ds_G,ds_x,ds_P,ds_q])
sdf.save('../Data/2006LUT.sdf', g) | [((21, 4, 21, 132), 'numpy.array', 'np.array', ({(21, 13, 21, 131): '(0.0, 50.0, 100.0, 300.0, 500.0, 750.0, 1000.0, 1500.0, 2000.0, 2500.0, \n 3000.0, 3500.0, 4000.0, 4500.0, 5000.0, 5500.0, 6000.0, 6500.0, 7000.0,\n 7500.0, 8000.0)'}, {}), '((0.0, 50.0, 100.0, 300.0, 500.0, 750.0, 1000.0, 1500.0, 2000.0, \n 2500.0, 3000.0, 3500.0, 4000.0, 4500.0, 5000.0, 5500.0, 6000.0, 6500.0,\n 7000.0, 7500.0, 8000.0))', True, 'import numpy as np\n'), ((24, 4, 24, 137), 'numpy.array', 'np.array', ({(24, 13, 24, 136): '(-0.5, -0.4, -0.3, -0.2, -0.15, -0.1, -0.05, 0.0, 0.05, 0.1, 0.15, 0.2, \n 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)'}, {}), '((-0.5, -0.4, -0.3, -0.2, -0.15, -0.1, -0.05, 0.0, 0.05, 0.1, 0.15,\n 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0))', True, 'import numpy as np\n'), ((36, 4, 36, 30), 'numpy.zeros', 'np.zeros', ({(36, 13, 36, 29): '(lenG, lenx, lenP)'}, {}), '((lenG, lenx, lenP))', True, 'import numpy as np\n'), ((43, 7, 43, 90), 'sdf.Dataset', 'sdf.Dataset', (), '', False, 'import sdf\n'), ((44, 7, 44, 80), 'sdf.Dataset', 'sdf.Dataset', (), '', False, 'import sdf\n'), ((45, 7, 45, 82), 'sdf.Dataset', 'sdf.Dataset', (), '', False, 'import sdf\n'), ((46, 7, 46, 69), 'sdf.Dataset', 'sdf.Dataset', (), '', False, 'import sdf\n'), ((49, 4, 49, 74), 'sdf.Group', 'sdf.Group', (), '', False, 'import sdf\n'), ((50, 0, 50, 34), 'sdf.save', 'sdf.save', ({(50, 9, 50, 30): '"""../Data/2006LUT.sdf"""', (50, 32, 50, 33): 'g'}, {}), "('../Data/2006LUT.sdf', g)", False, 'import sdf\n'), ((18, 4, 18, 85), 'numpy.array', 'np.array', ({(18, 13, 18, 84): '(0.1, 0.3, 0.5, 1.0, 2.0, 3.0, 5.0, 7.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0,\n 21.0)'}, {}), '((0.1, 0.3, 0.5, 1.0, 2.0, 3.0, 5.0, 7.0, 10.0, 12.0, 14.0, 16.0, \n 18.0, 20.0, 21.0))', True, 'import numpy as np\n'), ((27, 6, 27, 43), 'numpy.loadtxt', 'np.loadtxt', ({(27, 17, 27, 42): '"""../Data/2006LUTdata.txt"""'}, {}), "('../Data/2006LUTdata.txt')", True, 'import numpy as np\n')] |
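Once the grid is assembled, critical heat flux at an arbitrary operating point can be estimated by interpolation. The sketch below uses scipy's RegularGridInterpolator directly on the in-memory arrays rather than reading the .sdf file back; that choice and the query point are assumptions for illustration.

from scipy.interpolate import RegularGridInterpolator

# trilinear interpolation over the (G, x, P) grid built above
chf_lookup = RegularGridInterpolator((G, x, P), q)

# hypothetical operating point: 3000 kg/(m2-s), quality 0.1, 10 MPa
q_chf = chf_lookup([[3000.0, 0.1, 10.0e6]])[0]
print("CHF estimate: %.1f W/m2" % q_chf)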
tmsanrinsha/vint | test/asserting/policy.py | 8c34196252b43d7361d0f58cb78cf2d3e4e4fbd0 | import unittest
from pathlib import Path
from pprint import pprint
from vint.compat.itertools import zip_longest
from vint.linting.linter import Linter
from vint.linting.config.config_default_source import ConfigDefaultSource
class PolicyAssertion(unittest.TestCase):
class StubPolicySet(object):
def __init__(self, *policies):
self._policies = policies
def get_enabled_policies(self):
return self._policies
def update_by_config(self, policy_enabling_map):
pass
class StubConfigContainer(object):
def __init__(self, policy_names_to_enable):
default_config_dict = ConfigDefaultSource(None).get_config_dict()
policy_options = default_config_dict.get('policies', {})
for policy, options in policy_options.items():
options['enabled'] = False
for policy in policy_names_to_enable:
options = policy_options.setdefault(policy, {})
options['enabled'] = True
self._config_dict = {
'policies': policy_options,
}
def append_config_source(self, config_source):
# Ignore a comment config source
pass
def get_config_dict(self):
return self._config_dict
def assertFoundNoViolations(self, path, Policy, policy_options=None):
self.assertFoundViolationsEqual(path, Policy, [], policy_options)
def assertFoundViolationsEqual(self, path, Policy, expected_violations, policy_options=None):
policy_to_test = Policy()
policy_name = Policy.__name__
policy_set = PolicyAssertion.StubPolicySet(policy_to_test)
config = PolicyAssertion.StubConfigContainer(policy_name)
if policy_options is not None:
config.get_config_dict()['policies'][policy_name].update(policy_options)
linter = Linter(policy_set, config.get_config_dict())
violations = linter.lint_file(path)
pprint(violations)
assert len(violations) == len(expected_violations)
for violation, expected_violation in zip_longest(violations, expected_violations):
self.assertViolation(violation, expected_violation)
def assertViolation(self, actual_violation, expected_violation):
self.assertIsNot(actual_violation, None)
self.assertIsNot(expected_violation, None)
pprint(actual_violation)
assert actual_violation['name'] == expected_violation['name']
assert actual_violation['position'] == expected_violation['position']
assert actual_violation['level'] == expected_violation['level']
self.assertIsInstance(actual_violation['description'], str)
def get_fixture_path(*filename):
return Path('test', 'fixture', 'policy', *filename)
| [((88, 11, 88, 55), 'pathlib.Path', 'Path', ({(88, 16, 88, 22): '"""test"""', (88, 24, 88, 33): '"""fixture"""', (88, 35, 88, 43): '"""policy"""', (88, 45, 88, 54): '*filename'}, {}), "('test', 'fixture', 'policy', *filename)", False, 'from pathlib import Path\n'), ((67, 8, 67, 26), 'pprint.pprint', 'pprint', ({(67, 15, 67, 25): 'violations'}, {}), '(violations)', False, 'from pprint import pprint\n'), ((70, 45, 70, 89), 'vint.compat.itertools.zip_longest', 'zip_longest', ({(70, 57, 70, 67): 'violations', (70, 69, 70, 88): 'expected_violations'}, {}), '(violations, expected_violations)', False, 'from vint.compat.itertools import zip_longest\n'), ((78, 8, 78, 32), 'pprint.pprint', 'pprint', ({(78, 15, 78, 31): 'actual_violation'}, {}), '(actual_violation)', False, 'from pprint import pprint\n'), ((26, 34, 26, 59), 'vint.linting.config.config_default_source.ConfigDefaultSource', 'ConfigDefaultSource', ({(26, 54, 26, 58): 'None'}, {}), '(None)', False, 'from vint.linting.config.config_default_source import ConfigDefaultSource\n')] |
gliptak/DataProfiler | dataprofiler/labelers/character_level_cnn_model.py | 37ffbf43652246ef27e070df7ff0d9f1b9529162 | import copy
import json
import logging
import os
import sys
import time
from collections import defaultdict
import numpy as np
import tensorflow as tf
from sklearn import decomposition
from .. import dp_logging
from . import labeler_utils
from .base_model import AutoSubRegistrationMeta, BaseModel, BaseTrainableModel
_file_dir = os.path.dirname(os.path.abspath(__file__))
logger = dp_logging.get_child_logger(__name__)
class NoV1ResourceMessageFilter(logging.Filter):
"""Removes TF2 warning for using TF1 model which has resources."""
def filter(self, record):
msg = 'is a problem, consider rebuilding the SavedModel after ' + \
'running tf.compat.v1.enable_resource_variables()'
return msg not in record.getMessage()
tf_logger = logging.getLogger('tensorflow')
tf_logger.addFilter(NoV1ResourceMessageFilter())
@tf.keras.utils.register_keras_serializable()
class FBetaScore(tf.keras.metrics.Metric):
r"""Computes F-Beta score.
Adapted and slightly modified from https://github.com/tensorflow/addons/blob/v0.12.0/tensorflow_addons/metrics/f_scores.py#L211-L283
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://github.com/tensorflow/addons/blob/v0.12.0/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
It is the weighted harmonic mean of precision
and recall. Output range is `[0, 1]`. Works for
both multi-class and multi-label classification.
$$
    F_{\beta} = (1 + \beta^2) * \frac{\textrm{precision} \cdot \textrm{recall}}{(\beta^2 \cdot \textrm{precision}) + \textrm{recall}}
$$
Args:
num_classes: Number of unique classes in the dataset.
average: Type of averaging to be performed on data.
Acceptable values are `None`, `micro`, `macro` and
`weighted`. Default value is None.
beta: Determines the weight of precision and recall
in harmonic mean. Determines the weight given to the
precision and recall. Default value is 1.
threshold: Elements of `y_pred` greater than threshold are
converted to be 1, and the rest 0. If threshold is
None, the argmax is converted to 1, and the rest 0.
name: (Optional) String name of the metric instance.
dtype: (Optional) Data type of the metric result.
Returns:
F-Beta Score: float.
"""
# Modification: remove the run-time type checking for functions
def __init__(self, num_classes, average=None, beta=1.0, threshold=None,
name="fbeta_score", dtype=None, **kwargs):
super().__init__(name=name, dtype=dtype)
if average not in (None, "micro", "macro", "weighted"):
raise ValueError(
"Unknown average type. Acceptable values "
"are: [None, 'micro', 'macro', 'weighted']"
)
if not isinstance(beta, float):
raise TypeError("The value of beta should be a python float")
if beta <= 0.0:
raise ValueError("beta value should be greater than zero")
if threshold is not None:
if not isinstance(threshold, float):
raise TypeError("The value of threshold should be a python float")
if threshold > 1.0 or threshold <= 0.0:
raise ValueError("threshold should be between 0 and 1")
self.num_classes = num_classes
self.average = average
self.beta = beta
self.threshold = threshold
self.axis = None
self.init_shape = []
if self.average != "micro":
self.axis = 0
self.init_shape = [self.num_classes]
def _zero_wt_init(name):
return self.add_weight(
name, shape=self.init_shape, initializer="zeros", dtype=self.dtype
)
self.true_positives = _zero_wt_init("true_positives")
self.false_positives = _zero_wt_init("false_positives")
self.false_negatives = _zero_wt_init("false_negatives")
self.weights_intermediate = _zero_wt_init("weights_intermediate")
def update_state(self, y_true, y_pred, sample_weight=None):
if self.threshold is None:
threshold = tf.reduce_max(y_pred, axis=-1, keepdims=True)
# make sure [0, 0, 0] doesn't become [1, 1, 1]
# Use abs(x) > eps, instead of x != 0 to check for zero
y_pred = tf.logical_and(y_pred >= threshold, tf.abs(y_pred) > 1e-12)
else:
y_pred = y_pred > self.threshold
y_true = tf.cast(y_true, self.dtype)
y_pred = tf.cast(y_pred, self.dtype)
def _weighted_sum(val, sample_weight):
if sample_weight is not None:
val = tf.math.multiply(val, tf.expand_dims(sample_weight, 1))
return tf.reduce_sum(val, axis=self.axis)
self.true_positives.assign_add(_weighted_sum(y_pred * y_true, sample_weight))
self.false_positives.assign_add(
_weighted_sum(y_pred * (1 - y_true), sample_weight)
)
self.false_negatives.assign_add(
_weighted_sum((1 - y_pred) * y_true, sample_weight)
)
self.weights_intermediate.assign_add(_weighted_sum(y_true, sample_weight))
def result(self):
precision = tf.math.divide_no_nan(
self.true_positives, self.true_positives + self.false_positives
)
recall = tf.math.divide_no_nan(
self.true_positives, self.true_positives + self.false_negatives
)
mul_value = precision * recall
add_value = (tf.math.square(self.beta) * precision) + recall
mean = tf.math.divide_no_nan(mul_value, add_value)
f1_score = mean * (1 + tf.math.square(self.beta))
if self.average == "weighted":
weights = tf.math.divide_no_nan(
self.weights_intermediate, tf.reduce_sum(self.weights_intermediate)
)
f1_score = tf.reduce_sum(f1_score * weights)
elif self.average is not None: # [micro, macro]
f1_score = tf.reduce_mean(f1_score)
return f1_score
def get_config(self):
"""Returns the serializable config of the metric."""
config = {
"num_classes": self.num_classes,
"average": self.average,
"beta": self.beta,
"threshold": self.threshold,
}
base_config = super().get_config()
return {**base_config, **config}
def reset_states(self):
reset_value = tf.zeros(self.init_shape, dtype=self.dtype)
tf.keras.backend.batch_set_value([(v, reset_value) for v in self.variables])
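# Illustrative sketch (not part of the original module): one way FBetaScore could be
# wired into a Keras model as a training metric. The tiny Dense model below is a
# hypothetical placeholder used only to show the call pattern; the helper is never
# invoked anywhere in this module.
def _example_fbeta_metric_usage():
    demo_model = tf.keras.Sequential([
        tf.keras.layers.Dense(3, activation="softmax", input_shape=(4,)),
    ])
    demo_model.compile(
        optimizer="adam",
        loss="categorical_crossentropy",
        # beta=2.0 weights recall more heavily than precision; average="micro"
        # pools true/false positives over all classes before scoring.
        metrics=[FBetaScore(num_classes=3, average="micro", beta=2.0)],
    )
    return demo_model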
@tf.keras.utils.register_keras_serializable()
class F1Score(FBetaScore):
r"""Computes F-1 Score.
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://github.com/tensorflow/addons/blob/v0.12.0/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
It is the harmonic mean of precision and recall.
Output range is `[0, 1]`. Works for both multi-class
and multi-label classification.
$$
F_1 = 2 \cdot \frac{\textrm{precision} \cdot \textrm{recall}}{\textrm{precision} + \textrm{recall}}
$$
Args:
num_classes: Number of unique classes in the dataset.
average: Type of averaging to be performed on data.
Acceptable values are `None`, `micro`, `macro`
and `weighted`. Default value is None.
threshold: Elements of `y_pred` above threshold are
considered to be 1, and the rest 0. If threshold is
None, the argmax is converted to 1, and the rest 0.
name: (Optional) String name of the metric instance.
dtype: (Optional) Data type of the metric result.
Returns:
F-1 Score: float.
"""
# Modification: remove the run-time type checking for functions
def __init__(self, num_classes, average=None, threshold=None,
name="f1_score", dtype=None):
super().__init__(num_classes, average, 1.0, threshold, name=name, dtype=dtype)
def get_config(self):
base_config = super().get_config()
del base_config["beta"]
return base_config
def build_embd_dictionary(filename):
"""
Returns a numpy embedding dictionary from embed file with GloVe-like format
:param filename: Path to the embed file for loading
:type filename: str
"""
embd_table = dict()
with open(filename, 'r') as embds:
for line in embds:
line = line.strip().split()
embd_table[line[0]] = np.asarray(line[1:])
return embd_table
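# Illustrative sketch (not part of the original module), assuming the GloVe-like
# layout is "<token> <component> <component> ...", one entry per line. The helper
# writes a throwaway two-token file and loads it back; it is never invoked.
def _example_build_embd_dictionary():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
        tmp.write("a 0.017 -0.232 0.391\n")
        tmp.write("b 0.254 0.113 -0.082\n")
        demo_path = tmp.name
    table = build_embd_dictionary(demo_path)
    # table['a'] is a numpy array of the three components that follow the token "a"
    return table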
def create_glove_char(n_dims, source_file=None):
"""
    Reduces the GloVe character embeddings from the source file to n_dims
    principal components and writes them to a new file
:param n_dims: Final number of principal component dims of the embeddings
:type n_dims: int
:param source_file: Location of original embeddings to factor down
:type source_file: str
"""
if source_file is None:
source_file = os.path.join(_file_dir,
"embeddings/glove.840B.300d-char.txt")
# get embedding table first and vectors as array
embd_table = build_embd_dictionary(source_file)
embd_words, embd_matrix = [
np.asarray(ls) if i > 0 else list(ls)
for i, ls in enumerate(zip(*embd_table.items()))]
# get PCA embedder
pca = decomposition.PCA(n_components=n_dims)
reduced_embds = pca.fit_transform(embd_matrix)
# write to file
dir_name = os.path.dirname(source_file)
embd_file_name = os.path.join(dir_name,
'glove-reduced-{}D.txt'.format(n_dims))
with open(embd_file_name, 'w') as file:
for word, embd in zip(embd_words, reduced_embds):
file.write(word + " " + ' '.join(str(num) for num in embd) + "\n")
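# Illustrative sketch: create_glove_char(16) would PCA-reduce the bundled 300-D
# character embeddings and write "glove-reduced-16D.txt" next to the source file.
# The helper below only documents the call pattern and is never invoked.
def _example_create_glove_char():
    create_glove_char(16)
    return os.path.join(_file_dir, "embeddings", "glove-reduced-16D.txt")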
class CharacterLevelCnnModel(BaseTrainableModel,
metaclass=AutoSubRegistrationMeta):
# boolean if the label mapping requires the mapping for index 0 reserved
requires_zero_mapping = True
def __init__(self, label_mapping=None, parameters=None):
"""
        CNN Model Initializer. Initializes the epoch_id.
:param label_mapping: maps labels to their encoded integers
:type label_mapping: dict
:param parameters: Contains all the appropriate parameters for the
model. Must contain num_labels. Other possible parameters are:
max_length, max_char_encoding_id, dim_embed, size_fc
dropout, size_conv, num_fil, optimizer, default_label
:type parameters: dict
:return: None
"""
# parameter initialization
if not parameters:
parameters = {}
parameters.setdefault('max_length', 3400)
parameters.setdefault('max_char_encoding_id', 127)
parameters.setdefault('dim_embed', 64)
parameters.setdefault('size_fc', [96, 96])
parameters.setdefault('dropout', 0.073)
parameters.setdefault('size_conv', 13)
parameters.setdefault('default_label', "UNKNOWN")
parameters.setdefault('num_fil', [48 for _ in range(4)])
parameters['pad_label'] = 'PAD'
self._epoch_id = 0
# reconstruct flags for model
self._model_num_labels = 0
self._model_default_ind = -1
BaseModel.__init__(self, label_mapping, parameters)
def __eq__(self, other):
"""
        Checks whether two models are equal to one another; this may only check
        important variables, i.e. it may not check the model itself.
:param self: a model
:param other: a model
:type self: BaseModel
:type other: BaseModel
:return: Whether or not self and other are equal
:rtype: bool
"""
if self._parameters != other._parameters \
or self._label_mapping != other._label_mapping:
return False
return True
def _validate_parameters(self, parameters):
"""
Validate the parameters sent in. Raise error if invalid parameters are
present.
:param parameters: parameter dict containing the following parameters:
max_length: Maximum char length in a sample
max_char_encoding_id: Maximum integer value for encoding the input
dim_embed: Number of embedded dimensions
size_fc: Size of each fully connected layers
dropout: Ratio of dropout in the model
size_conv: Convolution kernel size
default_label: Key for label_mapping that is the default label
pad_label: Key for entities_dict that is the pad label
num_fil: Number of filters in each convolution layer
:type parameters: dict
:return: None
"""
errors = []
list_of_necessary_params = ['max_length', 'max_char_encoding_id',
'dim_embed', 'size_fc', 'dropout',
'size_conv', 'default_label', 'pad_label',
'num_fil']
# Make sure the necessary parameters are present and valid.
for param in parameters:
if param in ['max_length', 'max_char_encoding_id', 'dim_embed',
'size_conv']:
if not isinstance(parameters[param], (int, float)) \
or parameters[param] < 0:
errors.append(param + " must be a valid integer or float "
"greater than 0.")
elif param == 'dropout':
if not isinstance(parameters[param], (int, float)) \
or parameters[param] < 0 or parameters[param] > 1:
errors.append(param + " must be a valid integer or float "
"from 0 to 1.")
elif param == 'size_fc' or param == 'num_fil':
if not isinstance(parameters[param], list) \
or len(parameters[param]) == 0:
errors.append(param + " must be a non-empty list of "
"integers.")
else:
for item in parameters[param]:
if not isinstance(item, int):
errors.append(param + " must be a non-empty "
"list of integers.")
break
elif param == 'default_label':
if not isinstance(parameters[param], str):
error = str(param) + " must be a string."
errors.append(error)
# Error if there are extra parameters thrown in
for param in parameters:
if param not in list_of_necessary_params:
errors.append(param + " is not an accepted parameter.")
if errors:
raise ValueError('\n'.join(errors))
def set_label_mapping(self, label_mapping):
"""
Sets the labels for the model
:param label_mapping: label mapping of the model
:type label_mapping: dict
:return: None
"""
if not isinstance(label_mapping, (list, dict)):
raise TypeError("Labels must either be a non-empty encoding dict "
"which maps labels to index encodings or a list.")
label_mapping = copy.deepcopy(label_mapping)
if 'PAD' not in label_mapping:
if isinstance(label_mapping, list): # if list missing PAD
label_mapping = ['PAD'] + label_mapping
elif 0 not in label_mapping.values(): # if dict missing PAD and 0
label_mapping.update({'PAD': 0})
if (isinstance(label_mapping, dict)
and label_mapping.get('PAD', None) != 0): # dict with bad PAD
raise ValueError("`PAD` must map to index zero.")
if self._parameters['default_label'] not in label_mapping:
raise ValueError("The `default_label` of {} must exist in the "
"label mapping.".format(
self._parameters['default_label']))
super().set_label_mapping(label_mapping)
def _need_to_reconstruct_model(self):
"""
Determines whether or not the model needs to be reconstructed.
:return: bool of whether or not the model needs to reconstruct.
"""
if not self._model:
return False
default_ind = self.label_mapping[self._parameters['default_label']]
return self.num_labels != self._model_num_labels or \
default_ind != self._model_default_ind
def save_to_disk(self, dirpath):
"""
Saves whole model to disk with weights
:param dirpath: directory path where you want to save the model to
:type dirpath: str
:return: None
"""
if not self._model:
self._construct_model()
elif self._need_to_reconstruct_model():
self._reconstruct_model()
model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
with open(model_param_dirpath, 'w') as fp:
json.dump(self._parameters, fp)
labels_dirpath = os.path.join(dirpath, "label_mapping.json")
with open(labels_dirpath, 'w') as fp:
json.dump(self.label_mapping, fp)
self._model.save(os.path.join(dirpath))
@classmethod
def load_from_disk(cls, dirpath):
"""
Loads whole model from disk with weights
:param dirpath: directory path where you want to load the model from
:type dirpath: str
:return: None
"""
# load parameters
model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
with open(model_param_dirpath, 'r') as fp:
parameters = json.load(fp)
# load label_mapping
labels_dirpath = os.path.join(dirpath, "label_mapping.json")
with open(labels_dirpath, 'r') as fp:
label_mapping = json.load(fp)
# use f1 score metric
custom_objects = {
"F1Score": F1Score(
num_classes=max(label_mapping.values()) + 1,
average='micro'),
"CharacterLevelCnnModel": cls,
}
with tf.keras.utils.custom_object_scope(custom_objects):
tf_model = tf.keras.models.load_model(dirpath)
loaded_model = cls(label_mapping, parameters)
loaded_model._model = tf_model
# Tensorflow v1 Model weights need to be transferred.
if not callable(tf_model):
loaded_model._construct_model()
tf1_weights = []
for var in tf_model.variables:
if 'training' not in var.name:
tf1_weights.append(var.value())
loaded_model._construct_model()
tf1_weights.append(loaded_model._model.weights[-1].value())
loaded_model._model.set_weights(tf1_weights)
# load self
loaded_model._model_num_labels = loaded_model.num_labels
loaded_model._model_default_ind = loaded_model.label_mapping[
loaded_model._parameters['default_label']
]
return loaded_model
@staticmethod
def _char_encoding_layer(input_str_tensor, max_char_encoding_id, max_len):
"""
Character encoding for the list of sentences
:param input_str_tensor: input list of sentences converted to tensor
:type input_str_tensor: tf.tensor
:param max_char_encoding_id: Maximum integer value for encoding the
input
:type max_char_encoding_id: int
:param max_len: Maximum char length in a sample
:type max_len: int
:return : tensor containing encoded list of input sentences
:rtype: tf.Tensor
"""
# convert characters to indices
input_str_flatten = tf.reshape(input_str_tensor, [-1])
sentences_encode = tf.strings.unicode_decode(input_str_flatten,
input_encoding='UTF-8')
sentences_encode = tf.add(tf.cast(1, tf.int32), sentences_encode)
sentences_encode = tf.math.minimum(sentences_encode,
max_char_encoding_id + 1)
# padding
sentences_encode_pad = sentences_encode.to_tensor(shape=[None, max_len])
return sentences_encode_pad
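    # Worked example for _char_encoding_layer (sketch): for input ["Ab"] with
    # max_char_encoding_id=127 and max_len=5, unicode_decode yields [[65, 98]], the +1
    # shift gives [[66, 99]], the minimum with 128 leaves it unchanged, and padding
    # produces [[66, 99, 0, 0, 0]], so index 0 is reserved for padding.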
@staticmethod
def _argmax_threshold_layer(num_labels, threshold=0.0, default_ind=1):
"""
Adds an argmax threshold layer to the model. This layer's output will be
the argmax value if the confidence for that argmax meets the threshold
for its label, otherwise it will be the default label index.
:param num_labels: number of entities
:type num_labels: int
:param threshold: default set to 0 so all confidences pass.
:type threshold: float
:param default_ind: default index
:type default_ind: int
:return: final argmax threshold layer for the model
"""
# Initialize the thresholds vector variable and create the threshold
# matrix.
class ThreshArgMaxLayer(tf.keras.layers.Layer):
def __init__(self, threshold_, num_labels_):
super(ThreshArgMaxLayer, self).__init__()
thresh_init = tf.constant_initializer(threshold_)
self.thresh_vec = tf.Variable(
name='ThreshVec',
initial_value=thresh_init(shape=[num_labels_]),
trainable=False)
def call(self, argmax_layer, confidence_layer):
threshold_at_argmax = tf.gather(self.thresh_vec, argmax_layer)
confidence_max_layer = tf.keras.backend.max(confidence_layer,
axis=2)
# Check if the confidences meet the threshold minimum.
argmax_mask = tf.keras.backend.cast(
tf.keras.backend.greater_equal(confidence_max_layer,
threshold_at_argmax),
dtype=argmax_layer.dtype)
# Create a vector the same size as the batch_size which
# represents the background label
bg_label_tf = tf.keras.backend.constant(
default_ind, dtype=argmax_layer.dtype)
# Generate the final predicted output using the function:
final_predicted_layer = tf.add(
bg_label_tf,
tf.multiply(
tf.subtract(argmax_layer, bg_label_tf),
argmax_mask
), name='ThreshArgMax'
)
return final_predicted_layer
return ThreshArgMaxLayer(threshold, num_labels)
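    # Worked example for _argmax_threshold_layer (sketch): with default_ind=1 and a
    # per-label threshold of 0.5, an argmax of 3 whose max confidence is 0.8 passes the
    # mask, giving 1 + (3 - 1) * 1 = 3; if the max confidence were 0.3 the mask would be
    # 0 and the output would fall back to 1 + (3 - 1) * 0 = 1, the default label index.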
def _construct_model(self):
"""
Model constructor for the data labeler. This also serves as a weight
reset.
:return: None
"""
num_labels = self.num_labels
default_ind = self.label_mapping[self._parameters['default_label']]
# Reset model
tf.keras.backend.clear_session()
# generate glove embedding
create_glove_char(self._parameters['dim_embed'])
# generate model
self._model = tf.keras.models.Sequential()
# default parameters
max_length = self._parameters['max_length']
max_char_encoding_id = self._parameters['max_char_encoding_id']
# Encoding layer
def encoding_function(input_str):
char_in_vector = CharacterLevelCnnModel._char_encoding_layer(
input_str, max_char_encoding_id, max_length)
return char_in_vector
self._model.add(tf.keras.layers.Input(shape=(None,), dtype=tf.string))
self._model.add(
tf.keras.layers.Lambda(encoding_function,
output_shape=tuple([max_length])))
# Create a pre-trained weight matrix
# character encoding indices range from 0 to max_char_encoding_id,
# we add one extra index for out-of-vocabulary character
embed_file = os.path.join(
_file_dir, "embeddings/glove-reduced-{}D.txt".format(
self._parameters['dim_embed']))
embedding_matrix = np.zeros((max_char_encoding_id + 2,
self._parameters['dim_embed']))
embedding_dict = build_embd_dictionary(embed_file)
input_shape = tuple([max_length])
# Fill in the weight matrix: let pad and space be 0s
for ascii_num in range(max_char_encoding_id):
if chr(ascii_num) in embedding_dict:
embedding_matrix[ascii_num + 1] = embedding_dict[chr(ascii_num)]
self._model.add(tf.keras.layers.Embedding(
max_char_encoding_id + 2,
self._parameters['dim_embed'],
weights=[embedding_matrix],
input_length=input_shape[0],
trainable=True))
# Add the convolutional layers
for fil in self._parameters['num_fil']:
self._model.add(tf.keras.layers.Conv1D(
filters=fil, kernel_size=self._parameters['size_conv'],
activation='relu', padding='same'))
if self._parameters['dropout']:
self._model.add(
tf.keras.layers.Dropout(self._parameters['dropout']))
        # Add batch normalization (fused=False, scale=True)
self._model.add(
tf.keras.layers.BatchNormalization(fused=False, scale=True))
# Add the fully connected layers
for size in self._parameters['size_fc']:
self._model.add(
tf.keras.layers.Dense(units=size, activation='relu'))
if self._parameters['dropout']:
self._model.add(
tf.keras.layers.Dropout(self._parameters['dropout']))
# Add the final Softmax layer
self._model.add(
tf.keras.layers.Dense(num_labels, activation='softmax'))
# Output the model into a .pb file for TensorFlow
argmax_layer = tf.keras.backend.argmax(self._model.output)
# Create confidence layers
final_predicted_layer = CharacterLevelCnnModel._argmax_threshold_layer(
num_labels, threshold=0.0, default_ind=default_ind)
argmax_outputs = self._model.outputs + \
[argmax_layer,
final_predicted_layer(argmax_layer, self._model.output)]
self._model = tf.keras.Model(self._model.inputs, argmax_outputs)
# Compile the model
softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
losses = {softmax_output_layer_name: "categorical_crossentropy"}
# use f1 score metric
f1_score_training = F1Score(num_classes=num_labels, average='micro')
metrics = {softmax_output_layer_name: ['acc', f1_score_training]}
self._model.compile(loss=losses,
optimizer="adam",
metrics=metrics)
self._epoch_id = 0
self._model_num_labels = num_labels
self._model_default_ind = default_ind
def reset_weights(self):
"""
Reset the weights of the model.
:return: None
"""
self._construct_model()
def _reconstruct_model(self):
"""
        Reconstruct the appropriate layers if the number of labels is
altered
:return: None
"""
# Reset model
tf.keras.backend.clear_session()
num_labels = self.num_labels
default_ind = self.label_mapping[self._parameters['default_label']]
# Remove the 3 output layers (dense_2', 'tf_op_layer_ArgMax',
# 'thresh_arg_max_layer')
for _ in range(3):
self._model.layers.pop()
# Add the final Softmax layer to the previous spot
final_softmax_layer = tf.keras.layers.Dense(
num_labels, activation='softmax', name="dense_2")(
self._model.layers[-4].output)
# Output the model into a .pb file for TensorFlow
argmax_layer = tf.keras.backend.argmax(final_softmax_layer)
# Create confidence layers
final_predicted_layer = CharacterLevelCnnModel._argmax_threshold_layer(
num_labels, threshold=0.0, default_ind=default_ind)
argmax_outputs = [final_softmax_layer] + \
[argmax_layer,
final_predicted_layer(argmax_layer,
final_softmax_layer)]
self._model = tf.keras.Model(self._model.inputs, argmax_outputs)
# Compile the model
softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
losses = {softmax_output_layer_name: "categorical_crossentropy"}
# use f1 score metric
f1_score_training = F1Score(num_classes=num_labels, average='micro')
metrics = {softmax_output_layer_name: ['acc', f1_score_training]}
self._model.compile(loss=losses,
optimizer="adam",
metrics=metrics)
self._epoch_id = 0
self._model_num_labels = num_labels
self._model_default_ind = default_ind
def fit(self, train_data, val_data=None, batch_size=32, label_mapping=None,
reset_weights=False, verbose=True):
"""
Train the current model with the training data and validation data
:param train_data: Training data used to train model
:type train_data: Union[list, np.ndarray]
:param val_data: Validation data used to validate the training
:type val_data: Union[list, np.ndarray]
:param batch_size: Used to determine number of samples in each batch
:type batch_size: int
:param label_mapping: maps labels to their encoded integers
:type label_mapping: Union[dict, None]
:param reset_weights: Flag to determine whether to reset the weights or
not
:type reset_weights: bool
:param verbose: Flag to determine whether to print status or not
:type verbose: bool
:return: None
"""
if label_mapping is not None:
self.set_label_mapping(label_mapping)
if not self._model:
self._construct_model()
else:
if self._need_to_reconstruct_model():
self._reconstruct_model()
if reset_weights:
self.reset_weights()
history = defaultdict()
f1 = None
f1_report = []
self._model.reset_metrics()
softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
start_time = time.time()
batch_id = 0
for x_train, y_train in train_data:
model_results = self._model.train_on_batch(
x_train, {softmax_output_layer_name: y_train})
sys.stdout.flush()
if verbose:
sys.stdout.write(
"\rEPOCH %d, batch_id %d: loss: %f - acc: %f - "
"f1_score %f" %
(self._epoch_id, batch_id, *model_results[1:]))
batch_id += 1
for i, metric_label in enumerate(self._model.metrics_names):
history[metric_label] = model_results[i]
if val_data:
f1, f1_report = self._validate_training(val_data)
history['f1_report'] = f1_report
val_f1 = f1_report['weighted avg']['f1-score'] \
if f1_report else np.NAN
val_precision = f1_report['weighted avg']['precision'] \
if f1_report else np.NAN
val_recall = f1_report['weighted avg']['recall'] \
if f1_report else np.NAN
epoch_time = time.time() - start_time
logger.info("\rEPOCH %d (%ds), loss: %f - acc: %f - f1_score %f -- "
"val_f1: %f - val_precision: %f - val_recall %f" %
(self._epoch_id, epoch_time, *model_results[1:],
val_f1, val_precision, val_recall))
self._epoch_id += 1
return history, f1, f1_report
def _validate_training(self, val_data, batch_size_test=32,
verbose_log=True, verbose_keras=False):
"""
Validate the model on the test set and return the evaluation metrics.
:param val_data: data generator for the validation
:type val_data: iterator
:param batch_size_test: Number of samples to process in testing
:type batch_size_test: int
:param verbose_log: whether or not to print out scores for training,
etc.
:type verbose_log: bool
:param verbose_keras: whether or not to print out scores for training,
from keras.
:type verbose_keras: bool
        :return: (f1-score, f1 report)
"""
f1 = None
f1_report = None
if val_data is None:
return f1, f1_report
# Predict on the test set
batch_id = 0
y_val_pred = []
y_val_test = []
for x_val, y_val in val_data:
y_val_pred.append(self._model.predict(
x_val, batch_size=batch_size_test, verbose=verbose_keras)[1])
y_val_test.append(np.argmax(y_val, axis=-1))
batch_id += 1
sys.stdout.flush()
if verbose_log:
sys.stdout.write("\rEPOCH %g, validation_batch_id %d" %
(self._epoch_id, batch_id))
tf.keras.backend.set_floatx('float32')
# Clean the predicted entities and the actual entities
f1, f1_report = labeler_utils.evaluate_accuracy(
np.concatenate(y_val_pred, axis=0),
np.concatenate(y_val_test, axis=0),
self.num_labels,
self.reverse_label_mapping,
verbose=verbose_keras)
return f1, f1_report
def predict(self, data, batch_size=32, show_confidences=False,
verbose=True):
"""
Run model and get predictions
:param data: text input
:type data: Union[list, numpy.ndarray]
:param batch_size: number of samples in the batch of data
:type batch_size: int
:param show_confidences: whether user wants prediction confidences
        :type show_confidences: bool
:param verbose: Flag to determine whether to print status or not
:type verbose: bool
:return: char level predictions and confidences
:rtype: dict
"""
if not self._model:
raise ValueError("You are trying to predict without a model. "
"Construct/Load a model before predicting.")
elif self._need_to_reconstruct_model():
raise RuntimeError("The model label mapping definitions have been "
"altered without additional training. Please "
"train the model or reset the label mapping to "
"predict.")
# Pre-allocate space for predictions
confidences = []
sentence_lengths = np.zeros((batch_size,), dtype=int)
predictions = np.zeros((batch_size, self._parameters['max_length']))
if show_confidences:
confidences = np.zeros((batch_size,
self._parameters['max_length'],
self.num_labels))
# Run model with batching
allocation_index = 0
for batch_id, batch_data in enumerate(data):
model_output = self._model(
tf.convert_to_tensor(batch_data)
)
# Count number of samples in batch to prevent array mismatch
num_samples_in_batch = len(batch_data)
allocation_index = batch_id * batch_size
# Double array size
if len(predictions) <= allocation_index:
predictions = np.pad(predictions, ((0, len(predictions)),
(0, 0)), mode='constant')
sentence_lengths = np.pad(
sentence_lengths, pad_width=((0, len(sentence_lengths)),),
mode='constant')
if show_confidences:
confidences = np.pad(confidences,
((0, len(predictions)),
(0, 0), (0, 0)), mode='constant')
if show_confidences:
confidences[allocation_index:allocation_index + num_samples_in_batch] = model_output[0].numpy()
predictions[allocation_index:allocation_index + num_samples_in_batch] = model_output[1].numpy()
sentence_lengths[allocation_index:allocation_index + num_samples_in_batch] = list(map(lambda x: len(x[0]), batch_data))
allocation_index += num_samples_in_batch
# Convert predictions, confidences to lists from numpy
predictions_list = [i for i in range(0, allocation_index)]
confidences_list = None
if show_confidences:
confidences_list = [i for i in range(0, allocation_index)]
# Append slices of predictions to return prediction & confidence matrices
for index, sentence_length \
in enumerate(sentence_lengths[:allocation_index]):
predictions_list[index] = list(predictions[index][:sentence_length])
if show_confidences:
confidences_list[index] = list(confidences[index][:sentence_length])
if show_confidences:
return {'pred': predictions_list, 'conf': confidences_list}
return {'pred': predictions_list}
def details(self):
"""
Prints the relevant details of the model (summary, parameters, label
mapping)
"""
print("\n###### Model Details ######\n")
self._model.summary()
print("\nModel Parameters:")
for key, value in self._parameters.items():
print("{}: {}".format(key, value))
print("\nModel Label Mapping:")
for key, value in self.label_mapping.items():
print("{}: {}".format(key, value))
| [((30, 12, 30, 43), 'logging.getLogger', 'logging.getLogger', ({(30, 30, 30, 42): '"""tensorflow"""'}, {}), "('tensorflow')", False, 'import logging\n'), ((34, 1, 34, 45), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((190, 1, 190, 45), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((17, 28, 17, 53), 'os.path.abspath', 'os.path.abspath', ({(17, 44, 17, 52): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((277, 10, 277, 48), 'sklearn.decomposition.PCA', 'decomposition.PCA', (), '', False, 'from sklearn import decomposition\n'), ((281, 15, 281, 43), 'os.path.dirname', 'os.path.dirname', ({(281, 31, 281, 42): 'source_file'}, {}), '(source_file)', False, 'import os\n'), ((131, 17, 131, 44), 'tensorflow.cast', 'tf.cast', ({(131, 25, 131, 31): 'y_true', (131, 33, 131, 43): 'self.dtype'}, {}), '(y_true, self.dtype)', True, 'import tensorflow as tf\n'), ((132, 17, 132, 44), 'tensorflow.cast', 'tf.cast', ({(132, 25, 132, 31): 'y_pred', (132, 33, 132, 43): 'self.dtype'}, {}), '(y_pred, self.dtype)', True, 'import tensorflow as tf\n'), ((149, 20, 151, 9), 'tensorflow.math.divide_no_nan', 'tf.math.divide_no_nan', ({(150, 12, 150, 31): 'self.true_positives', (150, 33, 150, 75): 'self.true_positives + self.false_positives'}, {}), '(self.true_positives, self.true_positives + self.\n false_positives)', True, 'import tensorflow as tf\n'), ((152, 17, 154, 9), 'tensorflow.math.divide_no_nan', 'tf.math.divide_no_nan', ({(153, 12, 153, 31): 'self.true_positives', (153, 33, 153, 75): 'self.true_positives + self.false_negatives'}, {}), '(self.true_positives, self.true_positives + self.\n false_negatives)', True, 'import tensorflow as tf\n'), ((158, 15, 158, 58), 'tensorflow.math.divide_no_nan', 'tf.math.divide_no_nan', ({(158, 37, 158, 46): 'mul_value', (158, 48, 158, 57): 'add_value'}, {}), '(mul_value, add_value)', True, 'import tensorflow as tf\n'), ((186, 22, 186, 65), 'tensorflow.zeros', 'tf.zeros', (), '', True, 'import tensorflow as tf\n'), ((187, 8, 187, 84), 'tensorflow.keras.backend.batch_set_value', 'tf.keras.backend.batch_set_value', ({(187, 41, 187, 83): '[(v, reset_value) for v in self.variables]'}, {}), '([(v, reset_value) for v in self.variables])', True, 'import tensorflow as tf\n'), ((268, 22, 269, 73), 'os.path.join', 'os.path.join', ({(268, 35, 268, 44): '_file_dir', (269, 35, 269, 72): '"""embeddings/glove.840B.300d-char.txt"""'}, {}), "(_file_dir, 'embeddings/glove.840B.300d-char.txt')", False, 'import os\n'), ((417, 24, 417, 52), 'copy.deepcopy', 'copy.deepcopy', ({(417, 38, 417, 51): 'label_mapping'}, {}), '(label_mapping)', False, 'import copy\n'), ((457, 30, 457, 76), 'os.path.join', 'os.path.join', ({(457, 43, 457, 50): 'dirpath', (457, 52, 457, 75): '"""model_parameters.json"""'}, {}), "(dirpath, 'model_parameters.json')", False, 'import os\n'), ((460, 25, 460, 68), 'os.path.join', 'os.path.join', ({(460, 38, 460, 45): 'dirpath', (460, 47, 460, 67): '"""label_mapping.json"""'}, {}), "(dirpath, 'label_mapping.json')", False, 'import os\n'), ((476, 30, 476, 76), 'os.path.join', 'os.path.join', ({(476, 43, 476, 50): 'dirpath', (476, 52, 476, 75): '"""model_parameters.json"""'}, {}), "(dirpath, 'model_parameters.json')", False, 'import os\n'), ((481, 25, 481, 68), 'os.path.join', 'os.path.join', ({(481, 38, 481, 45): 'dirpath', (481, 47, 481, 67): 
'"""label_mapping.json"""'}, {}), "(dirpath, 'label_mapping.json')", False, 'import os\n'), ((534, 28, 534, 62), 'tensorflow.reshape', 'tf.reshape', ({(534, 39, 534, 55): 'input_str_tensor', (534, 57, 534, 61): '[-1]'}, {}), '(input_str_tensor, [-1])', True, 'import tensorflow as tf\n'), ((535, 27, 536, 76), 'tensorflow.strings.unicode_decode', 'tf.strings.unicode_decode', (), '', True, 'import tensorflow as tf\n'), ((538, 27, 539, 68), 'tensorflow.math.minimum', 'tf.math.minimum', ({(538, 43, 538, 59): 'sentences_encode', (539, 43, 539, 67): 'max_char_encoding_id + 1'}, {}), '(sentences_encode, max_char_encoding_id + 1)', True, 'import tensorflow as tf\n'), ((612, 8, 612, 40), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((618, 22, 618, 50), 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((642, 27, 643, 68), 'numpy.zeros', 'np.zeros', ({(642, 36, 643, 67): "(max_char_encoding_id + 2, self._parameters['dim_embed'])"}, {}), "((max_char_encoding_id + 2, self._parameters['dim_embed']))", True, 'import numpy as np\n'), ((684, 23, 684, 66), 'tensorflow.keras.backend.argmax', 'tf.keras.backend.argmax', ({(684, 47, 684, 65): 'self._model.output'}, {}), '(self._model.output)', True, 'import tensorflow as tf\n'), ((693, 22, 693, 72), 'tensorflow.keras.Model', 'tf.keras.Model', ({(693, 37, 693, 55): 'self._model.inputs', (693, 57, 693, 71): 'argmax_outputs'}, {}), '(self._model.inputs, argmax_outputs)', True, 'import tensorflow as tf\n'), ((728, 8, 728, 40), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((744, 23, 744, 67), 'tensorflow.keras.backend.argmax', 'tf.keras.backend.argmax', ({(744, 47, 744, 66): 'final_softmax_layer'}, {}), '(final_softmax_layer)', True, 'import tensorflow as tf\n'), ((754, 22, 754, 72), 'tensorflow.keras.Model', 'tf.keras.Model', ({(754, 37, 754, 55): 'self._model.inputs', (754, 57, 754, 71): 'argmax_outputs'}, {}), '(self._model.inputs, argmax_outputs)', True, 'import tensorflow as tf\n'), ((803, 18, 803, 31), 'collections.defaultdict', 'defaultdict', ({}, {}), '()', False, 'from collections import defaultdict\n'), ((810, 21, 810, 32), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((883, 8, 883, 46), 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', ({(883, 36, 883, 45): '"""float32"""'}, {}), "('float32')", True, 'import tensorflow as tf\n'), ((920, 27, 920, 61), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((921, 22, 921, 76), 'numpy.zeros', 'np.zeros', ({(921, 31, 921, 75): "(batch_size, self._parameters['max_length'])"}, {}), "((batch_size, self._parameters['max_length']))", True, 'import numpy as np\n'), ((124, 24, 124, 69), 'tensorflow.reduce_max', 'tf.reduce_max', (), '', True, 'import tensorflow as tf\n'), ((137, 19, 137, 53), 'tensorflow.reduce_sum', 'tf.reduce_sum', (), '', True, 'import tensorflow as tf\n'), ((165, 23, 165, 56), 'tensorflow.reduce_sum', 'tf.reduce_sum', ({(165, 37, 165, 55): 'f1_score * weights'}, {}), '(f1_score * weights)', True, 'import tensorflow as tf\n'), ((252, 34, 252, 54), 'numpy.asarray', 'np.asarray', ({(252, 45, 252, 53): 'line[1:]'}, {}), '(line[1:])', True, 'import numpy as np\n'), ((273, 8, 273, 22), 'numpy.asarray', 'np.asarray', ({(273, 19, 273, 21): 'ls'}, {}), '(ls)', True, 'import numpy as np\n'), ((459, 12, 459, 43), 
'json.dump', 'json.dump', ({(459, 22, 459, 38): 'self._parameters', (459, 40, 459, 42): 'fp'}, {}), '(self._parameters, fp)', False, 'import json\n'), ((462, 12, 462, 45), 'json.dump', 'json.dump', ({(462, 22, 462, 40): 'self.label_mapping', (462, 42, 462, 44): 'fp'}, {}), '(self.label_mapping, fp)', False, 'import json\n'), ((463, 25, 463, 46), 'os.path.join', 'os.path.join', ({(463, 38, 463, 45): 'dirpath'}, {}), '(dirpath)', False, 'import os\n'), ((478, 25, 478, 38), 'json.load', 'json.load', ({(478, 35, 478, 37): 'fp'}, {}), '(fp)', False, 'import json\n'), ((483, 28, 483, 41), 'json.load', 'json.load', ({(483, 38, 483, 40): 'fp'}, {}), '(fp)', False, 'import json\n'), ((492, 13, 492, 63), 'tensorflow.keras.utils.custom_object_scope', 'tf.keras.utils.custom_object_scope', ({(492, 48, 492, 62): 'custom_objects'}, {}), '(custom_objects)', True, 'import tensorflow as tf\n'), ((493, 23, 493, 58), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', ({(493, 50, 493, 57): 'dirpath'}, {}), '(dirpath)', True, 'import tensorflow as tf\n'), ((537, 34, 537, 54), 'tensorflow.cast', 'tf.cast', ({(537, 42, 537, 43): '1', (537, 45, 537, 53): 'tf.int32'}, {}), '(1, tf.int32)', True, 'import tensorflow as tf\n'), ((630, 24, 630, 77), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (), '', True, 'import tensorflow as tf\n'), ((652, 24, 657, 27), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (), '', True, 'import tensorflow as tf\n'), ((681, 12, 681, 67), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (), '', True, 'import tensorflow as tf\n'), ((739, 30, 740, 61), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (), '', True, 'import tensorflow as tf\n'), ((815, 12, 815, 30), 'sys.stdout.flush', 'sys.stdout.flush', ({}, {}), '()', False, 'import sys\n'), ((878, 12, 878, 30), 'sys.stdout.flush', 'sys.stdout.flush', ({}, {}), '()', False, 'import sys\n'), ((886, 12, 886, 46), 'numpy.concatenate', 'np.concatenate', (), '', True, 'import numpy as np\n'), ((887, 12, 887, 46), 'numpy.concatenate', 'np.concatenate', (), '', True, 'import numpy as np\n'), ((923, 26, 925, 53), 'numpy.zeros', 'np.zeros', ({(923, 35, 925, 52): "(batch_size, self._parameters['max_length'], self.num_labels)"}, {}), "((batch_size, self._parameters['max_length'], self.num_labels))", True, 'import numpy as np\n'), ((157, 21, 157, 46), 'tensorflow.math.square', 'tf.math.square', ({(157, 36, 157, 45): 'self.beta'}, {}), '(self.beta)', True, 'import tensorflow as tf\n'), ((159, 31, 159, 56), 'tensorflow.math.square', 'tf.math.square', ({(159, 46, 159, 55): 'self.beta'}, {}), '(self.beta)', True, 'import tensorflow as tf\n'), ((163, 43, 163, 83), 'tensorflow.reduce_sum', 'tf.reduce_sum', ({(163, 57, 163, 82): 'self.weights_intermediate'}, {}), '(self.weights_intermediate)', True, 'import tensorflow as tf\n'), ((168, 23, 168, 47), 'tensorflow.reduce_mean', 'tf.reduce_mean', ({(168, 38, 168, 46): 'f1_score'}, {}), '(f1_score)', True, 'import tensorflow as tf\n'), ((565, 30, 565, 65), 'tensorflow.constant_initializer', 'tf.constant_initializer', ({(565, 54, 565, 64): 'threshold_'}, {}), '(threshold_)', True, 'import tensorflow as tf\n'), ((572, 38, 572, 78), 'tensorflow.gather', 'tf.gather', ({(572, 48, 572, 63): 'self.thresh_vec', (572, 65, 572, 77): 'argmax_layer'}, {}), '(self.thresh_vec, argmax_layer)', True, 'import tensorflow as tf\n'), ((574, 39, 575, 67), 'tensorflow.keras.backend.max', 'tf.keras.backend.max', (), '', True, 'import tensorflow as tf\n'), ((585, 30, 586, 
58), 'tensorflow.keras.backend.constant', 'tf.keras.backend.constant', (), '', True, 'import tensorflow as tf\n'), ((661, 28, 663, 50), 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', (), '', True, 'import tensorflow as tf\n'), ((669, 16, 669, 75), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', (), '', True, 'import tensorflow as tf\n'), ((674, 16, 674, 68), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (), '', True, 'import tensorflow as tf\n'), ((817, 16, 820, 67), 'sys.stdout.write', 'sys.stdout.write', ({(818, 20, 820, 66): "('\\rEPOCH %d, batch_id %d: loss: %f - acc: %f - f1_score %f' % (self.\n _epoch_id, batch_id, *model_results[1:]))"}, {}), "(\n '\\rEPOCH %d, batch_id %d: loss: %f - acc: %f - f1_score %f' % (self.\n _epoch_id, batch_id, *model_results[1:]))", False, 'import sys\n'), ((836, 25, 836, 36), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((876, 30, 876, 55), 'numpy.argmax', 'np.argmax', (), '', True, 'import numpy as np\n'), ((880, 16, 881, 60), 'sys.stdout.write', 'sys.stdout.write', ({(880, 33, 881, 59): "('\\rEPOCH %g, validation_batch_id %d' % (self._epoch_id, batch_id))"}, {}), "('\\rEPOCH %g, validation_batch_id %d' % (self._epoch_id,\n batch_id))", False, 'import sys\n'), ((931, 16, 931, 48), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ({(931, 37, 931, 47): 'batch_data'}, {}), '(batch_data)', True, 'import tensorflow as tf\n'), ((127, 57, 127, 71), 'tensorflow.abs', 'tf.abs', ({(127, 64, 127, 70): 'y_pred'}, {}), '(y_pred)', True, 'import tensorflow as tf\n'), ((136, 44, 136, 76), 'tensorflow.expand_dims', 'tf.expand_dims', ({(136, 59, 136, 72): 'sample_weight', (136, 74, 136, 75): '1'}, {}), '(sample_weight, 1)', True, 'import tensorflow as tf\n'), ((579, 20, 580, 68), 'tensorflow.keras.backend.greater_equal', 'tf.keras.backend.greater_equal', ({(579, 51, 579, 71): 'confidence_max_layer', (580, 48, 580, 67): 'threshold_at_argmax'}, {}), '(confidence_max_layer, threshold_at_argmax)', True, 'import tensorflow as tf\n'), ((666, 20, 666, 72), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ({(666, 44, 666, 71): "self._parameters['dropout']"}, {}), "(self._parameters['dropout'])", True, 'import tensorflow as tf\n'), ((677, 20, 677, 72), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ({(677, 44, 677, 71): "self._parameters['dropout']"}, {}), "(self._parameters['dropout'])", True, 'import tensorflow as tf\n'), ((592, 24, 592, 62), 'tensorflow.subtract', 'tf.subtract', ({(592, 36, 592, 48): 'argmax_layer', (592, 50, 592, 61): 'bg_label_tf'}, {}), '(argmax_layer, bg_label_tf)', True, 'import tensorflow as tf\n')] |
Nipica/airflow | airflow/contrib/plugins/metastore_browser/main.py | 211a71f8a6b9d808bd03af84bd77bf8ff0ef247f | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import json
from flask import Blueprint, request
from flask_admin import BaseView, expose
import pandas as pd
from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.presto_hook import PrestoHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
from airflow.www.decorators import gzipped
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a flask admin BaseView
class MetastoreBrowserView(BaseView, wwwutils.DataProfilingMixin):
@expose('/')
def index(self):
sql = """
SELECT
a.name as db, db_location_uri as location,
count(1) as object_count, a.desc as description
FROM DBS a
JOIN TBLS b ON a.DB_ID = b.DB_ID
GROUP BY a.name, db_location_uri, a.desc
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
df.db = (
'<a href="/admin/metastorebrowserview/db/?db=' +
df.db + '">' + df.db + '</a>')
table = df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
escape=False,
na_rep='',)
return self.render(
"metastore_browser/dbs.html", table=table)
@expose('/table/')
def table(self):
table_name = request.args.get("table")
m = HiveMetastoreHook(METASTORE_CONN_ID)
table = m.get_table(table_name)
return self.render(
"metastore_browser/table.html",
table=table, table_name=table_name, datetime=datetime, int=int)
@expose('/db/')
def db(self):
db = request.args.get("db")
m = HiveMetastoreHook(METASTORE_CONN_ID)
tables = sorted(m.get_tables(db=db), key=lambda x: x.tableName)
return self.render(
"metastore_browser/db.html", tables=tables, db=db)
@gzipped
@expose('/partitions/')
def partitions(self):
schema, table = request.args.get("table").split('.')
sql = """
SELECT
a.PART_NAME,
a.CREATE_TIME,
c.LOCATION,
c.IS_COMPRESSED,
c.INPUT_FORMAT,
c.OUTPUT_FORMAT
FROM PARTITIONS a
JOIN TBLS b ON a.TBL_ID = b.TBL_ID
JOIN DBS d ON b.DB_ID = d.DB_ID
JOIN SDS c ON a.SD_ID = c.SD_ID
WHERE
b.TBL_NAME like '{table}' AND
d.NAME like '{schema}'
ORDER BY PART_NAME DESC
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@gzipped
@expose('/objects/')
def objects(self):
where_clause = ''
if DB_WHITELIST:
dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
where_clause = "AND b.name IN ({})".format(dbs)
if DB_BLACKLIST:
dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
where_clause = "AND b.name NOT IN ({})".format(dbs)
sql = """
SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
FROM TBLS a
JOIN DBS b ON a.DB_ID = b.DB_ID
WHERE
a.TBL_NAME NOT LIKE '%tmp%' AND
a.TBL_NAME NOT LIKE '%temp%' AND
b.NAME NOT LIKE '%tmp%' AND
b.NAME NOT LIKE '%temp%'
{where_clause}
LIMIT {LIMIT};
""".format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
d = [
{'id': row[0], 'text': row[0]}
for row in h.get_records(sql)]
return json.dumps(d)
@gzipped
@expose('/data/')
def data(self):
table = request.args.get("table")
sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
h = PrestoHook(PRESTO_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@expose('/ddl/')
def ddl(self):
table = request.args.get("table")
sql = "SHOW CREATE TABLE {table};".format(table=table)
h = HiveCliHook(HIVE_CLI_CONN_ID)
return h.run_cli(sql)
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"metastore_browser", __name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
name = "metastore_browser"
flask_blueprints = [bp]
admin_views = [v]
| [((44, 0, 44, 41), 'pandas.set_option', 'pd.set_option', ({(44, 14, 44, 36): '"""display.max_colwidth"""', (44, 38, 44, 40): '(-1)'}, {}), "('display.max_colwidth', -1)", True, 'import pandas as pd\n'), ((169, 5, 173, 48), 'flask.Blueprint', 'Blueprint', (), '', False, 'from flask import Blueprint, request\n'), ((50, 5, 50, 16), 'flask_admin.expose', 'expose', ({(50, 12, 50, 15): '"""/"""'}, {}), "('/')", False, 'from flask_admin import BaseView, expose\n'), ((73, 5, 73, 22), 'flask_admin.expose', 'expose', ({(73, 12, 73, 21): '"""/table/"""'}, {}), "('/table/')", False, 'from flask_admin import BaseView, expose\n'), ((82, 5, 82, 19), 'flask_admin.expose', 'expose', ({(82, 12, 82, 18): '"""/db/"""'}, {}), "('/db/')", False, 'from flask_admin import BaseView, expose\n'), ((91, 5, 91, 27), 'flask_admin.expose', 'expose', ({(91, 12, 91, 26): '"""/partitions/"""'}, {}), "('/partitions/')", False, 'from flask_admin import BaseView, expose\n'), ((119, 5, 119, 24), 'flask_admin.expose', 'expose', ({(119, 12, 119, 23): '"""/objects/"""'}, {}), "('/objects/')", False, 'from flask_admin import BaseView, expose\n'), ((147, 5, 147, 21), 'flask_admin.expose', 'expose', ({(147, 12, 147, 20): '"""/data/"""'}, {}), "('/data/')", False, 'from flask_admin import BaseView, expose\n'), ((158, 5, 158, 20), 'flask_admin.expose', 'expose', ({(158, 12, 158, 19): '"""/ddl/"""'}, {}), "('/ddl/')", False, 'from flask_admin import BaseView, expose\n'), ((60, 12, 60, 46), 'airflow.hooks.mysql_hook.MySqlHook', 'MySqlHook', ({(60, 22, 60, 45): 'METASTORE_MYSQL_CONN_ID'}, {}), '(METASTORE_MYSQL_CONN_ID)', False, 'from airflow.hooks.mysql_hook import MySqlHook\n'), ((75, 21, 75, 46), 'flask.request.args.get', 'request.args.get', ({(75, 38, 75, 45): '"""table"""'}, {}), "('table')", False, 'from flask import Blueprint, request\n'), ((76, 12, 76, 48), 'airflow.hooks.hive_hooks.HiveMetastoreHook', 'HiveMetastoreHook', ({(76, 30, 76, 47): 'METASTORE_CONN_ID'}, {}), '(METASTORE_CONN_ID)', False, 'from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook\n'), ((84, 13, 84, 35), 'flask.request.args.get', 'request.args.get', ({(84, 30, 84, 34): '"""db"""'}, {}), "('db')", False, 'from flask import Blueprint, request\n'), ((85, 12, 85, 48), 'airflow.hooks.hive_hooks.HiveMetastoreHook', 'HiveMetastoreHook', ({(85, 30, 85, 47): 'METASTORE_CONN_ID'}, {}), '(METASTORE_CONN_ID)', False, 'from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook\n'), ((111, 12, 111, 46), 'airflow.hooks.mysql_hook.MySqlHook', 'MySqlHook', ({(111, 22, 111, 45): 'METASTORE_MYSQL_CONN_ID'}, {}), '(METASTORE_MYSQL_CONN_ID)', False, 'from airflow.hooks.mysql_hook import MySqlHook\n'), ((140, 12, 140, 46), 'airflow.hooks.mysql_hook.MySqlHook', 'MySqlHook', ({(140, 22, 140, 45): 'METASTORE_MYSQL_CONN_ID'}, {}), '(METASTORE_MYSQL_CONN_ID)', False, 'from airflow.hooks.mysql_hook import MySqlHook\n'), ((144, 15, 144, 28), 'json.dumps', 'json.dumps', ({(144, 26, 144, 27): 'd'}, {}), '(d)', False, 'import json\n'), ((149, 16, 149, 41), 'flask.request.args.get', 'request.args.get', ({(149, 33, 149, 40): '"""table"""'}, {}), "('table')", False, 'from flask import Blueprint, request\n'), ((151, 12, 151, 38), 'airflow.hooks.presto_hook.PrestoHook', 'PrestoHook', ({(151, 23, 151, 37): 'PRESTO_CONN_ID'}, {}), '(PRESTO_CONN_ID)', False, 'from airflow.hooks.presto_hook import PrestoHook\n'), ((160, 16, 160, 41), 'flask.request.args.get', 'request.args.get', ({(160, 33, 160, 40): '"""table"""'}, {}), "('table')", False, 'from flask import Blueprint, 
request\n'), ((162, 12, 162, 41), 'airflow.hooks.hive_hooks.HiveCliHook', 'HiveCliHook', ({(162, 24, 162, 40): 'HIVE_CLI_CONN_ID'}, {}), '(HIVE_CLI_CONN_ID)', False, 'from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook\n'), ((93, 24, 93, 49), 'flask.request.args.get', 'request.args.get', ({(93, 41, 93, 48): '"""table"""'}, {}), "('table')", False, 'from flask import Blueprint, request\n')] |
AaronDewes/compose-nonfree | app/lib/manage.py | 82ef3e58019ee03d163dea7aff4d7ed18d884238 | #!/usr/bin/env python3
# SPDX-FileCopyrightText: 2021 Aaron Dewes <[email protected]>
#
# SPDX-License-Identifier: MIT
import stat
import tempfile
import threading
from typing import List
from sys import argv
import os
import requests
import shutil
import json
import yaml
import subprocess
from lib.composegenerator.v0.generate import createComposeConfigFromV0
from lib.composegenerator.v1.generate import createComposeConfigFromV1
from lib.appymlgenerator import convertComposeYMLToAppYML
from lib.validate import findAndValidateApps
from lib.metadata import getAppRegistry, getSimpleAppRegistry
from lib.entropy import deriveEntropy
# For an array of threads, join them and wait for them to finish
def joinThreads(threads: List[threading.Thread]):
for thread in threads:
thread.join()
# The directory with this script
scriptDir = os.path.dirname(os.path.realpath(__file__))
nodeRoot = os.path.join(scriptDir, "..", "..")
appsDir = os.path.join(nodeRoot, "apps")
appSystemDir = os.path.join(nodeRoot, "app-system")
sourcesList = os.path.join(appSystemDir, "sources.list")
appDataDir = os.path.join(nodeRoot, "app-data")
userFile = os.path.join(nodeRoot, "db", "user.json")
legacyScript = os.path.join(nodeRoot, "scripts", "app")
def runCompose(app: str, args: str):
compose(app, args)
# Returns every argument after the second one in sys.argv, joined into a single space-separated string
def getArguments():
arguments = ""
for i in range(3, len(argv)):
arguments += argv[i] + " "
return arguments
def getAppYml(name):
url = 'https://raw.githubusercontent.com/runcitadel/compose-nonfree/main/apps/' + \
name + '/' + 'app.yml'
response = requests.get(url)
if response.status_code == 200:
return response.text
else:
return False
def getAppYmlPath(app):
return os.path.join(appsDir, app, 'app.yml')
def composeToAppYml(app):
composeFile = os.path.join(appsDir, app, "docker-compose.yml")
appYml = os.path.join(appsDir, app, "app.yml")
# Read the compose file and parse it
with open(composeFile, "r") as f:
compose = yaml.safe_load(f)
registry = os.path.join(appsDir, "registry.json")
# Load the registry
with open(registry, "r") as f:
registryData = json.load(f)
converted = convertComposeYMLToAppYML(compose, app, registryData)
# Put converted into the app.yml after encoding it as YAML
with open(appYml, "w") as f:
f.write(yaml.dump(converted, sort_keys=False))
def update(verbose: bool = False):
apps = findAndValidateApps(appsDir)
# The compose generation process updates the registry, so we need to get it set up with the basics before that
registry = getAppRegistry(apps, appsDir)
with open(os.path.join(appsDir, "registry.json"), "w") as f:
json.dump(registry, f, indent=4, sort_keys=True)
print("Wrote registry to registry.json")
simpleRegistry = getSimpleAppRegistry(apps, appsDir)
with open(os.path.join(appSystemDir, "apps.json"), "w") as f:
json.dump(simpleRegistry, f, indent=4, sort_keys=True)
print("Wrote version information to apps.json")
# Loop through the apps and generate valid compose files from them, then put these into the app dir
for app in apps:
composeFile = os.path.join(appsDir, app, "docker-compose.yml")
appYml = os.path.join(appsDir, app, "app.yml")
with open(composeFile, "w") as f:
appCompose = getApp(appYml, app)
if(appCompose):
f.write(yaml.dump(appCompose, sort_keys=False))
if verbose:
print("Wrote " + app + " to " + composeFile)
print("Generated configuration successfully")
def download(app: str = None):
if(app is None):
apps = findAndValidateApps(appsDir)
for app in apps:
data = getAppYml(app)
if data:
with open(getAppYmlPath(app), 'w') as f:
f.write(data)
else:
print("Warning: Could not download " + app)
else:
data = getAppYml(app)
if data:
with open(getAppYmlPath(app), 'w') as f:
f.write(data)
else:
print("Warning: Could not download " + app)
def getUserData():
userData = {}
if os.path.isfile(userFile):
with open(userFile, "r") as f:
userData = json.load(f)
return userData
def startInstalled():
    # If the user file doesn't exist, just do nothing
userData = {}
if os.path.isfile(userFile):
with open(userFile, "r") as f:
userData = json.load(f)
threads = []
for app in userData["installedApps"]:
print("Starting app {}...".format(app))
        # Run runCompose(app, "up --detach") asynchronously for all apps, then wait for every thread to finish
thread = threading.Thread(target=runCompose, args=(app, "up --detach"))
thread.start()
threads.append(thread)
joinThreads(threads)
def stopInstalled():
    # If the user file doesn't exist, just do nothing
userData = {}
if os.path.isfile(userFile):
with open(userFile, "r") as f:
userData = json.load(f)
threads = []
for app in userData["installedApps"]:
print("Stopping app {}...".format(app))
        # Run runCompose(app, "rm --force --stop") asynchronously for all apps, then wait for every thread to finish
thread = threading.Thread(
target=runCompose, args=(app, "rm --force --stop"))
thread.start()
threads.append(thread)
joinThreads(threads)
# Loads an app.yml and converts it to a docker-compose.yml
def getApp(appFile: str, appId: str):
with open(appFile, 'r') as f:
app = yaml.safe_load(f)
if not "metadata" in app:
raise Exception("Error: Could not find metadata in " + appFile)
app["metadata"]["id"] = appId
if('version' in app and str(app['version']) == "1"):
return createComposeConfigFromV1(app, nodeRoot)
else:
return createComposeConfigFromV0(app)
def compose(app, arguments):
# Runs a compose command in the app dir
# Before that, check if a docker-compose.yml exists in the app dir
composeFile = os.path.join(appsDir, app, "docker-compose.yml")
commonComposeFile = os.path.join(appSystemDir, "docker-compose.common.yml")
os.environ["APP_DOMAIN"] = subprocess.check_output(
"hostname -s 2>/dev/null || echo 'umbrel'", shell=True).decode("utf-8") + ".local"
os.environ["APP_HIDDEN_SERVICE"] = subprocess.check_output("cat {} 2>/dev/null || echo 'notyetset.onion'".format(
os.path.join(nodeRoot, "tor", "data", "app-{}/hostname".format(app))), shell=True).decode("utf-8")
os.environ["APP_SEED"] = deriveEntropy("app-{}-seed".format(app))
    # Expose additional app seeds APP_SEED_1 through APP_SEED_5, each derived deterministically in a loop
for i in range(1, 6):
os.environ["APP_SEED_{}".format(i)] = deriveEntropy("app-{}-seed{}".format(app, i))
os.environ["APP_DATA_DIR"] = os.path.join(appDataDir, app)
os.environ["BITCOIN_DATA_DIR"] = os.path.join(nodeRoot, "bitcoin")
os.environ["LND_DATA_DIR"] = os.path.join(nodeRoot, "lnd")
# List all hidden services for an app and put their hostname in the environment
hiddenServices: List[str] = getAppHiddenServices(app)
for service in hiddenServices:
appHiddenServiceFile = os.path.join(
nodeRoot, "tor", "data", "app-{}-{}/hostname".format(app, service))
os.environ["APP_HIDDEN_SERVICE_{}".format(service.upper().replace("-", "_"))] = subprocess.check_output("cat {} 2>/dev/null || echo 'notyetset.onion'".format(
appHiddenServiceFile), shell=True).decode("utf-8")
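    # Illustration for clarity (not in the original source; the names are hypothetical): for an app
    # "myapp" with a hidden service directory "app-myapp-web-ui", the loop above sets the environment
    # variable APP_HIDDEN_SERVICE_WEB_UI to that service's .onion hostname.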
if not os.path.isfile(composeFile):
print("Error: Could not find docker-compose.yml in " + app)
exit(1)
os.system(
"docker compose --env-file '{}' --project-name '{}' --file '{}' --file '{}' {}".format(
os.path.join(nodeRoot, ".env"), app, commonComposeFile, composeFile, arguments))
def remove_readonly(func, path, _):
os.chmod(path, stat.S_IWRITE)
func(path)
def deleteData(app: str):
dataDir = os.path.join(appDataDir, app)
try:
shutil.rmtree(dataDir, onerror=remove_readonly)
except FileNotFoundError:
pass
def createDataDir(app: str):
dataDir = os.path.join(appDataDir, app)
appDir = os.path.join(appsDir, app)
if os.path.isdir(dataDir):
deleteData(app)
# Recursively copy everything from appDir to dataDir while excluding .gitignore
shutil.copytree(appDir, dataDir, symlinks=False,
ignore=shutil.ignore_patterns(".gitignore"))
# Chown and chmod dataDir to have the same owner and permissions as appDir
os.chown(dataDir, os.stat(appDir).st_uid, os.stat(appDir).st_gid)
os.chmod(dataDir, os.stat(appDir).st_mode)
def setInstalled(app: str):
userData = getUserData()
if not "installedApps" in userData:
userData["installedApps"] = []
userData["installedApps"].append(app)
userData["installedApps"] = list(set(userData["installedApps"]))
with open(userFile, "w") as f:
json.dump(userData, f)
def setRemoved(app: str):
userData = getUserData()
if not "installedApps" in userData:
return
userData["installedApps"] = list(set(userData["installedApps"]))
userData["installedApps"].remove(app)
with open(userFile, "w") as f:
json.dump(userData, f)
def getAppHiddenServices(app: str):
torDir = os.path.join(nodeRoot, "tor", "data")
# List all subdirectories of torDir which start with app-${APP}-
# but return them without the app-${APP}- prefix
results = []
for subdir in os.listdir(torDir):
if subdir.startswith("app-{}-".format(app)):
results.append(subdir[len("app-{}-".format(app)):])
return results
# Parse the sources.list repo file, which contains a list of sources in the format
# <git-url> <branch>
# For every line, clone the repo to a temporary dir and checkout the branch
# Then, inspect that repo's apps in the temporary dir/apps and, for every app,
# overwrite the current app dir with the contents of the temporary dir/apps/app
# Also, keep a list of apps from every repo, a repo later in the file may not overwrite an app from a repo earlier in the file
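# Example for illustration only (the URLs and branches below are hypothetical, not taken from the repo):
# a sources.list in the format described above could contain lines such as
#   https://github.com/example/main-apps.git main
#   https://github.com/example/extra-apps.git v1.0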
def updateRepos():
# Get the list of repos
repos = []
with open(sourcesList) as f:
repos = f.readlines()
# For each repo, clone the repo to a temporary dir, checkout the branch,
# and overwrite the current app dir with the contents of the temporary dir/apps/app
alreadyInstalled = []
for repo in repos:
repo = repo.strip()
if repo == "":
continue
# Split the repo into the git url and the branch
repo = repo.split(" ")
if len(repo) != 2:
print("Error: Invalid repo format in " + sourcesList)
exit(1)
gitUrl = repo[0]
branch = repo[1]
# Clone the repo to a temporary dir
tempDir = tempfile.mkdtemp()
print("Cloning the repository")
        # Git clone with a depth of 1 to avoid downloading the repository's entire history
        # Don't print anything to stdout, as we don't want to see the git clone output
subprocess.run("git clone --depth 1 {} {}".format(gitUrl, tempDir), shell=True, stdout=subprocess.DEVNULL)
# Overwrite the current app dir with the contents of the temporary dir/apps/app
for app in os.listdir(os.path.join(tempDir, "apps")):
# if the app is already installed, don't overwrite it
if app in alreadyInstalled:
continue
if os.path.isdir(os.path.join(appsDir, app)):
shutil.rmtree(os.path.join(appsDir, app), onerror=remove_readonly)
if os.path.isdir(os.path.join(tempDir, "apps", app)):
shutil.copytree(os.path.join(tempDir, "apps", app), os.path.join(appsDir, app),
symlinks=False, ignore=shutil.ignore_patterns(".gitignore"))
alreadyInstalled.append(app)
# Remove the temporary dir
shutil.rmtree(tempDir)
| [((36, 11, 36, 46), 'os.path.join', 'os.path.join', ({(36, 24, 36, 33): 'scriptDir', (36, 35, 36, 39): '""".."""', (36, 41, 36, 45): '""".."""'}, {}), "(scriptDir, '..', '..')", False, 'import os\n'), ((37, 10, 37, 40), 'os.path.join', 'os.path.join', ({(37, 23, 37, 31): 'nodeRoot', (37, 33, 37, 39): '"""apps"""'}, {}), "(nodeRoot, 'apps')", False, 'import os\n'), ((38, 15, 38, 51), 'os.path.join', 'os.path.join', ({(38, 28, 38, 36): 'nodeRoot', (38, 38, 38, 50): '"""app-system"""'}, {}), "(nodeRoot, 'app-system')", False, 'import os\n'), ((39, 14, 39, 56), 'os.path.join', 'os.path.join', ({(39, 27, 39, 39): 'appSystemDir', (39, 41, 39, 55): '"""sources.list"""'}, {}), "(appSystemDir, 'sources.list')", False, 'import os\n'), ((40, 13, 40, 47), 'os.path.join', 'os.path.join', ({(40, 26, 40, 34): 'nodeRoot', (40, 36, 40, 46): '"""app-data"""'}, {}), "(nodeRoot, 'app-data')", False, 'import os\n'), ((41, 11, 41, 52), 'os.path.join', 'os.path.join', ({(41, 24, 41, 32): 'nodeRoot', (41, 34, 41, 38): '"""db"""', (41, 40, 41, 51): '"""user.json"""'}, {}), "(nodeRoot, 'db', 'user.json')", False, 'import os\n'), ((42, 15, 42, 55), 'os.path.join', 'os.path.join', ({(42, 28, 42, 36): 'nodeRoot', (42, 38, 42, 47): '"""scripts"""', (42, 49, 42, 54): '"""app"""'}, {}), "(nodeRoot, 'scripts', 'app')", False, 'import os\n'), ((35, 28, 35, 54), 'os.path.realpath', 'os.path.realpath', ({(35, 45, 35, 53): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((61, 15, 61, 32), 'requests.get', 'requests.get', ({(61, 28, 61, 31): 'url'}, {}), '(url)', False, 'import requests\n'), ((69, 11, 69, 48), 'os.path.join', 'os.path.join', ({(69, 24, 69, 31): 'appsDir', (69, 33, 69, 36): 'app', (69, 38, 69, 47): '"""app.yml"""'}, {}), "(appsDir, app, 'app.yml')", False, 'import os\n'), ((73, 18, 73, 66), 'os.path.join', 'os.path.join', ({(73, 31, 73, 38): 'appsDir', (73, 40, 73, 43): 'app', (73, 45, 73, 65): '"""docker-compose.yml"""'}, {}), "(appsDir, app, 'docker-compose.yml')", False, 'import os\n'), ((74, 13, 74, 50), 'os.path.join', 'os.path.join', ({(74, 26, 74, 33): 'appsDir', (74, 35, 74, 38): 'app', (74, 40, 74, 49): '"""app.yml"""'}, {}), "(appsDir, app, 'app.yml')", False, 'import os\n'), ((78, 15, 78, 53), 'os.path.join', 'os.path.join', ({(78, 28, 78, 35): 'appsDir', (78, 37, 78, 52): '"""registry.json"""'}, {}), "(appsDir, 'registry.json')", False, 'import os\n'), ((82, 16, 82, 69), 'lib.appymlgenerator.convertComposeYMLToAppYML', 'convertComposeYMLToAppYML', ({(82, 42, 82, 49): 'compose', (82, 51, 82, 54): 'app', (82, 56, 82, 68): 'registryData'}, {}), '(compose, app, registryData)', False, 'from lib.appymlgenerator import convertComposeYMLToAppYML\n'), ((89, 11, 89, 39), 'lib.validate.findAndValidateApps', 'findAndValidateApps', ({(89, 31, 89, 38): 'appsDir'}, {}), '(appsDir)', False, 'from lib.validate import findAndValidateApps\n'), ((91, 15, 91, 44), 'lib.metadata.getAppRegistry', 'getAppRegistry', ({(91, 30, 91, 34): 'apps', (91, 36, 91, 43): 'appsDir'}, {}), '(apps, appsDir)', False, 'from lib.metadata import getAppRegistry, getSimpleAppRegistry\n'), ((96, 21, 96, 56), 'lib.metadata.getSimpleAppRegistry', 'getSimpleAppRegistry', ({(96, 42, 96, 46): 'apps', (96, 48, 96, 55): 'appsDir'}, {}), '(apps, appsDir)', False, 'from lib.metadata import getAppRegistry, getSimpleAppRegistry\n'), ((135, 7, 135, 31), 'os.path.isfile', 'os.path.isfile', ({(135, 22, 135, 30): 'userFile'}, {}), '(userFile)', False, 'import os\n'), ((144, 7, 144, 31), 'os.path.isfile', 'os.path.isfile', ({(144, 22, 144, 30): 
'userFile'}, {}), '(userFile)', False, 'import os\n'), ((160, 7, 160, 31), 'os.path.isfile', 'os.path.isfile', ({(160, 22, 160, 30): 'userFile'}, {}), '(userFile)', False, 'import os\n'), ((193, 18, 193, 66), 'os.path.join', 'os.path.join', ({(193, 31, 193, 38): 'appsDir', (193, 40, 193, 43): 'app', (193, 45, 193, 65): '"""docker-compose.yml"""'}, {}), "(appsDir, app, 'docker-compose.yml')", False, 'import os\n'), ((194, 24, 194, 79), 'os.path.join', 'os.path.join', ({(194, 37, 194, 49): 'appSystemDir', (194, 51, 194, 78): '"""docker-compose.common.yml"""'}, {}), "(appSystemDir, 'docker-compose.common.yml')", False, 'import os\n'), ((203, 33, 203, 62), 'os.path.join', 'os.path.join', ({(203, 46, 203, 56): 'appDataDir', (203, 58, 203, 61): 'app'}, {}), '(appDataDir, app)', False, 'import os\n'), ((204, 37, 204, 70), 'os.path.join', 'os.path.join', ({(204, 50, 204, 58): 'nodeRoot', (204, 60, 204, 69): '"""bitcoin"""'}, {}), "(nodeRoot, 'bitcoin')", False, 'import os\n'), ((205, 33, 205, 62), 'os.path.join', 'os.path.join', ({(205, 46, 205, 54): 'nodeRoot', (205, 56, 205, 61): '"""lnd"""'}, {}), "(nodeRoot, 'lnd')", False, 'import os\n'), ((223, 4, 223, 33), 'os.chmod', 'os.chmod', ({(223, 13, 223, 17): 'path', (223, 19, 223, 32): 'stat.S_IWRITE'}, {}), '(path, stat.S_IWRITE)', False, 'import os\n'), ((228, 14, 228, 43), 'os.path.join', 'os.path.join', ({(228, 27, 228, 37): 'appDataDir', (228, 39, 228, 42): 'app'}, {}), '(appDataDir, app)', False, 'import os\n'), ((236, 14, 236, 43), 'os.path.join', 'os.path.join', ({(236, 27, 236, 37): 'appDataDir', (236, 39, 236, 42): 'app'}, {}), '(appDataDir, app)', False, 'import os\n'), ((237, 13, 237, 39), 'os.path.join', 'os.path.join', ({(237, 26, 237, 33): 'appsDir', (237, 35, 237, 38): 'app'}, {}), '(appsDir, app)', False, 'import os\n'), ((238, 7, 238, 29), 'os.path.isdir', 'os.path.isdir', ({(238, 21, 238, 28): 'dataDir'}, {}), '(dataDir)', False, 'import os\n'), ((269, 13, 269, 50), 'os.path.join', 'os.path.join', ({(269, 26, 269, 34): 'nodeRoot', (269, 36, 269, 41): '"""tor"""', (269, 43, 269, 49): '"""data"""'}, {}), "(nodeRoot, 'tor', 'data')", False, 'import os\n'), ((273, 18, 273, 36), 'os.listdir', 'os.listdir', ({(273, 29, 273, 35): 'torDir'}, {}), '(torDir)', False, 'import os\n'), ((77, 18, 77, 35), 'yaml.safe_load', 'yaml.safe_load', ({(77, 33, 77, 34): 'f'}, {}), '(f)', False, 'import yaml\n'), ((81, 23, 81, 35), 'json.load', 'json.load', ({(81, 33, 81, 34): 'f'}, {}), '(f)', False, 'import json\n'), ((93, 8, 93, 56), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((98, 8, 98, 62), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((103, 22, 103, 70), 'os.path.join', 'os.path.join', ({(103, 35, 103, 42): 'appsDir', (103, 44, 103, 47): 'app', (103, 49, 103, 69): '"""docker-compose.yml"""'}, {}), "(appsDir, app, 'docker-compose.yml')", False, 'import os\n'), ((104, 17, 104, 54), 'os.path.join', 'os.path.join', ({(104, 30, 104, 37): 'appsDir', (104, 39, 104, 42): 'app', (104, 44, 104, 53): '"""app.yml"""'}, {}), "(appsDir, app, 'app.yml')", False, 'import os\n'), ((116, 15, 116, 43), 'lib.validate.findAndValidateApps', 'findAndValidateApps', ({(116, 35, 116, 42): 'appsDir'}, {}), '(appsDir)', False, 'from lib.validate import findAndValidateApps\n'), ((151, 17, 151, 79), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((167, 17, 168, 63), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((178, 14, 178, 31), 'yaml.safe_load', 'yaml.safe_load', ({(178, 29, 178, 
30): 'f'}, {}), '(f)', False, 'import yaml\n'), ((185, 15, 185, 55), 'lib.composegenerator.v1.generate.createComposeConfigFromV1', 'createComposeConfigFromV1', ({(185, 41, 185, 44): 'app', (185, 46, 185, 54): 'nodeRoot'}, {}), '(app, nodeRoot)', False, 'from lib.composegenerator.v1.generate import createComposeConfigFromV1\n'), ((187, 15, 187, 45), 'lib.composegenerator.v0.generate.createComposeConfigFromV0', 'createComposeConfigFromV0', ({(187, 41, 187, 44): 'app'}, {}), '(app)', False, 'from lib.composegenerator.v0.generate import createComposeConfigFromV0\n'), ((214, 11, 214, 38), 'os.path.isfile', 'os.path.isfile', ({(214, 26, 214, 37): 'composeFile'}, {}), '(composeFile)', False, 'import os\n'), ((230, 8, 230, 55), 'shutil.rmtree', 'shutil.rmtree', (), '', False, 'import shutil\n'), ((255, 8, 255, 30), 'json.dump', 'json.dump', ({(255, 18, 255, 26): 'userData', (255, 28, 255, 29): 'f'}, {}), '(userData, f)', False, 'import json\n'), ((265, 8, 265, 30), 'json.dump', 'json.dump', ({(265, 18, 265, 26): 'userData', (265, 28, 265, 29): 'f'}, {}), '(userData, f)', False, 'import json\n'), ((305, 18, 305, 36), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ({}, {}), '()', False, 'import tempfile\n'), ((322, 8, 322, 30), 'shutil.rmtree', 'shutil.rmtree', ({(322, 22, 322, 29): 'tempDir'}, {}), '(tempDir)', False, 'import shutil\n'), ((85, 16, 85, 53), 'yaml.dump', 'yaml.dump', (), '', False, 'import yaml\n'), ((92, 14, 92, 52), 'os.path.join', 'os.path.join', ({(92, 27, 92, 34): 'appsDir', (92, 36, 92, 51): '"""registry.json"""'}, {}), "(appsDir, 'registry.json')", False, 'import os\n'), ((97, 14, 97, 53), 'os.path.join', 'os.path.join', ({(97, 27, 97, 39): 'appSystemDir', (97, 41, 97, 52): '"""apps.json"""'}, {}), "(appSystemDir, 'apps.json')", False, 'import os\n'), ((137, 23, 137, 35), 'json.load', 'json.load', ({(137, 33, 137, 34): 'f'}, {}), '(f)', False, 'import json\n'), ((146, 23, 146, 35), 'json.load', 'json.load', ({(146, 33, 146, 34): 'f'}, {}), '(f)', False, 'import json\n'), ((162, 23, 162, 35), 'json.load', 'json.load', ({(162, 33, 162, 34): 'f'}, {}), '(f)', False, 'import json\n'), ((219, 12, 219, 42), 'os.path.join', 'os.path.join', ({(219, 25, 219, 33): 'nodeRoot', (219, 35, 219, 41): '""".env"""'}, {}), "(nodeRoot, '.env')", False, 'import os\n'), ((242, 27, 242, 63), 'shutil.ignore_patterns', 'shutil.ignore_patterns', ({(242, 50, 242, 62): '""".gitignore"""'}, {}), "('.gitignore')", False, 'import shutil\n'), ((244, 22, 244, 37), 'os.stat', 'os.stat', ({(244, 30, 244, 36): 'appDir'}, {}), '(appDir)', False, 'import os\n'), ((244, 46, 244, 61), 'os.stat', 'os.stat', ({(244, 54, 244, 60): 'appDir'}, {}), '(appDir)', False, 'import os\n'), ((245, 22, 245, 37), 'os.stat', 'os.stat', ({(245, 30, 245, 36): 'appDir'}, {}), '(appDir)', False, 'import os\n'), ((311, 30, 311, 59), 'os.path.join', 'os.path.join', ({(311, 43, 311, 50): 'tempDir', (311, 52, 311, 58): '"""apps"""'}, {}), "(tempDir, 'apps')", False, 'import os\n'), ((195, 31, 196, 63), 'subprocess.check_output', 'subprocess.check_output', (), '', False, 'import subprocess\n'), ((315, 29, 315, 55), 'os.path.join', 'os.path.join', ({(315, 42, 315, 49): 'appsDir', (315, 51, 315, 54): 'app'}, {}), '(appsDir, app)', False, 'import os\n'), ((317, 29, 317, 63), 'os.path.join', 'os.path.join', ({(317, 42, 317, 49): 'tempDir', (317, 51, 317, 57): '"""apps"""', (317, 59, 317, 62): 'app'}, {}), "(tempDir, 'apps', app)", False, 'import os\n'), ((108, 24, 108, 62), 'yaml.dump', 'yaml.dump', (), '', False, 'import yaml\n'), ((316, 30, 316, 
56), 'os.path.join', 'os.path.join', ({(316, 43, 316, 50): 'appsDir', (316, 52, 316, 55): 'app'}, {}), '(appsDir, app)', False, 'import os\n'), ((318, 32, 318, 66), 'os.path.join', 'os.path.join', ({(318, 45, 318, 52): 'tempDir', (318, 54, 318, 60): '"""apps"""', (318, 62, 318, 65): 'app'}, {}), "(tempDir, 'apps', app)", False, 'import os\n'), ((318, 68, 318, 94), 'os.path.join', 'os.path.join', ({(318, 81, 318, 88): 'appsDir', (318, 90, 318, 93): 'app'}, {}), '(appsDir, app)', False, 'import os\n'), ((319, 55, 319, 91), 'shutil.ignore_patterns', 'shutil.ignore_patterns', ({(319, 78, 319, 90): '""".gitignore"""'}, {}), "('.gitignore')", False, 'import shutil\n')] |
YuanruiZJU/SZZ-TSE | features/jit-features/query/query.py | 093506f9019a0d8b412dad4672525f93150ca181 | from query.base import BaseQuery
class CommitMetaQuery(BaseQuery):
table_name = 'commit_meta'
class DiffusionFeaturesQuery(BaseQuery):
table_name = 'diffusion_features'
class SizeFeaturesQuery(BaseQuery):
table_name = 'size_features'
class PurposeFeaturesQuery(BaseQuery):
table_name = 'purpose_features'
class HistoryFeaturesQuery(BaseQuery):
table_name = 'history_features'
class ExperienceFeaturesQuery(BaseQuery):
table_name = 'experience_features'
class ProjectQuery:
def __init__(self, project):
self.project = project
self.cms = CommitMetaQuery(project).do_query()
self.diffusion_features = DiffusionFeaturesQuery(project).do_query()
self.size_features = SizeFeaturesQuery(project).do_query()
self.purpose_features = PurposeFeaturesQuery(project).do_query()
self.history_features = HistoryFeaturesQuery(project).do_query()
self.exp_features = ExperienceFeaturesQuery(project).do_query()
self.__cache_end_commit_id = None
@property
def end_commit_id(self):
if self.__cache_end_commit_id is not None:
return self.__cache_end_commit_id
commit_id = None
for pf in self.purpose_features:
if pf.fix:
commit_id = pf.commit_id
self.__cache_end_commit_id = commit_id
return self.__cache_end_commit_id
def combine(self):
features_dict = dict()
for sf in self.size_features:
features_dict[sf.commit_id] = dict()
features_dict[sf.commit_id]['la'] = sf.la
features_dict[sf.commit_id]['ld'] = sf.ld
features_dict[sf.commit_id]['lt'] = sf.lt
for df in self.diffusion_features:
features_dict[df.commit_id]['ns'] = df.ns
features_dict[df.commit_id]['nd'] = df.nd
features_dict[df.commit_id]['nf'] = df.nf
features_dict[df.commit_id]['entropy'] = df.entropy
for pf in self.purpose_features:
features_dict[pf.commit_id]['fix'] = pf.fix
for hf in self.history_features:
features_dict[hf.commit_id]['ndev'] = hf.ndev
features_dict[hf.commit_id]['age'] = hf.age
features_dict[hf.commit_id]['nuc'] = hf.nuc
for ef in self.exp_features:
features_dict[ef.commit_id]['exp'] = ef.exp
features_dict[ef.commit_id]['rexp'] = ef.rexp
features_dict[ef.commit_id]['sexp'] = ef.sexp
ret_list = list()
for cm in self.cms:
cm_dict = features_dict[cm.commit_id]
if len(cm_dict) == 14:
cm_dict['commit_id'] = cm.commit_id
ret_list.append(cm_dict)
if cm.commit_id == self.end_commit_id:
break
return ret_list
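# Illustrative usage sketch (not part of the original module); the project name is a placeholder only:
#   pq = ProjectQuery("ant")
#   rows = pq.combine()   # list of dicts, each holding the 14 JIT features plus the commit_id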
| [] |
mikeywaites/flask-arrested | example/mappers.py | 6b97ce2ad2765f9acab10f4726e310258aa51de0 | from kim import Mapper, field
from example.models import Planet, Character
class PlanetMapper(Mapper):
__type__ = Planet
id = field.Integer(read_only=True)
name = field.String()
description = field.String()
created_at = field.DateTime(read_only=True)
class CharacterMapper(Mapper):
__type__ = Character
id = field.Integer(read_only=True)
name = field.String()
created_at = field.DateTime(read_only=True)
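# Illustrative usage sketch (not part of the original module), assuming kim's standard Mapper API;
# `planet` is a hypothetical Planet instance:
#   mapper = PlanetMapper(obj=planet)
#   data = mapper.serialize()   # dict with id, name, description, created_at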
| [((10, 9, 10, 38), 'kim.field.Integer', 'field.Integer', (), '', False, 'from kim import Mapper, field\n'), ((11, 11, 11, 25), 'kim.field.String', 'field.String', ({}, {}), '()', False, 'from kim import Mapper, field\n'), ((12, 18, 12, 32), 'kim.field.String', 'field.String', ({}, {}), '()', False, 'from kim import Mapper, field\n'), ((13, 17, 13, 47), 'kim.field.DateTime', 'field.DateTime', (), '', False, 'from kim import Mapper, field\n'), ((20, 9, 20, 38), 'kim.field.Integer', 'field.Integer', (), '', False, 'from kim import Mapper, field\n'), ((21, 11, 21, 25), 'kim.field.String', 'field.String', ({}, {}), '()', False, 'from kim import Mapper, field\n'), ((22, 17, 22, 47), 'kim.field.DateTime', 'field.DateTime', (), '', False, 'from kim import Mapper, field\n')] |
escalate/ansible-gitops-example-repository | collections/ansible_collections/community/general/plugins/connection/saltstack.py | f7f7a9fcd09abd982f5fcd3bd196809a6c4c2f08 | # Based on local.py (c) 2012, Michael DeHaan <[email protected]>
# Based on chroot.py (c) 2013, Maykel Moya <[email protected]>
# Based on func.py
# (c) 2014, Michael Scherer <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Michael Scherer (@mscherer) <[email protected]>
name: saltstack
short_description: Allow ansible to piggyback on salt minions
description:
- This allows you to use existing Saltstack infrastructure to connect to targets.
'''
import os
import base64
from ansible import errors
from ansible.plugins.connection import ConnectionBase
HAVE_SALTSTACK = False
try:
import salt.client as sc
HAVE_SALTSTACK = True
except ImportError:
pass
class Connection(ConnectionBase):
""" Salt-based connections """
has_pipelining = False
    # while the name of the product is salt, naming this module "salt" causes
    # trouble with module imports
transport = 'community.general.saltstack'
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.host = self._play_context.remote_addr
def _connect(self):
if not HAVE_SALTSTACK:
raise errors.AnsibleError("saltstack is not installed")
self.client = sc.LocalClient()
self._connected = True
return self
def exec_command(self, cmd, sudoable=False, in_data=None):
""" run a command on the remote minion """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
self._display.vvv("EXEC %s" % cmd, host=self.host)
# need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077
res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd])
if self.host not in res:
raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host)
p = res[self.host]
return p['retcode'], p['stdout'], p['stderr']
@staticmethod
def _normalize_path(path, prefix):
if not path.startswith(os.path.sep):
path = os.path.join(os.path.sep, path)
normpath = os.path.normpath(path)
return os.path.join(prefix, normpath[1:])
def put_file(self, in_path, out_path):
""" transfer a file from local to remote """
super(Connection, self).put_file(in_path, out_path)
out_path = self._normalize_path(out_path, '/')
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
with open(in_path, 'rb') as in_fh:
content = in_fh.read()
self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path])
# TODO test it
def fetch_file(self, in_path, out_path):
""" fetch a file from remote to local """
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._normalize_path(in_path, '/')
self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host]
open(out_path, 'wb').write(content)
def close(self):
""" terminate the connection; nothing to do here """
pass
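# Usage note (an assumption based on standard Ansible conventions, not taken from this file): once the
# collection is installed, this plugin is typically selected with Ansible's --connection/-c option, e.g.
#   ansible all -i inventory -c community.general.saltstack -m ping
# Inventory host names must match the salt minion ids for self.client.cmd() to reach them.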
| [((49, 22, 49, 38), 'salt.client.LocalClient', 'sc.LocalClient', ({}, {}), '()', True, 'import salt.client as sc\n'), ((73, 19, 73, 41), 'os.path.normpath', 'os.path.normpath', ({(73, 36, 73, 40): 'path'}, {}), '(path)', False, 'import os\n'), ((74, 15, 74, 49), 'os.path.join', 'os.path.join', ({(74, 28, 74, 34): 'prefix', (74, 36, 74, 48): 'normpath[1:]'}, {}), '(prefix, normpath[1:])', False, 'import os\n'), ((47, 18, 47, 67), 'ansible.errors.AnsibleError', 'errors.AnsibleError', ({(47, 38, 47, 66): '"""saltstack is not installed"""'}, {}), "('saltstack is not installed')", False, 'from ansible import errors\n'), ((58, 18, 58, 113), 'ansible.errors.AnsibleError', 'errors.AnsibleError', ({(58, 38, 58, 112): '"""Internal Error: this module does not support optimized module pipelining"""'}, {}), "(\n 'Internal Error: this module does not support optimized module pipelining')", False, 'from ansible import errors\n'), ((64, 18, 64, 133), 'ansible.errors.AnsibleError', 'errors.AnsibleError', ({(64, 38, 64, 132): '("Minion %s didn\'t answer, check if salt-minion is running and the name is correct"\n % self.host)'}, {}), '(\n "Minion %s didn\'t answer, check if salt-minion is running and the name is correct"\n % self.host)', False, 'from ansible import errors\n'), ((72, 19, 72, 50), 'os.path.join', 'os.path.join', ({(72, 32, 72, 43): 'os.path.sep', (72, 45, 72, 49): 'path'}, {}), '(os.path.sep, path)', False, 'import os\n'), ((85, 66, 85, 91), 'base64.b64encode', 'base64.b64encode', ({(85, 83, 85, 90): 'content'}, {}), '(content)', False, 'import base64\n')] |
normaldotcom/webvirtmgr | create/views.py | 8d822cb94105abf82eb0ff6651a36c43b0911d2a | from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from servers.models import Compute
from create.models import Flavor
from instance.models import Instance
from libvirt import libvirtError
from vrtManager.create import wvmCreate
from vrtManager import util
from create.forms import FlavorAddForm, NewVMForm
def create(request, host_id):
"""
Create new instance.
"""
if not request.user.is_authenticated():
return HttpResponseRedirect('/login')
errors = []
compute = Compute.objects.get(id=host_id)
flavors = Flavor.objects.filter().order_by('id')
try:
conn = wvmCreate(compute.hostname,
compute.login,
compute.password,
compute.type)
storages = sorted(conn.get_storages())
networks = sorted(conn.get_networks())
instances = conn.get_instances()
get_images = sorted(conn.get_storages_images())
mac_auto = util.randomMAC()
except libvirtError as err:
errors.append(err.message)
if not storages:
msg = _("You haven't defined have any storage pools")
errors.append(msg)
if not networks:
msg = _("You haven't defined have any network pools")
errors.append(msg)
if request.method == 'POST':
if 'create_flavor' in request.POST:
form = FlavorAddForm(request.POST)
if form.is_valid():
data = form.cleaned_data
create_flavor = Flavor(label=data['label'],
vcpu=data['vcpu'],
memory=data['memory'],
disk=data['disk'])
create_flavor.save()
return HttpResponseRedirect(request.get_full_path())
if 'delete_flavor' in request.POST:
flavor_id = request.POST.get('flavor', '')
delete_flavor = Flavor.objects.get(id=flavor_id)
delete_flavor.delete()
return HttpResponseRedirect(request.get_full_path())
if 'create' in request.POST:
volumes = {}
form = NewVMForm(request.POST)
if form.is_valid():
data = form.cleaned_data
if instances:
if data['name'] in instances:
msg = _("A virtual machine with this name already exists")
errors.append(msg)
if not errors:
if data['hdd_size']:
if not data['mac']:
msg = _("No Virtual Machine MAC has been entered")
errors.append(msg)
else:
try:
path = conn.create_volume(data['storage'], data['name'], data['hdd_size'])
volumes[path] = conn.get_volume_type(path)
except libvirtError as msg_error:
errors.append(msg_error.message)
elif data['template']:
templ_path = conn.get_volume_path(data['template'])
clone_path = conn.clone_from_template(data['name'], templ_path)
volumes[clone_path] = conn.get_volume_type(clone_path)
else:
if not data['images']:
msg = _("First you need to create or select an image")
errors.append(msg)
else:
for vol in data['images'].split(','):
try:
path = conn.get_volume_path(vol)
volumes[path] = conn.get_volume_type(path)
except libvirtError as msg_error:
errors.append(msg_error.message)
if not errors:
uuid = util.randomUUID()
try:
conn.create_instance(data['name'], data['memory'], data['vcpu'], data['host_model'],
uuid, volumes, data['networks'], data['virtio'], data['mac'])
create_instance = Instance(compute_id=host_id, name=data['name'], uuid=uuid)
create_instance.save()
return HttpResponseRedirect('/instance/%s/%s/' % (host_id, data['name']))
except libvirtError as msg_error:
if data['hdd_size']:
conn.delete_volume(volumes.keys()[0])
errors.append(msg_error.message)
conn.close()
return render_to_response('create.html', locals(), context_instance=RequestContext(request))
| [((25, 14, 25, 45), 'servers.models.Compute.objects.get', 'Compute.objects.get', (), '', False, 'from servers.models import Compute\n'), ((22, 15, 22, 45), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', ({(22, 36, 22, 44): '"""/login"""'}, {}), "('/login')", False, 'from django.http import HttpResponseRedirect\n'), ((29, 15, 32, 38), 'vrtManager.create.wvmCreate', 'wvmCreate', ({(29, 25, 29, 41): 'compute.hostname', (30, 25, 30, 38): 'compute.login', (31, 25, 31, 41): 'compute.password', (32, 25, 32, 37): 'compute.type'}, {}), '(compute.hostname, compute.login, compute.password, compute.type)', False, 'from vrtManager.create import wvmCreate\n'), ((38, 19, 38, 35), 'vrtManager.util.randomMAC', 'util.randomMAC', ({}, {}), '()', False, 'from vrtManager import util\n'), ((43, 14, 43, 61), 'django.utils.translation.ugettext_lazy', '_', ({(43, 16, 43, 60): '"""You haven\'t defined have any storage pools"""'}, {}), '("You haven\'t defined have any storage pools")', True, 'from django.utils.translation import ugettext_lazy as _\n'), ((46, 14, 46, 61), 'django.utils.translation.ugettext_lazy', '_', ({(46, 16, 46, 60): '"""You haven\'t defined have any network pools"""'}, {}), '("You haven\'t defined have any network pools")', True, 'from django.utils.translation import ugettext_lazy as _\n'), ((26, 14, 26, 37), 'create.models.Flavor.objects.filter', 'Flavor.objects.filter', ({}, {}), '()', False, 'from create.models import Flavor\n'), ((51, 19, 51, 46), 'create.forms.FlavorAddForm', 'FlavorAddForm', ({(51, 33, 51, 45): 'request.POST'}, {}), '(request.POST)', False, 'from create.forms import FlavorAddForm, NewVMForm\n'), ((62, 28, 62, 60), 'create.models.Flavor.objects.get', 'Flavor.objects.get', (), '', False, 'from create.models import Flavor\n'), ((67, 19, 67, 42), 'create.forms.NewVMForm', 'NewVMForm', ({(67, 29, 67, 41): 'request.POST'}, {}), '(request.POST)', False, 'from create.forms import FlavorAddForm, NewVMForm\n'), ((114, 72, 114, 95), 'django.template.RequestContext', 'RequestContext', ({(114, 87, 114, 94): 'request'}, {}), '(request)', False, 'from django.template import RequestContext\n'), ((54, 32, 57, 57), 'create.models.Flavor', 'Flavor', (), '', False, 'from create.models import Flavor\n'), ((72, 30, 72, 82), 'django.utils.translation.ugettext_lazy', '_', ({(72, 32, 72, 81): '"""A virtual machine with this name already exists"""'}, {}), "('A virtual machine with this name already exists')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((101, 31, 101, 48), 'vrtManager.util.randomUUID', 'util.randomUUID', ({}, {}), '()', False, 'from vrtManager import util\n'), ((77, 34, 77, 78), 'django.utils.translation.ugettext_lazy', '_', ({(77, 36, 77, 77): '"""No Virtual Machine MAC has been entered"""'}, {}), "('No Virtual Machine MAC has been entered')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((105, 46, 105, 104), 'instance.models.Instance', 'Instance', (), '', False, 'from instance.models import Instance\n'), ((107, 35, 107, 101), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', ({(107, 56, 107, 100): "('/instance/%s/%s/' % (host_id, data['name']))"}, {}), "('/instance/%s/%s/' % (host_id, data['name']))", False, 'from django.http import HttpResponseRedirect\n'), ((91, 34, 91, 82), 'django.utils.translation.ugettext_lazy', '_', ({(91, 36, 91, 81): '"""First you need to create or select an image"""'}, {}), "('First you need to create or select an image')", True, 'from django.utils.translation import ugettext_lazy 
as _\n')] |
andimarafioti/GACELA | utils/wassersteinGradientPenalty.py | 34649fb01bdecbcb266db046a8b9c48c141f16e1 | import torch
__author__ = 'Andres'
def calc_gradient_penalty_bayes(discriminator, real_data, fake_data, gamma):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = real_data.size()[0]
alpha = torch.rand(batch_size, 1, 1, 1)
alpha = alpha.expand(real_data.size()).to(device)
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
interpolates = torch.autograd.Variable(interpolates, requires_grad=True).to(device)
disc_interpolates = discriminator(interpolates)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.norm(2) - 1) ** 2) * gamma
return gradient_penalty | [((9, 12, 9, 43), 'torch.rand', 'torch.rand', ({(9, 23, 9, 33): 'batch_size', (9, 35, 9, 36): '1', (9, 38, 9, 39): '1', (9, 41, 9, 42): '1'}, {}), '(batch_size, 1, 1, 1)', False, 'import torch\n'), ((6, 36, 6, 61), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((13, 19, 13, 76), 'torch.autograd.Variable', 'torch.autograd.Variable', (), '', False, 'import torch\n')] |
butla/experiments | pytest_capture_log_error/test_file.py | 8c8ade15bb01978763d6618342fa42ad7563e38f | import a_file
def test_a(capsys):
assert a_file.bla() == 5
assert a_file.LOG_MESSAGE in capsys.readouterr().err
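# Note for readers (not part of the original test): capsys is pytest's built-in fixture for capturing
# stdout/stderr; readouterr() returns an object with .out and .err attributes, so the second assertion
# checks that the captured stderr contains a_file.LOG_MESSAGE.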
| [((4, 11, 4, 23), 'a_file.bla', 'a_file.bla', ({}, {}), '()', False, 'import a_file\n')] |
Magier/Aetia | src_py/ui/identify_page.py | 7f6045d99904b808e1201f445d0d10b0dce54c37 | import streamlit as st
from ui.session_state import SessionState, get_state
from infer import ModelStage
def show(state: SessionState):
st.header("identify")
state = get_state()
if state.model.stage < ModelStage.DEFINED:
st.error("Please create the model first!")
| [((7, 4, 7, 25), 'streamlit.header', 'st.header', ({(7, 14, 7, 24): '"""identify"""'}, {}), "('identify')", True, 'import streamlit as st\n'), ((8, 12, 8, 23), 'ui.session_state.get_state', 'get_state', ({}, {}), '()', False, 'from ui.session_state import SessionState, get_state\n'), ((10, 8, 10, 50), 'streamlit.error', 'st.error', ({(10, 17, 10, 49): '"""Please create the model first!"""'}, {}), "('Please create the model first!')", True, 'import streamlit as st\n')] |
luofeisg/OpenKE-PuTransE | openke/data/UniverseTrainDataLoader.py | 0bfefb3917e7479520917febd91a9f4d7353c7fc | '''
MIT License
Copyright (c) 2020 Rashid Lafraie
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
import ctypes
import numpy as np
from .TrainDataLoader import TrainDataLoader
class UniverseTrainDataLoader(TrainDataLoader):
def __init__(self, in_path="./", batch_size=None, nbatches=None, threads=8, sampling_mode="normal", bern_flag=0,
filter_flag=1, neg_ent=1, neg_rel=0, initial_random_seed=2):
super(UniverseTrainDataLoader, self).__init__(in_path=in_path, batch_size=batch_size, nbatches=nbatches,
threads=threads, sampling_mode=sampling_mode, bern_flag=bern_flag,
filter_flag=filter_flag, neg_ent=neg_ent, neg_rel=neg_rel,
initial_random_seed=initial_random_seed)
self.entity_total_universe = 0
self.relation_total_universe = 0
self.train_total_universe = 0
"""argtypes"""
self.lib.sampling.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int64,
ctypes.c_int64,
ctypes.c_int64,
ctypes.c_int64,
ctypes.c_int64,
ctypes.c_int64,
ctypes.c_int64
]
self.lib.getParallelUniverse.argtypes = [
ctypes.c_int64,
ctypes.c_float,
ctypes.c_int64
]
self.lib.getEntityRemapping.argtypes = [
ctypes.c_void_p
]
self.lib.getRelationRemapping.argtypes = [
ctypes.c_void_p
]
self.lib.getEntityTotalUniverse.restype = ctypes.c_int64
self.lib.getRelationTotalUniverse.restype = ctypes.c_int64
self.lib.getTrainTotalUniverse.restype = ctypes.c_int64
def swap_helpers(self):
self.lib.swapHelpers()
def reset_universe(self):
self.lib.resetUniverse()
self.set_nbatches(self.lib.getTrainTotal, self.nbatches)
def get_universe_mappings(self):
entity_remapping = np.zeros(self.entity_total_universe, dtype=np.int64)
relation_remapping = np.zeros(self.relation_total_universe, dtype=np.int64)
entity_remapping_addr = entity_remapping.__array_interface__["data"][0]
relation_remapping_addr = relation_remapping.__array_interface__["data"][0]
self.lib.getEntityRemapping(entity_remapping_addr)
self.lib.getRelationRemapping(relation_remapping_addr)
return entity_remapping, relation_remapping
def compile_universe_dataset(self, triple_constraint, balance_param, relation_in_focus):
self.lib.getParallelUniverse(triple_constraint, balance_param, relation_in_focus)
self.entity_total_universe = self.lib.getEntityTotalUniverse()
self.relation_total_universe = self.lib.getRelationTotalUniverse()
self.train_total_universe = self.lib.getTrainTotalUniverse()
self.set_nbatches(self.train_total_universe, self.nbatches)
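# Illustrative usage sketch (not part of the original module); the path and numeric values are
# placeholders only, based on the methods defined above:
#   loader = UniverseTrainDataLoader(in_path="./benchmarks/FB15K237/", nbatches=100, threads=8)
#   loader.compile_universe_dataset(triple_constraint=1000, balance_param=0.5, relation_in_focus=0)
#   entity_map, relation_map = loader.get_universe_mappings()
#   # ...train on the sampled universe, then...
#   loader.reset_universe()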
| [((84, 27, 84, 79), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((85, 29, 85, 83), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n')] |