Dataset schema: five columns, all string-valued; the two numbers give each column's minimum and maximum string length.

    repo_name           7 .. 94
    repo_path           4 .. 237
    repo_head_hexsha    40 .. 40
    content             10 .. 680k
    apis                2 .. 840k

Each record below lists these five fields in this order: the repository name, the file path within the repository, the repository HEAD commit hash, the raw file content, and the extracted API-call annotations for that file.
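Each entry in the apis column appears to be a Python-literal list of tuples describing one API call: a source span (start_line, start_col, end_line, end_col), the fully qualified API name, the bare call name, a pair of argument maps (positional and keyword, keyed by source spans), the rendered argument list, a flag that seems to mark aliased imports (e.g. 'import pandas as pd'), and the import statement that introduced the name. The sketch below shows one way such an entry might be decoded; the example value is simplified from the first record, and reading the column with ast.literal_eval is an assumption about its serialization, not documented behavior of the dataset.

import ast

# Hypothetical `apis` value, modeled (and simplified) from the first record below;
# the exact serialization of the column is an assumption.
apis_raw = ("[((2, 5, 2, 31), 'flask.Blueprint', 'Blueprint', ({}, {}), "
            "\"('auth', __name__)\", False, 'from flask import Blueprint')]")

for span, qualified, name, (pos_args, kw_args), rendered, aliased, import_stmt in ast.literal_eval(apis_raw):
    start_line, start_col, end_line, end_col = span
    # Prints e.g. "flask.Blueprint at line 2: Blueprint('auth', __name__)"
    print(f"{qualified} at line {start_line}: {name}{rendered}")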
louisenje/Pitches
app/auth/__init__.py
11248906ffa7d1b6ed6c47db0c5e7bd3b4768825
from flask import Blueprint auth=Blueprint('auth',__name__) from .import views,forms
[((2, 5, 2, 31), 'flask.Blueprint', 'Blueprint', ({(2, 15, 2, 21): '"""auth"""', (2, 22, 2, 30): '__name__'}, {}), "('auth', __name__)", False, 'from flask import Blueprint\n')]
Rono-Barto-Co/Project-QR
forms/QRGenerator.py
e80fc5a41f25542038c090311844912790cb1478
from flask_wtf import FlaskForm from wtforms import StringField, SubmitField, SelectField from wtforms.validators import DataRequired class QRGenerator(FlaskForm): code_content = StringField('Content', validators=[DataRequired()]) code_size = SelectField('Size', choices=[('15', 'Size'), ('5', '5'), ('10', '10'), ('15', '15'), ('20', '20'), ('25', '25'), ('30', '30')]) code_color = SelectField('Colour', choices=[('white', 'Colour'), ("white", "White"), ('yellow', "Yellow"), ('lime', "Green"), ("#ffa500", "Orange")]) code_correction = SelectField('Error Correction', choices=[("H", "Error Correction"), ("H", "H"), ("L", "L"), ("M", "M"), ("Q", "Q")]) code_image = StringField('Image URL') generate_code = SubmitField('Generate QR Code')
[((8, 16, 14, 59), 'wtforms.SelectField', 'SelectField', (), '', False, 'from wtforms import StringField, SubmitField, SelectField\n'), ((15, 17, 19, 71), 'wtforms.SelectField', 'SelectField', (), '', False, 'from wtforms import StringField, SubmitField, SelectField\n'), ((20, 22, 24, 75), 'wtforms.SelectField', 'SelectField', (), '', False, 'from wtforms import StringField, SubmitField, SelectField\n'), ((25, 17, 25, 41), 'wtforms.StringField', 'StringField', ({(25, 29, 25, 40): '"""Image URL"""'}, {}), "('Image URL')", False, 'from wtforms import StringField, SubmitField, SelectField\n'), ((26, 20, 26, 51), 'wtforms.SubmitField', 'SubmitField', ({(26, 32, 26, 50): '"""Generate QR Code"""'}, {}), "('Generate QR Code')", False, 'from wtforms import StringField, SubmitField, SelectField\n'), ((7, 54, 7, 68), 'wtforms.validators.DataRequired', 'DataRequired', ({}, {}), '()', False, 'from wtforms.validators import DataRequired\n')]
jcrangel/AI-for-Trading
Quiz/m2_advanced_quants/l5_volatility/volatility_estimation.py
c3b865e992f8eb8deda91e7641428eef1d343636
import pandas as pd import numpy as np def estimate_volatility(prices, l): """Create an exponential moving average model of the volatility of a stock price, and return the most recent (last) volatility estimate. Parameters ---------- prices : pandas.Series A series of adjusted closing prices for a stock. l : float The 'lambda' parameter of the exponential moving average model. Making this value smaller will cause the model to weight older terms less relative to more recent terms. Returns ------- last_vol : float The last element of your exponential moving averge volatility model series. """ # TODO: Implement the exponential moving average volatility model and return the last value. return prices.ewm(alpha=(1-l)).mean()[-1] def test_run(filename='data.csv'): """Test run get_most_volatile() with stock prices from a file.""" prices = pd.read_csv(filename, parse_dates=[ 'date'], index_col='date', squeeze=True) print("Most recent volatility estimate: {:.6f}".format(estimate_volatility(prices, 0.7))) # print(estimate_volatility(prices, 0.7)) if __name__ == '__main__': test_run()
[((32, 13, 33, 65), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n')]
kagemeka/atcoder-submissions
jp.atcoder/abc122/abc122_c/9516079.py
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
import sys n, q = map(int, sys.stdin.readline().split()) s = '$' + sys.stdin.readline().rstrip() lr = zip(*[map(int, sys.stdin.read().split())] * 2) def main(): res = [None] * (n + 1); res[0] = 0 prev = '$' for i in range(1, n+1): res[i] = res[i-1] res[i] += (prev == 'A' and s[i] == 'C') & 1 prev = s[i] for l, r in lr: yield res[r] - res[l] if __name__ == '__main__': ans = main() print(*ans, sep='\n')
[((3, 16, 3, 36), 'sys.stdin.readline', 'sys.stdin.readline', ({}, {}), '()', False, 'import sys\n'), ((4, 10, 4, 30), 'sys.stdin.readline', 'sys.stdin.readline', ({}, {}), '()', False, 'import sys\n'), ((5, 20, 5, 36), 'sys.stdin.read', 'sys.stdin.read', ({}, {}), '()', False, 'import sys\n')]
gokulsg/Attention-is-all-you-need-implementation-from-scratch
Decoder.py
f5eb591d169cbef3ef8b066d8d462fee11badc3b
import torch import torch.nn as nn from DecoderLayer import DecoderLayer import math class Decoder(nn.Module): def __init__(self, output_dim, embed_dim, num_layers, num_heads, expand_dim, dropout, device, max_length = 30): super().__init__() self.tok_embedding = nn.Embedding(output_dim, embed_dim) #self.pos_embedding = nn.Embedding(max_length, embed_dim) self.pos_embedding = nn.Embedding.from_pretrained(self.get_positional_encoding(max_length, embed_dim)) self.layers = nn.ModuleList([DecoderLayer(embed_dim, num_heads, expand_dim, dropout) for _ in range(num_layers)]) self.fc_out = nn.Linear(embed_dim, output_dim) self.dropout = nn.Dropout(dropout) self.scale = torch.sqrt(torch.FloatTensor([embed_dim])).to(device) self.device = device def forward(self, trg, enc_src, trg_mask, src_mask): #trg = [batch size, trg len] #enc_src = [batch size, src len, embed dim] #trg_mask = [batch size, 1, trg len, trg len] #src_mask = [batch size, 1, 1, src len] batch_size = trg.shape[0] trg_len = trg.shape[1] pos = torch.arange(0, trg_len).unsqueeze(0).repeat(batch_size, 1).to(self.device) #pos = [batch size, trg len] trg = self.dropout((self.tok_embedding(trg) * self.scale) + self.pos_embedding(pos)) #trg = [batch size, trg len, embed dim] for layer in self.layers: trg = layer(trg, enc_src, trg_mask, src_mask) #trg = [batch size, trg len, embed dim] output = self.fc_out(trg) #output = [batch size, trg len, output dim] return output def get_positional_encoding(self, max_seq_len, embed_dim): pos_enc = torch.zeros(max_seq_len, embed_dim) position = torch.arange(0, max_seq_len).unsqueeze(1) div_term = torch.exp(torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim)) pos_enc[:, 0::2] = torch.sin(position * div_term) pos_enc[:, 1::2] = torch.cos(position * div_term) return pos_enc
[((9, 29, 9, 64), 'torch.nn.Embedding', 'nn.Embedding', ({(9, 42, 9, 52): 'output_dim', (9, 54, 9, 63): 'embed_dim'}, {}), '(output_dim, embed_dim)', True, 'import torch.nn as nn\n'), ((13, 22, 13, 54), 'torch.nn.Linear', 'nn.Linear', ({(13, 32, 13, 41): 'embed_dim', (13, 43, 13, 53): 'output_dim'}, {}), '(embed_dim, output_dim)', True, 'import torch.nn as nn\n'), ((14, 23, 14, 42), 'torch.nn.Dropout', 'nn.Dropout', ({(14, 34, 14, 41): 'dropout'}, {}), '(dropout)', True, 'import torch.nn as nn\n'), ((47, 18, 47, 53), 'torch.zeros', 'torch.zeros', ({(47, 30, 47, 41): 'max_seq_len', (47, 43, 47, 52): 'embed_dim'}, {}), '(max_seq_len, embed_dim)', False, 'import torch\n'), ((51, 27, 51, 57), 'torch.sin', 'torch.sin', ({(51, 37, 51, 56): 'position * div_term'}, {}), '(position * div_term)', False, 'import torch\n'), ((52, 27, 52, 57), 'torch.cos', 'torch.cos', ({(52, 37, 52, 56): 'position * div_term'}, {}), '(position * div_term)', False, 'import torch\n'), ((12, 37, 12, 92), 'DecoderLayer.DecoderLayer', 'DecoderLayer', ({(12, 50, 12, 59): 'embed_dim', (12, 61, 12, 70): 'num_heads', (12, 72, 12, 82): 'expand_dim', (12, 84, 12, 91): 'dropout'}, {}), '(embed_dim, num_heads, expand_dim, dropout)', False, 'from DecoderLayer import DecoderLayer\n'), ((48, 19, 48, 47), 'torch.arange', 'torch.arange', ({(48, 32, 48, 33): '0', (48, 35, 48, 46): 'max_seq_len'}, {}), '(0, max_seq_len)', False, 'import torch\n'), ((49, 29, 49, 58), 'torch.arange', 'torch.arange', ({(49, 42, 49, 43): '0', (49, 45, 49, 54): 'embed_dim', (49, 56, 49, 57): '2'}, {}), '(0, embed_dim, 2)', False, 'import torch\n'), ((15, 32, 15, 62), 'torch.FloatTensor', 'torch.FloatTensor', ({(15, 50, 15, 61): '[embed_dim]'}, {}), '([embed_dim])', False, 'import torch\n'), ((49, 63, 49, 80), 'math.log', 'math.log', ({(49, 72, 49, 79): '10000.0'}, {}), '(10000.0)', False, 'import math\n'), ((27, 14, 27, 38), 'torch.arange', 'torch.arange', ({(27, 27, 27, 28): '0', (27, 30, 27, 37): 'trg_len'}, {}), '(0, trg_len)', False, 'import torch\n')]
babs/salt
salt/grains/nxos.py
c536ea716d5308880b244e7980f4b659d86fc104
""" Grains for Cisco NX-OS minions .. versionadded:: 2016.11.0 For documentation on setting up the nxos proxy minion look in the documentation for :mod:`salt.proxy.nxos<salt.proxy.nxos>`. """ import logging import salt.utils.nxos import salt.utils.platform from salt.exceptions import NxosClientError log = logging.getLogger(__name__) __proxyenabled__ = ["nxos"] __virtualname__ = "nxos" def __virtual__(): try: salt.utils.nxos.version_info() except NxosClientError as err: return False, err return __virtualname__ def system_information(proxy=None): if salt.utils.platform.is_proxy(): if proxy is None: return {} if proxy["nxos.initialized"]() is False: return {} return {"nxos": proxy["nxos.grains"]()} else: data = salt.utils.nxos.version_info() return salt.utils.nxos.system_info(data)
[((16, 6, 16, 33), 'logging.getLogger', 'logging.getLogger', ({(16, 24, 16, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n')]
tvip/tmsproviderapisdk
tmsproviderapisdk/tms_device.py
f385ddb0d7e87e7a62d1caef3e2c9769e844a4a1
from typing import List, Optional, Tuple from tmsproviderapisdk.tms_extended_model import TmsExtendedModel class TmsDevice(TmsExtendedModel): _path_url = "/devices/" def __init__(self, unique_id: str, account: int, device_id: int = None, ipaddr: str = None, mac: str = None, remote_custom_field: str = None, comment: str = None, last_online: str = None, last_fw_ver: str = None, first_online: str = None, use_nat: bool = False, operation_system: str = None, udpxy_addr: str = None, device_type: int = None, provider: int = None): self.unique_id = unique_id self.account = account self.id = device_id self.ipaddr = ipaddr self.mac = mac self.remote_custom_field = remote_custom_field self.comment = comment self.last_online = last_online self.last_fw_ver = last_fw_ver self.first_online = first_online self.use_nat = use_nat self.operation_system = operation_system self.udpxy_addr = udpxy_addr self.device_type = device_type self.provider = provider @staticmethod def _dict_to_object(device_dict: dict) -> object: device = TmsDevice( unique_id=device_dict["unique_id"], device_id=device_dict["id"], ipaddr=device_dict["ipaddr"], mac=device_dict["mac"], remote_custom_field=device_dict["remote_custom_field"], comment=device_dict["comment"], last_online=device_dict["last_online"], last_fw_ver=device_dict["last_fw_ver"], first_online=device_dict["first_online"], use_nat=device_dict["use_nat"], operation_system=device_dict["operation_system"], udpxy_addr=device_dict["udpxy_addr"], device_type=device_dict["device_type"], provider=device_dict["provider"], account=device_dict["account"] ) return device @classmethod def get_list(cls, account: int = None, device_type: int = None, limit: int = 50, provider: int = None, quick_search: str = "", remote_custom_field: str = None, sort: str = "", start: int = 0, unique_id: str = "") -> Optional[Tuple[List[object], int]]: devices = super().get_list(start=start, limit=limit, account=account, device_type=device_type, provider=provider, quick_search=quick_search, remote_custom_field=remote_custom_field, sort=sort, unique_id=unique_id) return devices def __str__(self): return """id:{}, ipaddr:{}, mac:{}, unique_id:{}, remote_custom_field: {}, comment: {}, last_online: {}, \ last_fw_ver: {}, first_online: {}, use_nat: {}, operation_system: {}, \ udpxy_addr: {}, device_type: {}, provider: {}, account: {}""".format( self.id, self.ipaddr, self.mac, self.unique_id, self.remote_custom_field, self.comment, self.last_online, self.last_fw_ver, self.first_online, self.use_nat, self.operation_system, self.udpxy_addr, self.device_type, self.provider, self.account )
[]
eddiejessup/fealty
fealty/fields.py
03745eb98d85bc2a5d08920773ab9c4515462d30
""" A class hierarchy relating to fields of all kinds. """ from __future__ import print_function, division import numpy as np from ciabatta.meta import make_repr_str from fealty import lattice, field_numerics, walled_field_numerics class Space(object): def __init__(self, L, dim): self.L = L self.dim = dim @property def L_half(self): return self.L / 2.0 @property def A(self): return self.L ** self.dim def iterate(self, *args, **kwargs): pass def __repr__(self): fs = [('L', self.L), ('dim', self.dim)] return make_repr_str(self, fs) class Field(Space): def __init__(self, L, dim, dx): Space.__init__(self, L, dim) self.M = int(round(self.L / dx)) @property def dx(self): return self.L / self.M @property def A_i(self): return self.M ** self.dim @property def dA(self): return self.dx ** self.dim def density_field(self, r): return density(r, self.L, self.dx) def r_to_i(self, r): return lattice.r_to_i(r, self.L, self.dx) def i_to_r(self, i): return lattice.i_to_r(i, self.L, self.dx) def __repr__(self): fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx)] return make_repr_str(self, fs) class Scalar(Field): def __init__(self, L, dim, dx, a_0=0.0): Field.__init__(self, L, dim, dx) self.a = np.ones(self.dim * (self.M,), dtype=np.float) * a_0 def grad(self): return _grad(self.a, self.dx) def grad_i(self, r): return _grad_i(self.a, self.r_to_i(r), self.dx) def laplacian(self): return _laplace(self.a, self.dx) def __repr__(self): fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx), ('a_0', self.a_0)] return make_repr_str(self, fs) class Diffusing(Scalar): def __init__(self, L, dim, dx, D, dt, a_0=0.0): Scalar.__init__(self, L, dim, dx, a_0=a_0) self.D = D self.dt = dt if self.D > self.dx ** 2 / (2.0 * self.dim * self.dt): raise Exception('Unstable diffusion constant') def iterate(self): self.a += self.D * self.laplacian() * self.dt def __repr__(self): fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx), ('D', self.D), ('dt', self.dt), ('a_0', self.a_0)] return make_repr_str(self, fs) class WalledScalar(Scalar): def __init__(self, L, dim, dx, walls, a_0=0.0): Scalar.__init__(self, L, dim, dx, a_0=a_0) self.walls = walls # Make field zero-valued where obstructed self.a *= np.logical_not(self.walls) def grad(self): return _walled_grad(self.a, self.dx, self.walls) def grad_i(self, r): return _walled_grad_i(self.a, self.r_to_i(r), self.dx, self.walls) def laplacian(self): return _walled_laplace(self.a, self.dx, self.walls) def __repr__(self): fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx), ('walls', self.walls), ('a_0', self.a_0)] return make_repr_str(self, fs) # Note, inheritance order matters to get walled grad & laplacian call # (see diamond problem on wikipedia and how python handles it) class WalledDiffusing(WalledScalar, Diffusing): def __init__(self, L, dim, dx, walls, D, dt, a_0=0.0): Diffusing.__init__(self, L, dim, dx, D, dt, a_0=a_0) WalledScalar.__init__(self, L, dim, dx, walls, a_0=a_0) def __repr__(self): fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx), ('walls', self.walls), ('D', self.D), ('dt', self.dt), ('a_0', self.a_0)] return make_repr_str(self, fs) def density(r, L, dx): assert r.ndim == 2 M = int(round(L / dx)) dx = L / M inds = lattice.r_to_i(r, L, dx) f = np.zeros(r.shape[1] * (M,), dtype=np.int) if f.ndim == 1: field_numerics.density_1d(inds, f) elif f.ndim == 2: field_numerics.density_2d(inds, f) elif f.ndim == 3: field_numerics.density_3d(inds, f) else: raise Exception('Density calc not implemented in this dimension') return f / dx ** r.shape[1] def 
_laplace(field, dx): assert dx > 0.0 laplace = np.empty_like(field) if field.ndim == 1: field_numerics.laplace_1d(field, laplace, dx) elif field.ndim == 2: field_numerics.laplace_2d(field, laplace, dx) elif field.ndim == 3: field_numerics.laplace_3d(field, laplace, dx) else: raise Exception('Laplacian not implemented in this dimension') return laplace def _grad_i(field, inds, dx): assert dx > 0.0 assert inds.ndim == 2 assert field.ndim == inds.shape[1] grad_i = np.empty(inds.shape, dtype=field.dtype) if field.ndim == 1: field_numerics.grad_i_1d(field, inds, grad_i, dx) elif field.ndim == 2: field_numerics.grad_i_2d(field, inds, grad_i, dx) elif field.ndim == 3: field_numerics.grad_i_3d(field, grad_i, dx) else: raise Exception("Grad_i not implemented in this dimension") return grad_i def _grad(field, dx): assert dx > 0.0 grad = np.empty(field.shape + (field.ndim,), dtype=field.dtype) if field.ndim == 1: field_numerics.grad_1d(field, grad, dx) elif field.ndim == 2: field_numerics.grad_2d(field, grad, dx) elif field.ndim == 3: field_numerics.grad_3d(field, grad, dx) else: raise Exception('Grad not implemented in this dimension') return grad def _div(field, dx): assert dx > 0.0 div = np.empty(field.shape[:-1], dtype=field.dtype) if field.ndim == 2: field_numerics.div_1d(field, div, dx) elif field.ndim == 3: field_numerics.div_2d(field, div, dx) elif field.ndim == 4: field_numerics.div_3d(field, div, dx) else: raise Exception('Divergence not implemented in this dimension') return div def _walled_grad(field, dx, walls): assert field.shape == walls.shape assert dx > 0.0 grad = np.empty(field.shape + (field.ndim,), dtype=field.dtype) if field.ndim == 1: walled_field_numerics.grad_1d(field, grad, dx, walls) elif field.ndim == 2: walled_field_numerics.grad_2d(field, grad, dx, walls) elif field.ndim == 3: walled_field_numerics.grad_3d(field, grad, dx, walls) else: raise Exception("Walled grad not implemented in this dimension") return grad def _walled_grad_i(field, inds, dx, walls): assert field.shape == walls.shape assert dx > 0.0 assert inds.ndim == 2 assert field.ndim == inds.shape[1] grad_i = np.empty(inds.shape, dtype=field.dtype) if field.ndim == 1: walled_field_numerics.grad_i_1d(field, inds, grad_i, dx, walls) elif field.ndim == 2: walled_field_numerics.grad_i_2d(field, inds, grad_i, dx, walls) elif field.ndim == 3: walled_field_numerics.grad_i_3d(field, inds, grad_i, dx, walls) else: raise Exception("Walled Grad_i not implemented in this dimension") return grad_i def _walled_laplace(field, dx, walls): assert field.shape == walls.shape assert dx > 0.0 laplace = np.empty_like(field) if field.ndim == 1: walled_field_numerics.laplace_1d(field, laplace, dx, walls) elif field.ndim == 2: walled_field_numerics.laplace_2d(field, laplace, dx, walls) elif field.ndim == 3: walled_field_numerics.laplace_3d(field, laplace, dx, walls) else: raise Exception('Laplacian not implemented in this dimension') return laplace
[((147, 11, 147, 35), 'fealty.lattice.r_to_i', 'lattice.r_to_i', ({(147, 26, 147, 27): 'r', (147, 29, 147, 30): 'L', (147, 32, 147, 34): 'dx'}, {}), '(r, L, dx)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((148, 8, 148, 49), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((162, 14, 162, 34), 'numpy.empty_like', 'np.empty_like', ({(162, 28, 162, 33): 'field'}, {}), '(field)', True, 'import numpy as np\n'), ((178, 13, 178, 52), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((192, 11, 192, 67), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((206, 10, 206, 55), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((221, 11, 221, 67), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((238, 13, 238, 52), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((253, 14, 253, 34), 'numpy.empty_like', 'np.empty_like', ({(253, 28, 253, 33): 'field'}, {}), '(field)', True, 'import numpy as np\n'), ((29, 15, 29, 38), 'ciabatta.meta.make_repr_str', 'make_repr_str', ({(29, 29, 29, 33): 'self', (29, 35, 29, 37): 'fs'}, {}), '(self, fs)', False, 'from ciabatta.meta import make_repr_str\n'), ((54, 15, 54, 49), 'fealty.lattice.r_to_i', 'lattice.r_to_i', ({(54, 30, 54, 31): 'r', (54, 33, 54, 39): 'self.L', (54, 41, 54, 48): 'self.dx'}, {}), '(r, self.L, self.dx)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((57, 15, 57, 49), 'fealty.lattice.i_to_r', 'lattice.i_to_r', ({(57, 30, 57, 31): 'i', (57, 33, 57, 39): 'self.L', (57, 41, 57, 48): 'self.dx'}, {}), '(i, self.L, self.dx)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((61, 15, 61, 38), 'ciabatta.meta.make_repr_str', 'make_repr_str', ({(61, 29, 61, 33): 'self', (61, 35, 61, 37): 'fs'}, {}), '(self, fs)', False, 'from ciabatta.meta import make_repr_str\n'), ((82, 15, 82, 38), 'ciabatta.meta.make_repr_str', 'make_repr_str', ({(82, 29, 82, 33): 'self', (82, 35, 82, 37): 'fs'}, {}), '(self, fs)', False, 'from ciabatta.meta import make_repr_str\n'), ((101, 15, 101, 38), 'ciabatta.meta.make_repr_str', 'make_repr_str', ({(101, 29, 101, 33): 'self', (101, 35, 101, 37): 'fs'}, {}), '(self, fs)', False, 'from ciabatta.meta import make_repr_str\n'), ((110, 18, 110, 44), 'numpy.logical_not', 'np.logical_not', ({(110, 33, 110, 43): 'self.walls'}, {}), '(self.walls)', True, 'import numpy as np\n'), ((125, 15, 125, 38), 'ciabatta.meta.make_repr_str', 'make_repr_str', ({(125, 29, 125, 33): 'self', (125, 35, 125, 37): 'fs'}, {}), '(self, fs)', False, 'from ciabatta.meta import make_repr_str\n'), ((140, 15, 140, 38), 'ciabatta.meta.make_repr_str', 'make_repr_str', ({(140, 29, 140, 33): 'self', (140, 35, 140, 37): 'fs'}, {}), '(self, fs)', False, 'from ciabatta.meta import make_repr_str\n'), ((150, 8, 150, 42), 'fealty.field_numerics.density_1d', 'field_numerics.density_1d', ({(150, 34, 150, 38): 'inds', (150, 40, 150, 41): 'f'}, {}), '(inds, f)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((164, 8, 164, 53), 'fealty.field_numerics.laplace_1d', 'field_numerics.laplace_1d', ({(164, 34, 164, 39): 'field', (164, 41, 164, 48): 'laplace', (164, 50, 164, 52): 'dx'}, {}), '(field, laplace, dx)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((180, 8, 180, 57), 'fealty.field_numerics.grad_i_1d', 'field_numerics.grad_i_1d', ({(180, 33, 180, 38): 'field', (180, 40, 180, 44): 'inds', (180, 46, 180, 52): 'grad_i', 
(180, 54, 180, 56): 'dx'}, {}), '(field, inds, grad_i, dx)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((194, 8, 194, 47), 'fealty.field_numerics.grad_1d', 'field_numerics.grad_1d', ({(194, 31, 194, 36): 'field', (194, 38, 194, 42): 'grad', (194, 44, 194, 46): 'dx'}, {}), '(field, grad, dx)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((208, 8, 208, 45), 'fealty.field_numerics.div_1d', 'field_numerics.div_1d', ({(208, 30, 208, 35): 'field', (208, 37, 208, 40): 'div', (208, 42, 208, 44): 'dx'}, {}), '(field, div, dx)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((223, 8, 223, 61), 'fealty.walled_field_numerics.grad_1d', 'walled_field_numerics.grad_1d', ({(223, 38, 223, 43): 'field', (223, 45, 223, 49): 'grad', (223, 51, 223, 53): 'dx', (223, 55, 223, 60): 'walls'}, {}), '(field, grad, dx, walls)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((240, 8, 240, 71), 'fealty.walled_field_numerics.grad_i_1d', 'walled_field_numerics.grad_i_1d', ({(240, 40, 240, 45): 'field', (240, 47, 240, 51): 'inds', (240, 53, 240, 59): 'grad_i', (240, 61, 240, 63): 'dx', (240, 65, 240, 70): 'walls'}, {}), '(field, inds, grad_i, dx, walls)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((255, 8, 255, 67), 'fealty.walled_field_numerics.laplace_1d', 'walled_field_numerics.laplace_1d', ({(255, 41, 255, 46): 'field', (255, 48, 255, 55): 'laplace', (255, 57, 255, 59): 'dx', (255, 61, 255, 66): 'walls'}, {}), '(field, laplace, dx, walls)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((68, 17, 68, 62), 'numpy.ones', 'np.ones', (), '', True, 'import numpy as np\n'), ((152, 8, 152, 42), 'fealty.field_numerics.density_2d', 'field_numerics.density_2d', ({(152, 34, 152, 38): 'inds', (152, 40, 152, 41): 'f'}, {}), '(inds, f)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((166, 8, 166, 53), 'fealty.field_numerics.laplace_2d', 'field_numerics.laplace_2d', ({(166, 34, 166, 39): 'field', (166, 41, 166, 48): 'laplace', (166, 50, 166, 52): 'dx'}, {}), '(field, laplace, dx)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((182, 8, 182, 57), 'fealty.field_numerics.grad_i_2d', 'field_numerics.grad_i_2d', ({(182, 33, 182, 38): 'field', (182, 40, 182, 44): 'inds', (182, 46, 182, 52): 'grad_i', (182, 54, 182, 56): 'dx'}, {}), '(field, inds, grad_i, dx)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((196, 8, 196, 47), 'fealty.field_numerics.grad_2d', 'field_numerics.grad_2d', ({(196, 31, 196, 36): 'field', (196, 38, 196, 42): 'grad', (196, 44, 196, 46): 'dx'}, {}), '(field, grad, dx)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((210, 8, 210, 45), 'fealty.field_numerics.div_2d', 'field_numerics.div_2d', ({(210, 30, 210, 35): 'field', (210, 37, 210, 40): 'div', (210, 42, 210, 44): 'dx'}, {}), '(field, div, dx)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((225, 8, 225, 61), 'fealty.walled_field_numerics.grad_2d', 'walled_field_numerics.grad_2d', ({(225, 38, 225, 43): 'field', (225, 45, 225, 49): 'grad', (225, 51, 225, 53): 'dx', (225, 55, 225, 60): 'walls'}, {}), '(field, grad, dx, walls)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((242, 8, 242, 71), 'fealty.walled_field_numerics.grad_i_2d', 
'walled_field_numerics.grad_i_2d', ({(242, 40, 242, 45): 'field', (242, 47, 242, 51): 'inds', (242, 53, 242, 59): 'grad_i', (242, 61, 242, 63): 'dx', (242, 65, 242, 70): 'walls'}, {}), '(field, inds, grad_i, dx, walls)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((257, 8, 257, 67), 'fealty.walled_field_numerics.laplace_2d', 'walled_field_numerics.laplace_2d', ({(257, 41, 257, 46): 'field', (257, 48, 257, 55): 'laplace', (257, 57, 257, 59): 'dx', (257, 61, 257, 66): 'walls'}, {}), '(field, laplace, dx, walls)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((154, 8, 154, 42), 'fealty.field_numerics.density_3d', 'field_numerics.density_3d', ({(154, 34, 154, 38): 'inds', (154, 40, 154, 41): 'f'}, {}), '(inds, f)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((168, 8, 168, 53), 'fealty.field_numerics.laplace_3d', 'field_numerics.laplace_3d', ({(168, 34, 168, 39): 'field', (168, 41, 168, 48): 'laplace', (168, 50, 168, 52): 'dx'}, {}), '(field, laplace, dx)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((184, 8, 184, 51), 'fealty.field_numerics.grad_i_3d', 'field_numerics.grad_i_3d', ({(184, 33, 184, 38): 'field', (184, 40, 184, 46): 'grad_i', (184, 48, 184, 50): 'dx'}, {}), '(field, grad_i, dx)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((198, 8, 198, 47), 'fealty.field_numerics.grad_3d', 'field_numerics.grad_3d', ({(198, 31, 198, 36): 'field', (198, 38, 198, 42): 'grad', (198, 44, 198, 46): 'dx'}, {}), '(field, grad, dx)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((212, 8, 212, 45), 'fealty.field_numerics.div_3d', 'field_numerics.div_3d', ({(212, 30, 212, 35): 'field', (212, 37, 212, 40): 'div', (212, 42, 212, 44): 'dx'}, {}), '(field, div, dx)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((227, 8, 227, 61), 'fealty.walled_field_numerics.grad_3d', 'walled_field_numerics.grad_3d', ({(227, 38, 227, 43): 'field', (227, 45, 227, 49): 'grad', (227, 51, 227, 53): 'dx', (227, 55, 227, 60): 'walls'}, {}), '(field, grad, dx, walls)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((244, 8, 244, 71), 'fealty.walled_field_numerics.grad_i_3d', 'walled_field_numerics.grad_i_3d', ({(244, 40, 244, 45): 'field', (244, 47, 244, 51): 'inds', (244, 53, 244, 59): 'grad_i', (244, 61, 244, 63): 'dx', (244, 65, 244, 70): 'walls'}, {}), '(field, inds, grad_i, dx, walls)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((259, 8, 259, 67), 'fealty.walled_field_numerics.laplace_3d', 'walled_field_numerics.laplace_3d', ({(259, 41, 259, 46): 'field', (259, 48, 259, 55): 'laplace', (259, 57, 259, 59): 'dx', (259, 61, 259, 66): 'walls'}, {}), '(field, laplace, dx, walls)', False, 'from fealty import lattice, field_numerics, walled_field_numerics\n')]
cpascariello/aleph-vm
examples/example_django/example_django/asgi.py
1b4920bec211ef3bd379e9359f57f06b9308c1a1
""" ASGI config for example_django project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_django.settings") application = get_asgi_application() os.system("/usr/bin/python3 /opt/code/manage.py migrate") os.system("/usr/bin/python3 /opt/code/manage.py " "loaddata /opt/code/blog/fixtures/default_articles.json")
[((14, 0, 14, 74), 'os.environ.setdefault', 'os.environ.setdefault', ({(14, 22, 14, 46): '"""DJANGO_SETTINGS_MODULE"""', (14, 48, 14, 73): '"""example_django.settings"""'}, {}), "('DJANGO_SETTINGS_MODULE', 'example_django.settings')", False, 'import os\n'), ((16, 14, 16, 36), 'django.core.asgi.get_asgi_application', 'get_asgi_application', ({}, {}), '()', False, 'from django.core.asgi import get_asgi_application\n'), ((18, 0, 18, 57), 'os.system', 'os.system', ({(18, 10, 18, 56): '"""/usr/bin/python3 /opt/code/manage.py migrate"""'}, {}), "('/usr/bin/python3 /opt/code/manage.py migrate')", False, 'import os\n'), ((20, 0, 21, 67), 'os.system', 'os.system', ({(20, 10, 21, 66): '"""/usr/bin/python3 /opt/code/manage.py loaddata /opt/code/blog/fixtures/default_articles.json"""'}, {}), "(\n '/usr/bin/python3 /opt/code/manage.py loaddata /opt/code/blog/fixtures/default_articles.json'\n )", False, 'import os\n')]
loikein/ekw-lectures
lectures/extensions/hyperbolic_discounting/replication_code/src/analysis/get_bivariate_distr_data.py
a2f5436f10515ab26eab323fca8c37c91bdc5dcd
"""Generate values of Method of Simulated Moments criterion function. Given observed moments and weighting matrix in `OUT_ANALYSIS`, "msm_estimation", generate values of Method of Simulated Moments criterion function for combinations of discount factor and present bias values. The goal is to study the bivariate distribution of the time preference parameters around the combination of true parameter values. """ import itertools import numpy as np import pandas as pd import respy as rp import yaml from bld.project_paths import project_paths_join as ppj from src.library.compute_moments import _replace_nans from src.library.compute_moments import calc_restricted_choice_probabilities from src.library.compute_moments import calc_restricted_wage_distribution from src.library.compute_moments import calc_unrestricted_choice_probabilities from src.library.compute_moments import calc_unrestricted_wage_distribution from src.library.compute_moments import calc_very_restricted_choice_probabilities from src.library.compute_moments import calc_very_restricted_wage_distribution from src.library.housekeeping import _load_pickle from src.library.housekeeping import _temporary_working_directory from tqdm import tqdm def get_bivariate_distribution(params, crit_func, grid_delta, grid_beta): """Compute value of criterion function. Args: params (pd.DataFrame): DataFrame containing model parameters. crit_func (dict): Dictionary containing model options. grid_delta (np.array): Values of discount factor. grid_beta (np.array): Values of present-bias parameter. Returns: pd.DataFrame """ results = [] for beta, delta in tqdm(itertools.product(grid_beta, grid_delta)): params_ = params.copy() params_.loc[("beta", "beta"), "value"] = beta params_.loc[("delta", "delta"), "value"] = delta val = crit_func(params_) result = {"beta": beta, "delta": delta, "val": val} results.append(result) return pd.DataFrame.from_dict(results) if __name__ == "__main__": # load params params = pd.read_csv( ppj("IN_MODEL_SPECS", "params_hyp.csv"), sep=";", index_col=["category", "name"], ) params["value"] = params["value"].astype(float) # load options with open(ppj("IN_MODEL_SPECS", "options_hyp.yaml")) as options: options = yaml.safe_load(options) # get empirical moments empirical_moments = _load_pickle(ppj("OUT_ANALYSIS", "msm_estimation", "moments_hyp.pickle")) # get weighting matrix weighting_matrix = _load_pickle( ppj("OUT_ANALYSIS", "msm_estimation", "weighting_matrix_hyp.pickle") ) calc_moments = { "Choice Probabilities Very Restricted": calc_very_restricted_choice_probabilities, "Choice Probabilities Restricted": calc_restricted_choice_probabilities, "Choice Probabilities Unrestricted": calc_unrestricted_choice_probabilities, "Wage Distribution Very Restricted": calc_very_restricted_wage_distribution, "Wage Distribution Restricted": calc_restricted_wage_distribution, "Wage Distribution Unrestricted": calc_unrestricted_wage_distribution, } with _temporary_working_directory(snippet="heatmap"): # get criterion function weighted_sum_squared_errors = rp.get_moment_errors_func( params=params, options=options, calc_moments=calc_moments, replace_nans=_replace_nans, empirical_moments=empirical_moments, weighting_matrix=weighting_matrix, ) # get bivariate distribution results results = get_bivariate_distribution( crit_func=weighted_sum_squared_errors, params=params, grid_delta=np.arange(0.945, 0.9625, 0.0025), grid_beta=np.arange(0.75, 1.05, 0.01), ) results.to_csv(ppj("OUT_ANALYSIS", "heatmap.csv"))
[((53, 11, 53, 42), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', ({(53, 34, 53, 41): 'results'}, {}), '(results)', True, 'import pandas as pd\n'), ((45, 28, 45, 68), 'itertools.product', 'itertools.product', ({(45, 46, 45, 55): 'grid_beta', (45, 57, 45, 67): 'grid_delta'}, {}), '(grid_beta, grid_delta)', False, 'import itertools\n'), ((60, 8, 60, 47), 'bld.project_paths.project_paths_join', 'ppj', ({(60, 12, 60, 28): '"""IN_MODEL_SPECS"""', (60, 30, 60, 46): '"""params_hyp.csv"""'}, {}), "('IN_MODEL_SPECS', 'params_hyp.csv')", True, 'from bld.project_paths import project_paths_join as ppj\n'), ((68, 18, 68, 41), 'yaml.safe_load', 'yaml.safe_load', ({(68, 33, 68, 40): 'options'}, {}), '(options)', False, 'import yaml\n'), ((71, 37, 71, 96), 'bld.project_paths.project_paths_join', 'ppj', ({(71, 41, 71, 55): '"""OUT_ANALYSIS"""', (71, 57, 71, 73): '"""msm_estimation"""', (71, 75, 71, 95): '"""moments_hyp.pickle"""'}, {}), "('OUT_ANALYSIS', 'msm_estimation', 'moments_hyp.pickle')", True, 'from bld.project_paths import project_paths_join as ppj\n'), ((75, 8, 75, 76), 'bld.project_paths.project_paths_join', 'ppj', ({(75, 12, 75, 26): '"""OUT_ANALYSIS"""', (75, 28, 75, 44): '"""msm_estimation"""', (75, 46, 75, 75): '"""weighting_matrix_hyp.pickle"""'}, {}), "('OUT_ANALYSIS', 'msm_estimation', 'weighting_matrix_hyp.pickle')", True, 'from bld.project_paths import project_paths_join as ppj\n'), ((87, 9, 87, 56), 'src.library.housekeeping._temporary_working_directory', '_temporary_working_directory', (), '', False, 'from src.library.housekeeping import _temporary_working_directory\n'), ((90, 38, 97, 9), 'respy.get_moment_errors_func', 'rp.get_moment_errors_func', (), '', True, 'import respy as rp\n'), ((67, 14, 67, 55), 'bld.project_paths.project_paths_join', 'ppj', ({(67, 18, 67, 34): '"""IN_MODEL_SPECS"""', (67, 36, 67, 54): '"""options_hyp.yaml"""'}, {}), "('IN_MODEL_SPECS', 'options_hyp.yaml')", True, 'from bld.project_paths import project_paths_join as ppj\n'), ((107, 23, 107, 57), 'bld.project_paths.project_paths_join', 'ppj', ({(107, 27, 107, 41): '"""OUT_ANALYSIS"""', (107, 43, 107, 56): '"""heatmap.csv"""'}, {}), "('OUT_ANALYSIS', 'heatmap.csv')", True, 'from bld.project_paths import project_paths_join as ppj\n'), ((103, 23, 103, 55), 'numpy.arange', 'np.arange', ({(103, 33, 103, 38): '0.945', (103, 40, 103, 46): '0.9625', (103, 48, 103, 54): '0.0025'}, {}), '(0.945, 0.9625, 0.0025)', True, 'import numpy as np\n'), ((104, 22, 104, 49), 'numpy.arange', 'np.arange', ({(104, 32, 104, 36): '0.75', (104, 38, 104, 42): '1.05', (104, 44, 104, 48): '0.01'}, {}), '(0.75, 1.05, 0.01)', True, 'import numpy as np\n')]
Iwomichu/probable-giggle
space_game/events/KeyPressedEvent.py
2af5ed83a60d65ec9d509c217cb5fcb880d5dbcc
from dataclasses import dataclass from space_game.domain_names import KeyId from space_game.events.Event import Event @dataclass class KeyPressedEvent(Event): key_id: KeyId
[]
navoday-91/oncall
src/oncall/messengers/teams_messenger.py
0a977f06bbf308978d0d2c2b46e0aca23937ca9a
import pymsteams import logging from oncall.constants import TEAMS_SUPPORT class teams_messenger(object): supports = frozenset([TEAMS_SUPPORT]) def __init__(self, config): self.webhook = config['webhook'] def send(self, message): heading = message.get("subject") final_message = "User: " + message.get("user") + " Message: " + message.get("body") try: myTeamsMessage = pymsteams.connectorcard(self.webhook) myTeamsMessage.title(str(heading)) myTeamsMessage.text(str(final_message)) myTeamsMessage.send() except: logging.info("An issue occured while sending message to teams messenger")
[((17, 29, 17, 66), 'pymsteams.connectorcard', 'pymsteams.connectorcard', ({(17, 53, 17, 65): 'self.webhook'}, {}), '(self.webhook)', False, 'import pymsteams\n'), ((22, 12, 22, 85), 'logging.info', 'logging.info', ({(22, 25, 22, 84): '"""An issue occured while sending message to teams messenger"""'}, {}), "('An issue occured while sending message to teams messenger')", False, 'import logging\n')]
Laniakea94/BigDL
python/chronos/src/bigdl/chronos/autots/model/auto_prophet.py
4d01734086dda893a7f08ba53251dc3c5c8ecfd1
# + # # Copyright 2016 The BigDL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either exp' # ress or implied. # See the License for the specific language governing permissions and # limitations under the License. # import pandas as pd import warnings from bigdl.chronos.model.prophet import ProphetBuilder, ProphetModel from bigdl.chronos.autots.utils import recalculate_n_sampling # - class AutoProphet: def __init__(self, changepoint_prior_scale=None, seasonality_prior_scale=None, holidays_prior_scale=None, seasonality_mode=None, changepoint_range=None, metric='mse', logs_dir="/tmp/auto_prophet_logs", cpus_per_trial=1, name="auto_prophet", remote_dir=None, load_dir=None, **prophet_config ): """ Create an automated Prophet Model. User need to specify either the exact value or the search space of the Prophet model hyperparameters. For details of the Prophet model hyperparameters, refer to https://facebook.github.io/prophet/docs/diagnostics.html#hyperparameter-tuning. :param changepoint_prior_scale: Int or hp sampling function from an integer space for hyperparameter changepoint_prior_scale for the Prophet model. For hp sampling, see bigdl.chronos.orca.automl.hp for more details. e.g. hp.loguniform(0.001, 0.5). :param seasonality_prior_scale: hyperparameter seasonality_prior_scale for the Prophet model. e.g. hp.loguniform(0.01, 10). :param holidays_prior_scale: hyperparameter holidays_prior_scale for the Prophet model. e.g. hp.loguniform(0.01, 10). :param seasonality_mode: hyperparameter seasonality_mode for the Prophet model. e.g. hp.choice(['additive', 'multiplicative']). :param changepoint_range: hyperparameter changepoint_range for the Prophet model. e.g. hp.uniform(0.8, 0.95). :param metric: String. The evaluation metric name to optimize. e.g. "mse" :param logs_dir: Local directory to save logs and results. It defaults to "/tmp/auto_prophet_logs" :param cpus_per_trial: Int. Number of cpus for each trial. It defaults to 1. :param name: name of the AutoProphet. It defaults to "auto_prophet" :param remote_dir: String. Remote directory to sync training results and checkpoints. It defaults to None and doesn't take effects while running in local. While running in cluster, it defaults to "hdfs:///tmp/{name}". :param load_dir: Load the ckpt from load_dir. The value defaults to None. :param prophet_config: Other Prophet hyperparameters. 
""" if load_dir: self.best_model = ProphetModel() self.best_model.restore(load_dir) try: from bigdl.orca.automl.auto_estimator import AutoEstimator import bigdl.orca.automl.hp as hp self.search_space = { "changepoint_prior_scale": hp.grid_search([0.005, 0.05, 0.1, 0.5]) if changepoint_prior_scale is None else changepoint_prior_scale, "seasonality_prior_scale": hp.grid_search([0.01, 0.1, 1.0, 10.0]) if seasonality_prior_scale is None else seasonality_prior_scale, "holidays_prior_scale": hp.loguniform(0.01, 10) if holidays_prior_scale is None else holidays_prior_scale, "seasonality_mode": hp.choice(['additive', 'multiplicative']) if seasonality_mode is None else seasonality_mode, "changepoint_range": hp.uniform(0.8, 0.95) if changepoint_range is None else changepoint_range } self.search_space.update(prophet_config) # update other configs self.metric = metric model_builder = ProphetBuilder() self.auto_est = AutoEstimator(model_builder=model_builder, logs_dir=logs_dir, resources_per_trial={"cpu": cpus_per_trial}, remote_dir=remote_dir, name=name) except ImportError: warnings.warn("You need to install `bigdl-orca[automl]` to use `fit` function.") def fit(self, data, cross_validation=True, expect_horizon=None, freq=None, metric_threshold=None, n_sampling=16, search_alg=None, search_alg_params=None, scheduler=None, scheduler_params=None, ): """ Automatically fit the model and search for the best hyperparameters. :param data: training data, a pandas dataframe with Td rows, and 2 columns, with column 'ds' indicating date and column 'y' indicating value and Td is the time dimension :param cross_validation: bool, if the eval result comes from cross_validation. The value is set to True by default. Setting this option to False to speed up the process. :param expect_horizon: int, validation data will be automatically splited from training data, and expect_horizon is the horizon you may need to use once the mode is fitted. The value defaults to None, where 10% of training data will be taken as the validation data. :param freq: the freqency of the training dataframe. the frequency can be anything from the pandas list of frequency strings here: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliasesDefaulted to None, where an unreliable frequency will be infer implicitly. :param metric_threshold: a trial will be terminated when metric threshold is met :param n_sampling: Number of trials to evaluate in total. Defaults to 16. If hp.grid_search is in search_space, the grid will be run n_sampling of trials and round up n_sampling according to hp.grid_search. If this is -1, (virtually) infinite samples are generated until a stopping condition is met. :param search_alg: str, all supported searcher provided by ray tune (i.e."variant_generator", "random", "ax", "dragonfly", "skopt", "hyperopt", "bayesopt", "bohb", "nevergrad", "optuna", "zoopt" and "sigopt") :param search_alg_params: extra parameters for searcher algorithm besides search_space, metric and searcher mode :param scheduler: str, all supported scheduler provided by ray tune :param scheduler_params: parameters for scheduler """ if expect_horizon is None: expect_horizon = int(0.1*len(data)) if freq is None: assert len(data) >= 2, "The training dataframe should contains more than 2 records." assert pd.api.types.is_datetime64_any_dtype(data["ds"].dtypes), \ "The 'ds' col should be in datetime 64 type, or you need to set `freq` in fit." 
self._freq = data["ds"].iloc[1] - data["ds"].iloc[0] else: self._freq = pd.Timedelta(freq) expect_horizon_str = str(self._freq * expect_horizon) self.search_space.update({"expect_horizon": expect_horizon_str, "cross_validation": cross_validation}) train_data = data if cross_validation else data[:len(data)-expect_horizon] validation_data = None if cross_validation else data[len(data)-expect_horizon:] n_sampling = recalculate_n_sampling(self.search_space, n_sampling) if n_sampling != -1 else -1 self.auto_est.fit(data=train_data, validation_data=validation_data, metric=self.metric, metric_threshold=metric_threshold, n_sampling=n_sampling, search_space=self.search_space, search_alg=search_alg, search_alg_params=search_alg_params, scheduler=scheduler, scheduler_params=scheduler_params ) # use the best config to fit a new prophet model on whole data self.best_model = ProphetBuilder().build(self.auto_est.get_best_config()) self.best_model.model.fit(data) def predict(self, horizon=1, freq="D", ds_data=None): """ Predict using the best model after HPO. :param horizon: the number of steps forward to predict :param freq: the freqency of the predicted dataframe, defaulted to day("D"), the frequency can be anything from the pandas list of frequency strings here: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases :param ds_data: a dataframe that has 1 column 'ds' indicating date. """ if self.best_model.model is None: raise RuntimeError( "You must call fit or restore first before calling predict!") return self.best_model.predict(horizon=horizon, freq=freq, ds_data=ds_data) def evaluate(self, data, metrics=['mse']): """ Evaluate using the best model after HPO. :param data: evaluation data, a pandas dataframe with Td rows, and 2 columns, with column 'ds' indicating date and column 'y' indicating value and Td is the time dimension :param metrics: A list contains metrics for test/valid data. """ if data is None: raise ValueError("Input invalid data of None") if self.best_model.model is None: raise RuntimeError( "You must call fit or restore first before calling evaluate!") return self.best_model.evaluate(target=data, metrics=metrics) def save(self, checkpoint_file): """ Save the best model after HPO. :param checkpoint_file: The location you want to save the best model, should be a json file """ if self.best_model.model is None: raise RuntimeError( "You must call fit or restore first before calling save!") self.best_model.save(checkpoint_file) def restore(self, checkpoint_file): """ Restore the best model after HPO. :param checkpoint_file: The checkpoint file location you want to load the best model. """ self.best_model.restore(checkpoint_file) def get_best_model(self): """ Get the best Prophet model. """ return self.best_model.model
[((78, 30, 78, 44), 'bigdl.chronos.model.prophet.ProphetModel', 'ProphetModel', ({}, {}), '()', False, 'from bigdl.chronos.model.prophet import ProphetBuilder, ProphetModel\n'), ((102, 28, 102, 44), 'bigdl.chronos.model.prophet.ProphetBuilder', 'ProphetBuilder', ({}, {}), '()', False, 'from bigdl.chronos.model.prophet import ProphetBuilder, ProphetModel\n'), ((103, 28, 107, 52), 'bigdl.orca.automl.auto_estimator.AutoEstimator', 'AutoEstimator', (), '', False, 'from bigdl.orca.automl.auto_estimator import AutoEstimator\n'), ((159, 19, 159, 74), 'pandas.api.types.is_datetime64_any_dtype', 'pd.api.types.is_datetime64_any_dtype', ({(159, 56, 159, 73): "data['ds'].dtypes"}, {}), "(data['ds'].dtypes)", True, 'import pandas as pd\n'), ((163, 25, 163, 43), 'pandas.Timedelta', 'pd.Timedelta', ({(163, 38, 163, 42): 'freq'}, {}), '(freq)', True, 'import pandas as pd\n'), ((169, 21, 170, 55), 'bigdl.chronos.autots.utils.recalculate_n_sampling', 'recalculate_n_sampling', ({(169, 44, 169, 61): 'self.search_space', (170, 44, 170, 54): 'n_sampling'}, {}), '(self.search_space, n_sampling)', False, 'from bigdl.chronos.autots.utils import recalculate_n_sampling\n'), ((109, 12, 109, 92), 'warnings.warn', 'warnings.warn', ({(109, 26, 109, 91): '"""You need to install `bigdl-orca[automl]` to use `fit` function."""'}, {}), "('You need to install `bigdl-orca[automl]` to use `fit` function.'\n )", False, 'import warnings\n'), ((183, 26, 183, 42), 'bigdl.chronos.model.prophet.ProphetBuilder', 'ProphetBuilder', ({}, {}), '()', False, 'from bigdl.chronos.model.prophet import ProphetBuilder, ProphetModel\n'), ((84, 43, 84, 82), 'bigdl.orca.automl.hp.grid_search', 'hp.grid_search', ({(84, 58, 84, 81): '[0.005, 0.05, 0.1, 0.5]'}, {}), '([0.005, 0.05, 0.1, 0.5])', True, 'import bigdl.orca.automl.hp as hp\n'), ((87, 43, 87, 81), 'bigdl.orca.automl.hp.grid_search', 'hp.grid_search', ({(87, 58, 87, 80): '[0.01, 0.1, 1.0, 10.0]'}, {}), '([0.01, 0.1, 1.0, 10.0])', True, 'import bigdl.orca.automl.hp as hp\n'), ((90, 40, 90, 63), 'bigdl.orca.automl.hp.loguniform', 'hp.loguniform', ({(90, 54, 90, 58): '(0.01)', (90, 60, 90, 62): '(10)'}, {}), '(0.01, 10)', True, 'import bigdl.orca.automl.hp as hp\n'), ((93, 36, 93, 77), 'bigdl.orca.automl.hp.choice', 'hp.choice', ({(93, 46, 93, 76): "['additive', 'multiplicative']"}, {}), "(['additive', 'multiplicative'])", True, 'import bigdl.orca.automl.hp as hp\n'), ((96, 37, 96, 58), 'bigdl.orca.automl.hp.uniform', 'hp.uniform', ({(96, 48, 96, 51): '(0.8)', (96, 53, 96, 57): '(0.95)'}, {}), '(0.8, 0.95)', True, 'import bigdl.orca.automl.hp as hp\n')]
arita37/normalizing-flows
nf/flows.py
c9896656bfd2007b0c17b801c0fe068560127301
import math import numpy as np import scipy as sp import scipy.linalg import torch import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F from nf.utils import unconstrained_RQS # supported non-linearities: note that the function must be invertible functional_derivatives = { torch.tanh: lambda x: 1 - torch.pow(torch.tanh(x), 2), F.leaky_relu: lambda x: (x > 0).type(torch.FloatTensor) + \ (x < 0).type(torch.FloatTensor) * -0.01, F.elu: lambda x: (x > 0).type(torch.FloatTensor) + \ (x < 0).type(torch.FloatTensor) * torch.exp(x) } class Planar(nn.Module): """ Planar flow. z = f(x) = x + u h(wᵀx + b) [Rezende and Mohamed, 2015] """ def __init__(self, dim, nonlinearity=torch.tanh): super().__init__() self.h = nonlinearity self.w = nn.Parameter(torch.Tensor(dim)) self.u = nn.Parameter(torch.Tensor(dim)) self.b = nn.Parameter(torch.Tensor(1)) self.reset_parameters(dim) def reset_parameters(self, dim): init.uniform_(self.w, -math.sqrt(1/dim), math.sqrt(1/dim)) init.uniform_(self.u, -math.sqrt(1/dim), math.sqrt(1/dim)) init.uniform_(self.b, -math.sqrt(1/dim), math.sqrt(1/dim)) def forward(self, x): """ Given x, returns z and the log-determinant log|df/dx|. Returns ------- """ if self.h in (F.elu, F.leaky_relu): u = self.u elif self.h == torch.tanh: scal = torch.log(1+torch.exp(self.w @ self.u)) - self.w @ self.u - 1 u = self.u + scal * self.w / torch.norm(self.w) else: raise NotImplementedError("Non-linearity is not supported.") lin = torch.unsqueeze(x @ self.w, 1) + self.b z = x + u * self.h(lin) phi = functional_derivatives[self.h](lin) * self.w log_det = torch.log(torch.abs(1 + phi @ u) + 1e-4) return z, log_det def backward(self, z): raise NotImplementedError("Planar flow has no algebraic inverse.") class Radial(nn.Module): """ Radial flow. z = f(x) = = x + β h(α, r)(z − z0) [Rezende and Mohamed 2015] """ def __init__(self, dim): super().__init__() self.x0 = nn.Parameter(torch.Tensor(dim)) self.log_alpha = nn.Parameter(torch.Tensor(1)) self.beta = nn.Parameter(torch.Tensor(1)) def reset_parameters(dim): init.uniform_(self.z0, -math.sqrt(1/dim), math.sqrt(1/dim)) init.uniform_(self.log_alpha, -math.sqrt(1/dim), math.sqrt(1/dim)) init.uniform_(self.beta, -math.sqrt(1/dim), math.sqrt(1/dim)) def forward(self, x): """ Given x, returns z and the log-determinant log|df/dx|. """ m, n = x.shape r = torch.norm(x - self.x0) h = 1 / (torch.exp(self.log_alpha) + r) beta = -torch.exp(self.log_alpha) + torch.log(1 + torch.exp(self.beta)) z = x + beta * h * (x - self.x0) log_det = (n - 1) * torch.log(1 + beta * h) + \ torch.log(1 + beta * h - \ beta * r / (torch.exp(self.log_alpha) + r) ** 2) return z, log_det class FCNN(nn.Module): """ Simple fully connected neural network. """ def __init__(self, in_dim, out_dim, hidden_dim): super().__init__() self.network = nn.Sequential( nn.Linear(in_dim, hidden_dim), nn.Tanh(), nn.Linear(hidden_dim, hidden_dim), nn.Tanh(), nn.Linear(hidden_dim, out_dim), ) def forward(self, x): return self.network(x) class RealNVP(nn.Module): """ Non-volume preserving flow. [Dinh et. al. 
2017] """ def __init__(self, dim, hidden_dim = 8, base_network=FCNN): super().__init__() self.dim = dim self.t1 = base_network(dim // 2, dim // 2, hidden_dim) self.s1 = base_network(dim // 2, dim // 2, hidden_dim) self.t2 = base_network(dim // 2, dim // 2, hidden_dim) self.s2 = base_network(dim // 2, dim // 2, hidden_dim) def forward(self, x): lower, upper = x[:,:self.dim // 2], x[:,self.dim // 2:] t1_transformed = self.t1(lower) s1_transformed = self.s1(lower) upper = t1_transformed + upper * torch.exp(s1_transformed) t2_transformed = self.t2(upper) s2_transformed = self.s2(upper) lower = t2_transformed + lower * torch.exp(s2_transformed) z = torch.cat([lower, upper], dim=1) log_det = torch.sum(s1_transformed, dim=1) + \ torch.sum(s2_transformed, dim=1) return z, log_det def backward(self, z): lower, upper = z[:,:self.dim // 2], z[:,self.dim // 2:] t2_transformed = self.t2(upper) s2_transformed = self.s2(upper) lower = (lower - t2_transformed) * torch.exp(-s2_transformed) t1_transformed = self.t1(lower) s1_transformed = self.s1(lower) upper = (upper - t1_transformed) * torch.exp(-s1_transformed) x = torch.cat([lower, upper], dim=1) log_det = torch.sum(-s1_transformed, dim=1) + \ torch.sum(-s2_transformed, dim=1) return x, log_det class MAF(nn.Module): """ Masked auto-regressive flow. [Papamakarios et al. 2018] """ def __init__(self, dim, hidden_dim = 8, base_network=FCNN): super().__init__() self.dim = dim self.layers = nn.ModuleList() self.initial_param = nn.Parameter(torch.Tensor(2)) for i in range(1, dim): self.layers += [base_network(i, 2, hidden_dim)] self.reset_parameters() def reset_parameters(self): init.uniform_(self.initial_param, -math.sqrt(0.5), math.sqrt(0.5)) def forward(self, x): z = torch.zeros_like(x) log_det = torch.zeros(z.shape[0]) for i in range(self.dim): if i == 0: mu, alpha = self.initial_param[0], self.initial_param[1] else: out = self.layers[i - 1](x[:, :i]) mu, alpha = out[:, 0], out[:, 1] z[:, i] = (x[:, i] - mu) / torch.exp(alpha) log_det -= alpha return z.flip(dims=(1,)), log_det def backward(self, z): x = torch.zeros_like(z) log_det = torch.zeros(z.shape[0]) z = z.flip(dims=(1,)) for i in range(self.dim): if i == 0: mu, alpha = self.initial_param[0], self.initial_param[1] else: out = self.layers[i - 1](x[:, :i]) mu, alpha = out[:, 0], out[:, 1] x[:, i] = mu + torch.exp(alpha) * z[:, i] log_det += alpha return x, log_det class ActNorm(nn.Module): """ ActNorm layer. [Kingma and Dhariwal, 2018.] """ def __init__(self, dim): super().__init__() self.dim = dim self.mu = nn.Parameter(torch.zeros(dim, dtype = torch.float)) self.log_sigma = nn.Parameter(torch.zeros(dim, dtype = torch.float)) def forward(self, x): z = x * torch.exp(self.log_sigma) + self.mu log_det = torch.sum(self.log_sigma) return z, log_det def backward(self, z): x = (z - self.mu) / torch.exp(self.log_sigma) log_det = -torch.sum(self.log_sigma) return x, log_det class OneByOneConv(nn.Module): """ Invertible 1x1 convolution. [Kingma and Dhariwal, 2018.] 
""" def __init__(self, dim): super().__init__() self.dim = dim W, _ = sp.linalg.qr(np.random.randn(dim, dim)) P, L, U = sp.linalg.lu(W) self.P = torch.tensor(P, dtype = torch.float) self.L = nn.Parameter(torch.tensor(L, dtype = torch.float)) self.S = nn.Parameter(torch.tensor(np.diag(U), dtype = torch.float)) self.U = nn.Parameter(torch.triu(torch.tensor(U, dtype = torch.float), diagonal = 1)) self.W_inv = None def forward(self, x): L = torch.tril(self.L, diagonal = -1) + torch.diag(torch.ones(self.dim)) U = torch.triu(self.U, diagonal = 1) z = x @ self.P @ L @ (U + torch.diag(self.S)) log_det = torch.sum(torch.log(torch.abs(self.S))) return z, log_det def backward(self, z): if not self.W_inv: L = torch.tril(self.L, diagonal = -1) + \ torch.diag(torch.ones(self.dim)) U = torch.triu(self.U, diagonal = 1) W = self.P @ L @ (U + torch.diag(self.S)) self.W_inv = torch.inverse(W) x = z @ self.W_inv log_det = -torch.sum(torch.log(torch.abs(self.S))) return x, log_det class NSF_AR(nn.Module): """ Neural spline flow, auto-regressive. [Durkan et al. 2019] """ def __init__(self, dim, K = 5, B = 3, hidden_dim = 8, base_network = FCNN): super().__init__() self.dim = dim self.K = K self.B = B self.layers = nn.ModuleList() self.init_param = nn.Parameter(torch.Tensor(3 * K - 1)) for i in range(1, dim): self.layers += [base_network(i, 3 * K - 1, hidden_dim)] self.reset_parameters() def reset_parameters(self): init.uniform_(self.init_param, - 1 / 2, 1 / 2) def forward(self, x): z = torch.zeros_like(x) log_det = torch.zeros(z.shape[0]) for i in range(self.dim): if i == 0: init_param = self.init_param.expand(x.shape[0], 3 * self.K - 1) W, H, D = torch.split(init_param, self.K, dim = 1) else: out = self.layers[i - 1](x[:, :i]) W, H, D = torch.split(out, self.K, dim = 1) W, H = torch.softmax(W, dim = 1), torch.softmax(H, dim = 1) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) z[:, i], ld = unconstrained_RQS( x[:, i], W, H, D, inverse=False, tail_bound=self.B) log_det += ld return z, log_det def backward(self, z): x = torch.zeros_like(z) log_det = torch.zeros(x.shape[0]) for i in range(self.dim): if i == 0: init_param = self.init_param.expand(x.shape[0], 3 * self.K - 1) W, H, D = torch.split(init_param, self.K, dim = 1) else: out = self.layers[i - 1](x[:, :i]) W, H, D = torch.split(out, self.K, dim = 1) W, H = torch.softmax(W, dim = 1), torch.softmax(H, dim = 1) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) x[:, i], ld = unconstrained_RQS( z[:, i], W, H, D, inverse = True, tail_bound = self.B) log_det += ld return x, log_det class NSF_CL(nn.Module): """ Neural spline flow, coupling layer. [Durkan et al. 
2019] """ def __init__(self, dim, K = 5, B = 3, hidden_dim = 8, base_network = FCNN): super().__init__() self.dim = dim self.K = K self.B = B self.f1 = base_network(dim // 2, (3 * K - 1) * dim // 2, hidden_dim) self.f2 = base_network(dim // 2, (3 * K - 1) * dim // 2, hidden_dim) def forward(self, x): log_det = torch.zeros(x.shape[0]) lower, upper = x[:, :self.dim // 2], x[:, self.dim // 2:] out = self.f1(lower).reshape(-1, self.dim // 2, 3 * self.K - 1) W, H, D = torch.split(out, self.K, dim = 2) W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) upper, ld = unconstrained_RQS( upper, W, H, D, inverse=False, tail_bound=self.B) log_det += torch.sum(ld, dim = 1) out = self.f2(upper).reshape(-1, self.dim // 2, 3 * self.K - 1) W, H, D = torch.split(out, self.K, dim = 2) W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) lower, ld = unconstrained_RQS( lower, W, H, D, inverse=False, tail_bound=self.B) log_det += torch.sum(ld, dim = 1) return torch.cat([lower, upper], dim = 1), log_det def backward(self, z): log_det = torch.zeros(z.shape[0]) lower, upper = z[:, :self.dim // 2], z[:, self.dim // 2:] out = self.f2(upper).reshape(-1, self.dim // 2, 3 * self.K - 1) W, H, D = torch.split(out, self.K, dim = 2) W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) lower, ld = unconstrained_RQS( lower, W, H, D, inverse=True, tail_bound=self.B) log_det += torch.sum(ld, dim = 1) out = self.f1(lower).reshape(-1, self.dim // 2, 3 * self.K - 1) W, H, D = torch.split(out, self.K, dim = 2) W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) upper, ld = unconstrained_RQS( upper, W, H, D, inverse = True, tail_bound = self.B) log_det += torch.sum(ld, dim = 1) return torch.cat([lower, upper], dim = 1), log_det
[((90, 12, 90, 35), 'torch.norm', 'torch.norm', ({(90, 23, 90, 34): 'x - self.x0'}, {}), '(x - self.x0)', False, 'import torch\n'), ((140, 12, 140, 44), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((153, 12, 153, 44), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((168, 22, 168, 37), 'torch.nn.ModuleList', 'nn.ModuleList', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((178, 12, 178, 31), 'torch.zeros_like', 'torch.zeros_like', ({(178, 29, 178, 30): 'x'}, {}), '(x)', False, 'import torch\n'), ((179, 18, 179, 41), 'torch.zeros', 'torch.zeros', ({(179, 30, 179, 40): 'z.shape[0]'}, {}), '(z.shape[0])', False, 'import torch\n'), ((191, 12, 191, 31), 'torch.zeros_like', 'torch.zeros_like', ({(191, 29, 191, 30): 'z'}, {}), '(z)', False, 'import torch\n'), ((192, 18, 192, 41), 'torch.zeros', 'torch.zeros', ({(192, 30, 192, 40): 'z.shape[0]'}, {}), '(z.shape[0])', False, 'import torch\n'), ((219, 18, 219, 43), 'torch.sum', 'torch.sum', ({(219, 28, 219, 42): 'self.log_sigma'}, {}), '(self.log_sigma)', False, 'import torch\n'), ((238, 18, 238, 33), 'scipy.linalg.lu', 'sp.linalg.lu', ({(238, 31, 238, 32): 'W'}, {}), '(W)', True, 'import scipy as sp\n'), ((239, 17, 239, 53), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((248, 12, 248, 44), 'torch.triu', 'torch.triu', (), '', False, 'import torch\n'), ((276, 22, 276, 37), 'torch.nn.ModuleList', 'nn.ModuleList', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((283, 8, 283, 54), 'torch.nn.init.uniform_', 'init.uniform_', ({(283, 22, 283, 37): 'self.init_param', (283, 39, 283, 46): '(-1 / 2)', (283, 48, 283, 53): '(1 / 2)'}, {}), '(self.init_param, -1 / 2, 1 / 2)', True, 'import torch.nn.init as init\n'), ((286, 12, 286, 31), 'torch.zeros_like', 'torch.zeros_like', ({(286, 29, 286, 30): 'x'}, {}), '(x)', False, 'import torch\n'), ((287, 18, 287, 41), 'torch.zeros', 'torch.zeros', ({(287, 30, 287, 40): 'z.shape[0]'}, {}), '(z.shape[0])', False, 'import torch\n'), ((304, 12, 304, 31), 'torch.zeros_like', 'torch.zeros_like', ({(304, 29, 304, 30): 'z'}, {}), '(z)', False, 'import torch\n'), ((305, 18, 305, 41), 'torch.zeros', 'torch.zeros', ({(305, 30, 305, 40): 'x.shape[0]'}, {}), '(x.shape[0])', False, 'import torch\n'), ((337, 18, 337, 41), 'torch.zeros', 'torch.zeros', ({(337, 30, 337, 40): 'x.shape[0]'}, {}), '(x.shape[0])', False, 'import torch\n'), ((340, 18, 340, 51), 'torch.split', 'torch.split', (), '', False, 'import torch\n'), ((343, 12, 343, 25), 'torch.nn.functional.softplus', 'F.softplus', ({(343, 23, 343, 24): 'D'}, {}), '(D)', True, 'import torch.nn.functional as F\n'), ((344, 20, 345, 61), 'nf.utils.unconstrained_RQS', 'unconstrained_RQS', (), '', False, 'from nf.utils import unconstrained_RQS\n'), ((346, 19, 346, 41), 'torch.sum', 'torch.sum', (), '', False, 'import torch\n'), ((348, 18, 348, 51), 'torch.split', 'torch.split', (), '', False, 'import torch\n'), ((351, 12, 351, 25), 'torch.nn.functional.softplus', 'F.softplus', ({(351, 23, 351, 24): 'D'}, {}), '(D)', True, 'import torch.nn.functional as F\n'), ((352, 20, 353, 61), 'nf.utils.unconstrained_RQS', 'unconstrained_RQS', (), '', False, 'from nf.utils import unconstrained_RQS\n'), ((354, 19, 354, 41), 'torch.sum', 'torch.sum', (), '', False, 'import torch\n'), ((358, 18, 358, 41), 'torch.zeros', 'torch.zeros', ({(358, 30, 358, 40): 'z.shape[0]'}, {}), '(z.shape[0])', False, 'import torch\n'), ((361, 18, 361, 51), 'torch.split', 'torch.split', (), '', False, 'import torch\n'), ((364, 12, 364, 25), 
'torch.nn.functional.softplus', 'F.softplus', ({(364, 23, 364, 24): 'D'}, {}), '(D)', True, 'import torch.nn.functional as F\n'), ((365, 20, 366, 60), 'nf.utils.unconstrained_RQS', 'unconstrained_RQS', (), '', False, 'from nf.utils import unconstrained_RQS\n'), ((367, 19, 367, 41), 'torch.sum', 'torch.sum', (), '', False, 'import torch\n'), ((369, 18, 369, 51), 'torch.split', 'torch.split', (), '', False, 'import torch\n'), ((372, 12, 372, 25), 'torch.nn.functional.softplus', 'F.softplus', ({(372, 23, 372, 24): 'D'}, {}), '(D)', True, 'import torch.nn.functional as F\n'), ((373, 20, 374, 64), 'nf.utils.unconstrained_RQS', 'unconstrained_RQS', (), '', False, 'from nf.utils import unconstrained_RQS\n'), ((375, 19, 375, 41), 'torch.sum', 'torch.sum', (), '', False, 'import torch\n'), ((32, 30, 32, 47), 'torch.Tensor', 'torch.Tensor', ({(32, 43, 32, 46): 'dim'}, {}), '(dim)', False, 'import torch\n'), ((33, 30, 33, 47), 'torch.Tensor', 'torch.Tensor', ({(33, 43, 33, 46): 'dim'}, {}), '(dim)', False, 'import torch\n'), ((34, 30, 34, 45), 'torch.Tensor', 'torch.Tensor', ({(34, 43, 34, 44): '1'}, {}), '(1)', False, 'import torch\n'), ((38, 49, 38, 65), 'math.sqrt', 'math.sqrt', ({(38, 59, 38, 64): '(1 / dim)'}, {}), '(1 / dim)', False, 'import math\n'), ((39, 49, 39, 65), 'math.sqrt', 'math.sqrt', ({(39, 59, 39, 64): '(1 / dim)'}, {}), '(1 / dim)', False, 'import math\n'), ((40, 49, 40, 65), 'math.sqrt', 'math.sqrt', ({(40, 59, 40, 64): '(1 / dim)'}, {}), '(1 / dim)', False, 'import math\n'), ((56, 14, 56, 44), 'torch.unsqueeze', 'torch.unsqueeze', ({(56, 30, 56, 40): '(x @ self.w)', (56, 42, 56, 43): '(1)'}, {}), '(x @ self.w, 1)', False, 'import torch\n'), ((76, 31, 76, 48), 'torch.Tensor', 'torch.Tensor', ({(76, 44, 76, 47): 'dim'}, {}), '(dim)', False, 'import torch\n'), ((77, 38, 77, 53), 'torch.Tensor', 'torch.Tensor', ({(77, 51, 77, 52): '1'}, {}), '(1)', False, 'import torch\n'), ((78, 33, 78, 48), 'torch.Tensor', 'torch.Tensor', ({(78, 46, 78, 47): '1'}, {}), '(1)', False, 'import torch\n'), ((81, 50, 81, 66), 'math.sqrt', 'math.sqrt', ({(81, 60, 81, 65): '(1 / dim)'}, {}), '(1 / dim)', False, 'import math\n'), ((82, 57, 82, 73), 'math.sqrt', 'math.sqrt', ({(82, 67, 82, 72): '(1 / dim)'}, {}), '(1 / dim)', False, 'import math\n'), ((83, 52, 83, 68), 'math.sqrt', 'math.sqrt', ({(83, 62, 83, 67): '(1 / dim)'}, {}), '(1 / dim)', False, 'import math\n'), ((107, 12, 107, 41), 'torch.nn.Linear', 'nn.Linear', ({(107, 22, 107, 28): 'in_dim', (107, 30, 107, 40): 'hidden_dim'}, {}), '(in_dim, hidden_dim)', True, 'import torch.nn as nn\n'), ((108, 12, 108, 21), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((109, 12, 109, 45), 'torch.nn.Linear', 'nn.Linear', ({(109, 22, 109, 32): 'hidden_dim', (109, 34, 109, 44): 'hidden_dim'}, {}), '(hidden_dim, hidden_dim)', True, 'import torch.nn as nn\n'), ((110, 12, 110, 21), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((111, 12, 111, 42), 'torch.nn.Linear', 'nn.Linear', ({(111, 22, 111, 32): 'hidden_dim', (111, 34, 111, 41): 'out_dim'}, {}), '(hidden_dim, out_dim)', True, 'import torch.nn as nn\n'), ((141, 18, 141, 50), 'torch.sum', 'torch.sum', (), '', False, 'import torch\n'), ((142, 18, 142, 50), 'torch.sum', 'torch.sum', (), '', False, 'import torch\n'), ((149, 43, 149, 69), 'torch.exp', 'torch.exp', ({(149, 53, 149, 68): '(-s2_transformed)'}, {}), '(-s2_transformed)', False, 'import torch\n'), ((152, 43, 152, 69), 'torch.exp', 'torch.exp', ({(152, 53, 152, 68): '(-s1_transformed)'}, {}), 
'(-s1_transformed)', False, 'import torch\n'), ((154, 18, 154, 51), 'torch.sum', 'torch.sum', (), '', False, 'import torch\n'), ((155, 18, 155, 51), 'torch.sum', 'torch.sum', (), '', False, 'import torch\n'), ((169, 42, 169, 57), 'torch.Tensor', 'torch.Tensor', ({(169, 55, 169, 56): '2'}, {}), '(2)', False, 'import torch\n'), ((175, 59, 175, 73), 'math.sqrt', 'math.sqrt', ({(175, 69, 175, 72): '(0.5)'}, {}), '(0.5)', False, 'import math\n'), ((214, 31, 214, 68), 'torch.zeros', 'torch.zeros', (), '', False, 'import torch\n'), ((215, 38, 215, 75), 'torch.zeros', 'torch.zeros', (), '', False, 'import torch\n'), ((223, 28, 223, 53), 'torch.exp', 'torch.exp', ({(223, 38, 223, 52): 'self.log_sigma'}, {}), '(self.log_sigma)', False, 'import torch\n'), ((224, 19, 224, 44), 'torch.sum', 'torch.sum', ({(224, 29, 224, 43): 'self.log_sigma'}, {}), '(self.log_sigma)', False, 'import torch\n'), ((237, 28, 237, 53), 'numpy.random.randn', 'np.random.randn', ({(237, 44, 237, 47): 'dim', (237, 49, 237, 52): 'dim'}, {}), '(dim, dim)', True, 'import numpy as np\n'), ((240, 30, 240, 66), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((247, 12, 247, 45), 'torch.tril', 'torch.tril', (), '', False, 'import torch\n'), ((257, 16, 257, 48), 'torch.triu', 'torch.triu', (), '', False, 'import torch\n'), ((259, 25, 259, 41), 'torch.inverse', 'torch.inverse', ({(259, 39, 259, 40): 'W'}, {}), '(W)', False, 'import torch\n'), ((277, 39, 277, 62), 'torch.Tensor', 'torch.Tensor', ({(277, 52, 277, 61): '3 * K - 1'}, {}), '(3 * K - 1)', False, 'import torch\n'), ((297, 16, 297, 29), 'torch.nn.functional.softplus', 'F.softplus', ({(297, 27, 297, 28): 'D'}, {}), '(D)', True, 'import torch.nn.functional as F\n'), ((298, 26, 299, 67), 'nf.utils.unconstrained_RQS', 'unconstrained_RQS', (), '', False, 'from nf.utils import unconstrained_RQS\n'), ((315, 16, 315, 29), 'torch.nn.functional.softplus', 'F.softplus', ({(315, 27, 315, 28): 'D'}, {}), '(D)', True, 'import torch.nn.functional as F\n'), ((316, 26, 317, 70), 'nf.utils.unconstrained_RQS', 'unconstrained_RQS', (), '', False, 'from nf.utils import unconstrained_RQS\n'), ((341, 15, 341, 40), 'torch.softmax', 'torch.softmax', (), '', False, 'import torch\n'), ((341, 42, 341, 67), 'torch.softmax', 'torch.softmax', (), '', False, 'import torch\n'), ((349, 15, 349, 40), 'torch.softmax', 'torch.softmax', (), '', False, 'import torch\n'), ((349, 42, 349, 67), 'torch.softmax', 'torch.softmax', (), '', False, 'import torch\n'), ((355, 15, 355, 49), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((362, 15, 362, 40), 'torch.softmax', 'torch.softmax', (), '', False, 'import torch\n'), ((362, 42, 362, 67), 'torch.softmax', 'torch.softmax', (), '', False, 'import torch\n'), ((370, 15, 370, 40), 'torch.softmax', 'torch.softmax', (), '', False, 'import torch\n'), ((370, 42, 370, 67), 'torch.softmax', 'torch.softmax', (), '', False, 'import torch\n'), ((376, 15, 376, 49), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((13, 40, 13, 53), 'torch.tanh', 'torch.tanh', ({(13, 51, 13, 52): 'x'}, {}), '(x)', False, 'import torch\n'), ((17, 55, 17, 67), 'torch.exp', 'torch.exp', ({(17, 65, 17, 66): 'x'}, {}), '(x)', False, 'import torch\n'), ((38, 31, 38, 47), 'math.sqrt', 'math.sqrt', ({(38, 41, 38, 46): '(1 / dim)'}, {}), '(1 / dim)', False, 'import math\n'), ((39, 31, 39, 47), 'math.sqrt', 'math.sqrt', ({(39, 41, 39, 46): '(1 / dim)'}, {}), '(1 / dim)', False, 'import math\n'), ((40, 31, 40, 47), 'math.sqrt', 'math.sqrt', ({(40, 41, 40, 46): '(1 / dim)'}, 
{}), '(1 / dim)', False, 'import math\n'), ((59, 28, 59, 50), 'torch.abs', 'torch.abs', ({(59, 38, 59, 49): '1 + phi @ u'}, {}), '(1 + phi @ u)', False, 'import torch\n'), ((81, 32, 81, 48), 'math.sqrt', 'math.sqrt', ({(81, 42, 81, 47): '(1 / dim)'}, {}), '(1 / dim)', False, 'import math\n'), ((82, 39, 82, 55), 'math.sqrt', 'math.sqrt', ({(82, 49, 82, 54): '(1 / dim)'}, {}), '(1 / dim)', False, 'import math\n'), ((83, 34, 83, 50), 'math.sqrt', 'math.sqrt', ({(83, 44, 83, 49): '(1 / dim)'}, {}), '(1 / dim)', False, 'import math\n'), ((91, 17, 91, 42), 'torch.exp', 'torch.exp', ({(91, 27, 91, 41): 'self.log_alpha'}, {}), '(self.log_alpha)', False, 'import torch\n'), ((92, 16, 92, 41), 'torch.exp', 'torch.exp', ({(92, 26, 92, 40): 'self.log_alpha'}, {}), '(self.log_alpha)', False, 'import torch\n'), ((94, 28, 94, 51), 'torch.log', 'torch.log', ({(94, 38, 94, 50): '(1 + beta * h)'}, {}), '(1 + beta * h)', False, 'import torch\n'), ((136, 41, 136, 66), 'torch.exp', 'torch.exp', ({(136, 51, 136, 65): 's1_transformed'}, {}), '(s1_transformed)', False, 'import torch\n'), ((139, 41, 139, 66), 'torch.exp', 'torch.exp', ({(139, 51, 139, 65): 's2_transformed'}, {}), '(s2_transformed)', False, 'import torch\n'), ((175, 43, 175, 57), 'math.sqrt', 'math.sqrt', ({(175, 53, 175, 56): '(0.5)'}, {}), '(0.5)', False, 'import math\n'), ((186, 39, 186, 55), 'torch.exp', 'torch.exp', ({(186, 49, 186, 54): 'alpha'}, {}), '(alpha)', False, 'import torch\n'), ((218, 16, 218, 41), 'torch.exp', 'torch.exp', ({(218, 26, 218, 40): 'self.log_sigma'}, {}), '(self.log_sigma)', False, 'import torch\n'), ((241, 43, 241, 53), 'numpy.diag', 'np.diag', ({(241, 51, 241, 52): 'U'}, {}), '(U)', True, 'import numpy as np\n'), ((242, 41, 242, 77), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((247, 59, 247, 79), 'torch.ones', 'torch.ones', ({(247, 70, 247, 78): 'self.dim'}, {}), '(self.dim)', False, 'import torch\n'), ((249, 34, 249, 52), 'torch.diag', 'torch.diag', ({(249, 45, 249, 51): 'self.S'}, {}), '(self.S)', False, 'import torch\n'), ((250, 38, 250, 55), 'torch.abs', 'torch.abs', ({(250, 48, 250, 54): 'self.S'}, {}), '(self.S)', False, 'import torch\n'), ((255, 16, 255, 49), 'torch.tril', 'torch.tril', (), '', False, 'import torch\n'), ((291, 26, 291, 66), 'torch.split', 'torch.split', (), '', False, 'import torch\n'), ((294, 26, 294, 59), 'torch.split', 'torch.split', (), '', False, 'import torch\n'), ((295, 19, 295, 44), 'torch.softmax', 'torch.softmax', (), '', False, 'import torch\n'), ((295, 46, 295, 71), 'torch.softmax', 'torch.softmax', (), '', False, 'import torch\n'), ((309, 26, 309, 66), 'torch.split', 'torch.split', (), '', False, 'import torch\n'), ((312, 26, 312, 59), 'torch.split', 'torch.split', (), '', False, 'import torch\n'), ((313, 19, 313, 44), 'torch.softmax', 'torch.softmax', (), '', False, 'import torch\n'), ((313, 46, 313, 71), 'torch.softmax', 'torch.softmax', (), '', False, 'import torch\n'), ((92, 58, 92, 78), 'torch.exp', 'torch.exp', ({(92, 68, 92, 77): 'self.beta'}, {}), '(self.beta)', False, 'import torch\n'), ((200, 27, 200, 43), 'torch.exp', 'torch.exp', ({(200, 37, 200, 42): 'alpha'}, {}), '(alpha)', False, 'import torch\n'), ((256, 27, 256, 47), 'torch.ones', 'torch.ones', ({(256, 38, 256, 46): 'self.dim'}, {}), '(self.dim)', False, 'import torch\n'), ((258, 34, 258, 52), 'torch.diag', 'torch.diag', ({(258, 45, 258, 51): 'self.S'}, {}), '(self.S)', False, 'import torch\n'), ((261, 39, 261, 56), 'torch.abs', 'torch.abs', ({(261, 49, 261, 55): 'self.S'}, {}), '(self.S)', 
False, 'import torch\n'), ((53, 41, 53, 59), 'torch.norm', 'torch.norm', ({(53, 52, 53, 58): 'self.w'}, {}), '(self.w)', False, 'import torch\n'), ((52, 31, 52, 57), 'torch.exp', 'torch.exp', ({(52, 41, 52, 56): '(self.w @ self.u)'}, {}), '(self.w @ self.u)', False, 'import torch\n'), ((96, 40, 96, 65), 'torch.exp', 'torch.exp', ({(96, 50, 96, 64): 'self.log_alpha'}, {}), '(self.log_alpha)', False, 'import torch\n')]
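The coupling-layer classes in the file above all follow the same pattern: split the features in half, transform one half with a shift and scale predicted from the other half, and accumulate the log-determinant of that transform. Below is a minimal, self-contained sketch of a single affine coupling step; it is illustrative only — it uses its own tiny network rather than the repository's FCNN base_network, and the RealNVP class above additionally chains a second sub-step so that both halves get updated.

import torch
import torch.nn as nn

class TinyAffineCoupling(nn.Module):
    def __init__(self, dim, hidden_dim=8):
        super().__init__()
        half = dim // 2
        # one network predicts both the shift t and the log-scale s for the upper half
        self.net = nn.Sequential(nn.Linear(half, hidden_dim), nn.Tanh(),
                                 nn.Linear(hidden_dim, 2 * half))

    def forward(self, x):
        lower, upper = x.chunk(2, dim=1)
        t, s = self.net(lower).chunk(2, dim=1)
        z = torch.cat([lower, t + upper * torch.exp(s)], dim=1)
        return z, torch.sum(s, dim=1)             # log|det J| of the affine map

    def backward(self, z):
        lower, upper = z.chunk(2, dim=1)
        t, s = self.net(lower).chunk(2, dim=1)
        x = torch.cat([lower, (upper - t) * torch.exp(-s)], dim=1)
        return x, -torch.sum(s, dim=1)

layer = TinyAffineCoupling(dim=4)
x = torch.randn(3, 4)
z, log_det = layer.forward(x)
x_back, _ = layer.backward(z)
print(torch.allclose(x, x_back, atol=1e-5))      # True: the inverse recovers the input

Summing the per-layer log-determinants across a stack of such bijections is what turns them into a tractable change-of-variables likelihood.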
HakaiInstitute/ioos_qc
ioos_qc/config_creator/fx_parser.py
dfb28ee404a17c8355747b792fba0471093953c4
# module pyparsing.py # # Copyright (c) 2003-2019 Paul T. McGuire # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # from pyparsing import ( Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList, ) import math import operator # map operator symbols to corresponding arithmetic operations epsilon = 1e-12 opn = { "+": operator.add, "-": operator.sub, "*": operator.mul, "/": operator.truediv, "^": operator.pow, } fn = { "sin": math.sin, "cos": math.cos, "tan": math.tan, "exp": math.exp, "abs": abs, "trunc": lambda a: int(a), "round": round, "sgn": lambda a: -1 if a < -epsilon else 1 if a > epsilon else 0, } exprStack = [] def push_first(toks): exprStack.append(toks[0]) def push_unary_minus(toks): for t in toks: if t == "-": exprStack.append("unary -") else: break def BNF(): """ expop :: '^' multop :: '*' | '/' addop :: '+' | '-' integer :: ['+' | '-'] '0'..'9'+ atom :: PI | E | real | fn '(' expr ')' | '(' expr ')' factor :: atom [ expop factor ]* term :: factor [ multop factor ]* expr :: term [ addop term ]* """ # use CaselessKeyword for e and pi, to avoid accidentally matching # functions that start with 'e' or 'pi' (such as 'exp'); Keyword # and CaselessKeyword only match whole words e = CaselessKeyword("E") pi = CaselessKeyword("PI") # fnumber = Combine(Word("+-"+nums, nums) + # Optional("." + Optional(Word(nums))) + # Optional(e + Word("+-"+nums, nums))) # or use provided pyparsing_common.number, but convert back to str: # fnumber = ppc.number().addParseAction(lambda t: str(t[0])) fnumber = Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?") ident = Word(alphas, alphanums + "_$") plus, minus, mult, div = map(Literal, "+-*/") lpar, rpar = map(Suppress, "()") addop = plus | minus multop = mult | div expop = Literal("^") expr = Forward() expr_list = delimitedList(Group(expr)) # add parse action that replaces the function identifier with a (name, number of args) tuple fn_call = (ident + lpar - Group(expr_list) + rpar).setParseAction( lambda t: t.insert(0, (t.pop(0), len(t[0]))) ) atom = ( addop[...] + ( (fn_call | pi | e | fnumber | ident).setParseAction(push_first) | Group(lpar + expr + rpar) ) ).setParseAction(push_unary_minus) # by defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...", we get right-to-left # exponents, instead of left-to-right that is, 2^3^2 = 2^(3^2), not (2^3)^2. factor = Forward() factor <<= atom + (expop + factor).setParseAction(push_first)[...] term = factor + (multop + factor).setParseAction(push_first)[...] 
expr <<= term + (addop + term).setParseAction(push_first)[...] bnf = expr return bnf def evaluate_stack(s, stats): op, num_args = s.pop(), 0 if isinstance(op, tuple): op, num_args = op if op == "unary -": return -evaluate_stack(s, stats) if op in "+-*/^": # note: operands are pushed onto the stack in reverse order op2 = evaluate_stack(s, stats) op1 = evaluate_stack(s, stats) return opn[op](op1, op2) elif op == "PI": return math.pi # 3.1415926535 elif op == "E": return math.e # 2.718281828 elif op == "mean": return stats['mean'] elif op == "min": return stats['min'] elif op == "max": return stats['max'] elif op == "std": return stats['std'] elif op in fn: # note: args are pushed onto the stack in reverse order args = reversed([evaluate_stack(s, stats) for _ in range(num_args)]) return fn[op](*args) elif op[0].isalpha(): raise Exception("invalid identifier '%s'" % op) else: return float(op) def eval_fx(fx, stats): """Given fx and stats ('min', 'max', 'mean', 'std') return the result""" _ = BNF().parseString(fx, parseAll=True) val = evaluate_stack(exprStack[:], stats) return val
[((89, 8, 89, 28), 'pyparsing.CaselessKeyword', 'CaselessKeyword', ({(89, 24, 89, 27): '"""E"""'}, {}), "('E')", False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((90, 9, 90, 30), 'pyparsing.CaselessKeyword', 'CaselessKeyword', ({(90, 25, 90, 29): '"""PI"""'}, {}), "('PI')", False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((96, 14, 96, 59), 'pyparsing.Regex', 'Regex', ({(96, 20, 96, 58): '"""[+-]?\\\\d+(?:\\\\.\\\\d*)?(?:[eE][+-]?\\\\d+)?"""'}, {}), "('[+-]?\\\\d+(?:\\\\.\\\\d*)?(?:[eE][+-]?\\\\d+)?')", False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((97, 12, 97, 42), 'pyparsing.Word', 'Word', ({(97, 17, 97, 23): 'alphas', (97, 25, 97, 41): "alphanums + '_$'"}, {}), "(alphas, alphanums + '_$')", False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((103, 12, 103, 24), 'pyparsing.Literal', 'Literal', ({(103, 20, 103, 23): '"""^"""'}, {}), "('^')", False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((105, 11, 105, 20), 'pyparsing.Forward', 'Forward', ({}, {}), '()', False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((121, 13, 121, 22), 'pyparsing.Forward', 'Forward', ({}, {}), '()', False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((106, 30, 106, 41), 'pyparsing.Group', 'Group', ({(106, 36, 106, 40): 'expr'}, {}), '(expr)', False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((108, 30, 108, 46), 'pyparsing.Group', 'Group', ({(108, 36, 108, 45): 'expr_list'}, {}), '(expr_list)', False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((115, 14, 115, 39), 'pyparsing.Group', 'Group', ({(115, 20, 115, 38): 'lpar + expr + rpar'}, {}), '(lpar + expr + rpar)', False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n')]
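The parser above is essentially pyparsing's classic four-function-calculator grammar extended with the identifiers mean/min/max/std, which evaluate_stack resolves from the caller-supplied stats dict. A hedged usage sketch follows; the import path is inferred from the repo_path above, and pyparsing must be installed.

from ioos_qc.config_creator.fx_parser import eval_fx

stats = {"mean": 10.0, "min": 2.0, "max": 18.0, "std": 4.0}
print(eval_fx("mean + 2 * std", stats))          # 18.0
print(eval_fx("round((max - min) / 2)", stats))  # 8

Note that exprStack is module-level and never cleared, so repeated calls keep appending to it; evaluation only consumes the most recently parsed expression from the top of the stack.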
pythonhacker/pyscanlogd
scanlogger.py
64d6ad38127243e5c422be7f899ecfa802e1ad21
# -- coding: utf-8 #!/usr/bin/env python """ pyscanlogger: Port scan detector/logger tool, inspired by scanlogd {http://www.openwall.com/scanlogd} but with added ability to log slow port-scans. Features 1. Detects all stealth (half-open) and full-connect scans. 2. Detects Idle scan and logs it correctly using correlation! 3. Detects SCTP scan. 4. Detects slow port-scans also. Modification History Mar 17 2010 - Cleaned up code to publish to google. Apr 8 2010 - Better detection of TCP full-connect scan without spurious and incorrect logging. Better logging functions. Licensed under GNU GPL v3.0. """ import sys, os import dpkt, pcap import struct import socket import time import threading import optparse import entry import timerlist __author__ = "pythonhacker" __maintainer__ = "pythonhacker" __version__ = '0.5.1' __modified__ = 'Thu Apr 8 19:21:11 IST 2010' # UDP - in progress... SCAN_TIMEOUT = 5 WEIGHT_THRESHOLD = 25 PIDFILE="/var/run/pyscanlogger.pid" # TCP flag constants TH_URG=dpkt.tcp.TH_URG TH_ACK=dpkt.tcp.TH_ACK TH_PSH=dpkt.tcp.TH_PUSH TH_RST=dpkt.tcp.TH_RST TH_SYN=dpkt.tcp.TH_SYN TH_FIN=dpkt.tcp.TH_FIN # Protocols TCP=dpkt.tcp.TCP UDP=dpkt.udp.UDP SCTP=dpkt.sctp.SCTP get_timestamp = lambda : time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) ip2quad = lambda x: socket.inet_ntoa(struct.pack('I', x)) scan_ip2quad = lambda scan: map(ip2quad, [scan.src, scan.dst]) class ScanLogger(object): """ Port scan detector/logger """ # TCP flags to scan type mapping scan_types = {0: 'TCP null', TH_FIN: 'TCP fin', TH_SYN: 'TCP syn', TH_SYN|TH_RST: 'TCP syn', TH_ACK: 'TCP ack', TH_URG|TH_PSH|TH_FIN: 'TCP x-mas', TH_URG|TH_PSH|TH_FIN|TH_ACK: 'TCP x-mas', TH_SYN|TH_FIN: 'TCP syn/fin', TH_FIN|TH_ACK: 'TCP fin/ack', TH_SYN|TH_ACK: 'TCP full-connect', TH_URG|TH_PSH|TH_ACK|TH_RST|TH_SYN|TH_FIN: 'TCP all-flags', TH_SYN|TH_ACK|TH_RST: 'TCP full-connect', # Not a scan TH_RST|TH_ACK: 'reply'} def __init__(self, timeout, threshold, maxsize, daemon=True, logfile='/var/log/scanlog'): self.scans = entry.EntryLog(maxsize) self.long_scans = entry.EntryLog(maxsize) # Port scan weight threshold self.threshold = threshold # Timeout for scan entries self.timeout = timeout # Long-period scan timeouts self.timeout_l = 3600 # Long-period scan threshold self.threshold_l = self.threshold/2 # Daemonize ? self.daemon = daemon # Log file try: self.scanlog = open(logfile,'a') print >> sys.stderr, 'Scan logs will be saved to %s' % logfile except (IOError, OSError), (errno, strerror): print >> sys.stderr, "Error opening scan log file %s => %s" % (logfile, strerror) self.scanlog = None # Recent scans - this list allows to keep scan information # upto last 'n' seconds, so as to not call duplicate scans # in the same time-period. 'n' is 60 sec by default. # Since entries time out in 60 seconds, max size is equal # to maximum such entries possible in 60 sec - assuming # a scan occurs at most every 5 seconds, this would be 12. 
self.recent_scans = timerlist.TimerList(12, 60.0) def hash_func(self, addr): """ Hash a host address """ value = addr h = 0 while value: # print value h ^= value value = value >> 9 return h & (8192-1) def mix(self, a, b, c): a -= b; a -= c; a ^= (c>>13) b -= c; b -= a; b ^= (a<<8) c -= a; c -= b; c ^= (b>>13) a -= b; a -= c; a ^= (c>>12) b -= c; b -= a; b ^= (a<<16) c -= a; c -= b; c ^= (b>>5) a -= b; a -= c; a ^= (c>>3) b -= c; b -= a; b ^= (a<<10) c -= a; c -= b; c ^= (b>>15) return abs(c) def host_hash(self, src, dst): """ Hash mix two host addresses """ return self.hash_func(self.mix(src, dst, 0xffffff)) def log(self, msg): """ Log a message to console and/or log file """ line = '[%s]: %s' % (get_timestamp(), msg) if self.scanlog: self.scanlog.write(line + '\n') self.scanlog.flush() if not self.daemon: print >> sys.stderr, line def log_scan(self, scan, continuation=False, slow_scan=False, unsure=False): """ Log the scan to file and/or console """ srcip, dstip = scan_ip2quad(scan) ports = ','.join([str(port) for port in scan.ports]) if not continuation: tup = [scan.type,scan.flags_or,srcip,dstip, ports] if not slow_scan: if scan.type != 'Idle': line = '%s scan (flags:%d) from %s to %s (ports:%s)' else: tup.append(ip2quad(scan.zombie)) line = '%s scan (flags: %d) from %s to %s (ports: %s) using zombie host %s' else: tup.append(scan.time_avg) if unsure: line = 'Possible slow %s scan (flags:%d) from %s to %s (ports:%s), average timediff %.2fs' else: line = 'Slow %s scan (flags:%d) from %s to %s (ports:%s), average timediff %.2fs' else: tup = [scan.type, srcip,dstip, ports] if not slow_scan: if scan.type != 'Idle': line = 'Continuation of %s scan from %s to %s (ports:%s)' else: tup.append(ip2quad(scan.zombie)) line = 'Continuation of %s scan from %s to %s (ports: %s) using zombie host %s' else: tup.append(scan.time_avg) line = 'Continuation of slow %s scan from %s to %s (ports:%s), average timediff %.2fs' msg = line % tuple(tup) self.log(msg) def update_ports(self, scan, dport, flags): scan.flags_or |= flags if dport in scan.ports: return # Add weight for port if dport < 1024: scan.weight += 3 else: scan.weight += 1 scan.ports.append(dport) def inspect_scan(self, scan, slow_scan=False): # Sure scan is_scan = ((slow_scan and scan.weight >= self.threshold_l) or (not slow_scan and scan.weight >= self.threshold)) # Possible scan maybe_scan = (slow_scan and len(scan.ports)>=3 and len(scan.timediffs)>=4 and (scan.weight < self.threshold_l)) not_scan = False if is_scan or maybe_scan: scan.logged = True if scan.proto==TCP: idle_scan = False if scan.flags_or==TH_RST: # None does scan using RST, however this could be # return packets from a zombie host to the scanning # host when a scanning host is doing an idle scan. # Basically # A -scanning host # B - zombie host # C - target host # If A does an idle scan on C with B as zombie, # it will appear to C as if B is syn scanning it # and later we could get an apparent RST "scan" # from B to A # Correlation: If 'RST scan' detected from X to Y # See if there was a SYN scan recently from host # X to host Z. 
Then actually Y is idle scanning # Z dummy_scans, idle_ports = [], [] for item in reversed(self.recent_scans): rscan = item[1] if rscan.src==scan.src and rscan.flags_or==TH_SYN and ((rscan.timestamp - scan.timestamp)<30): idle_scan = True idle_ports.append(rscan.ports) dummy_scans.append(item) if idle_scan: scan.src = scan.dst scan.dst = rscan.dst scan.zombie = rscan.src scan.type = 'Idle' scan.ports = idle_ports # for d in dummy_scans: # self.recent_scans.remove(d) else: # Remove entry if slow_scan: del self.long_scans[scan.hash] else: del self.scans[scan.hash] return False else: scan.type = self.scan_types.get(scan.flags_or,'unknown') if scan.type in ('', 'reply'): not_scan = True # If we see scan flags 22 from A->B, make sure that # there was no recent full-connect scan from B->A, if # so this is spurious and should be ignored. if scan.flags_or == (TH_SYN|TH_ACK|TH_RST) and len(self.recent_scans): recent1 = self.recent_scans[-1:-2:-1] for recent in recent1: # Was not a scan, skip if not recent.is_scan: continue if recent.type == 'TCP full-connect' and ((scan.src == recent.dst) and (scan.dst == recent.src)): # Spurious self.log("Ignoring spurious TCP full-connect scan from %s" % ' to '.join(scan_ip2quad(scan))) not_scan = True break # If this is a syn scan, see if there was a recent idle scan # with this as zombie, then ignore it... elif scan.flags_or == TH_SYN and len(self.recent_scans): # Try last 1 scans recent1 = self.recent_scans[-1:-2:-1] for recent in recent1: if recent.type=='Idle' and scan.src==recent.zombie: self.log('Ignoring mis-interpreted syn scan from zombie host %s' % ' to '.join(scan_ip2quad(scan))) break # Reply from B->A for full-connect scan from A->B elif (recent.type == 'reply' and ((scan.src == recent.dst) and (scan.dst == recent.src))): scan.type = 'TCP full-connect' break elif scan.proto==UDP: scan.type = 'UDP' # Reset flags for UDP scan scan.flags_or = 0 elif scan.proto==SCTP: if scan.chunk_type==1: scan.type = 'SCTP Init' elif scan.chunk_type==10: scan.type = 'SCTP COOKIE_ECHO' # See if this was logged recently scanentry = entry.RecentScanEntry(scan, not not_scan) if scanentry not in self.recent_scans: continuation=False self.recent_scans.append(scanentry) else: continuation=True if not not_scan: self.log_scan(scan, continuation=continuation, slow_scan=slow_scan, unsure=maybe_scan) # Remove entry if slow_scan: del self.long_scans[scan.hash] else: del self.scans[scan.hash] return True else: return False def process(self, pkt): if not hasattr(pkt, 'ip'): return ip = pkt.ip # Ignore non-tcp, non-udp packets if type(ip.data) not in (TCP, UDP, SCTP): return pload = ip.data src,dst,dport,flags = int(struct.unpack('I',ip.src)[0]),int(struct.unpack('I', ip.dst)[0]),int(pload.dport),0 proto = type(pload) if proto == TCP: flags = pload.flags key = self.host_hash(src,dst) curr=time.time() # Keep dropping old entries self.recent_scans.collect() if key in self.scans: scan = self.scans[key] if scan.src != src: # Skip packets in reverse direction or invalid protocol return timediff = curr - scan.timestamp # Update only if not too old, else skip and remove entry if (timediff > self.timeout): # Add entry in long_scans if timediff not larger # than longscan timeout prev = self.scans[key].timestamp if timediff<=self.timeout_l: if key not in self.long_scans: lscan = entry.ScanEntry(key) lscan.src = src lscan.dst = dst lscan.timestamp = curr lscan.timediffs.append(curr - prev) lscan.flags_or |= flags lscan.ports.append(dport) lscan.proto = proto self.long_scans[key] = lscan else: 
lscan = self.long_scans[key] lscan.timestamp = curr lscan.flags_or |= flags lscan.timediffs.append(curr - prev) lscan.update_time_sd() self.update_ports(lscan, dport, flags) if lscan.time_sd<2: # SD is less than 2, possible slow scan # update port weights... # print 'Weight=>',lscan.weight if not self.inspect_scan(lscan, True): # Not a scan, check # of entries - if too many # then this is a regular network activity # but not a scan, so remove entry if len(lscan.timediffs)>=10: # print lscan.src, lscan.timediffs, lscan.time_sd print 'Removing',key,lscan.src,'since not a scan' del self.long_scans[key] elif len(lscan.timediffs)>2: # More than 2 entries, but SD is too large, # delete the entry # print 'Removing',key,lscan.src,'since SD is',lscan.time_sd del self.long_scans[key] else: # Too large timeout, remove key del self.long_scans[key] del self.scans[key] return if scan.logged: return scan.timestamp = curr self.update_ports(scan, dport, flags) self.inspect_scan(scan) else: # Add new entry scan = entry.ScanEntry(key) scan.src = src scan.dst = dst scan.timestamp = curr scan.flags_or |= flags if proto==SCTP: scan.chunk_type = pload.chunks[0].type scan.ports.append(dport) scan.proto = proto self.scans[key] = scan def loop(self): pc = pcap.pcap() decode = { pcap.DLT_LOOP:dpkt.loopback.Loopback, pcap.DLT_NULL:dpkt.loopback.Loopback, pcap.DLT_EN10MB:dpkt.ethernet.Ethernet } [pc.datalink()] try: print 'listening on %s: %s' % (pc.name, pc.filter) for ts, pkt in pc: self.process(decode(pkt)) except KeyboardInterrupt: if not self.daemon: nrecv, ndrop, nifdrop = pc.stats() print '\n%d packets received by filter' % nrecv print '%d packets dropped by kernel' % ndrop def run_daemon(self): # Disconnect from tty try: pid = os.fork() if pid>0: sys.exit(0) except OSError, e: print >>sys.stderr, "fork #1 failed", e sys.exit(1) os.setsid() os.umask(0) # Second fork try: pid = os.fork() if pid>0: open(PIDFILE,'w').write(str(pid)) sys.exit(0) except OSError, e: print >>sys.stderr, "fork #2 failed", e sys.exit(1) self.loop() def run(self): # If dameon, then create a new thread and wait for it if self.daemon: print 'Daemonizing...' self.run_daemon() else: # Run in foreground self.loop() def main(): if os.geteuid() != 0: sys.exit("You must be super-user to run this program") o=optparse.OptionParser() o.add_option("-d", "--daemonize", dest="daemon", help="Daemonize", action="store_true", default=False) o.add_option("-f", "--logfile", dest="logfile", help="File to save logs to", default="/var/log/scanlog") options, args = o.parse_args() s=ScanLogger(SCAN_TIMEOUT, WEIGHT_THRESHOLD, 8192, options.daemon, options.logfile) s.run() if __name__ == '__main__': main()
[]
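The detection logic above is driven by a simple additive score: update_ports gives each newly probed port a weight (3 for privileged ports below 1024, 1 otherwise) and inspect_scan flags the source once the total reaches WEIGHT_THRESHOLD (25 in this file). A Python 3 re-statement of just that heuristic, for illustration — the original file is Python 2:

WEIGHT_THRESHOLD = 25

def scan_weight(ports):
    """Accumulate per-port weight the same way ScanLogger.update_ports does."""
    seen, weight = set(), 0
    for port in ports:
        if port in seen:               # a repeated probe of the same port adds nothing
            continue
        seen.add(port)
        weight += 3 if port < 1024 else 1
    return weight

probe = [21, 22, 23, 25, 53, 80, 110, 143, 443]
print(scan_weight(probe), scan_weight(probe) >= WEIGHT_THRESHOLD)   # 27 True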
ashleylst/DSDmodel
src/util/util.py
4276c832e0335539aef2ae2b33e23719957a3f08
from itertools import combinations import copy def get_reverse(n): if n == 1: return 0 else: return 1 def get_edge_info(e): v = [0 for i in range(2)] n = [0 for i in range(2)] t = 0 for x in e: v[t], n[t] = x t += 1 return v, n def sort_e_by_domain(val): return val[0][1] def sort_by_strand(val): return val[0][0] def check_edge_in_tuplelist(edge, tpl): for i in tpl: if edge in i: return True return False def compare(a, b): return (a > b) - (a < b) def flip(i): if i == 0: i = 1 elif i == 1: i = 0 return i def get_free_domains(limits, blocks, bound): limits = sorted(limits) interval = limits[1] - limits[0] for i in blocks: if limits[1] > i > limits[0]: tmp = abs(bound - i) if tmp < interval: interval = tmp return interval def get_combinations(oldlen, newlen, cursor, indexlist): combold = list(combinations(indexlist[cursor:oldlen], 2)) combself = [(i, i) for i in range(0, oldlen)] combnew = [] if oldlen != newlen: for i in range(0, oldlen): for j in range(oldlen, newlen): combnew.append((i, j)) return combold + combnew + combself def get_migrate_nodes(edges, indices, startstrand): d = [] for i in indices: vi, ni = get_edge_info(edges[i][0]) if vi[0] == startstrand: d.append(ni[0]) else: d.append(ni[1]) d.sort() return d def check_following_migration(edges, p=0): """ :param edges: :return: """ e = copy.copy(edges) visited = [False for _ in e] miggroup = [] cnt = -1 for i in range(0, len(e)): if visited[i]: continue e[i] = list(e[i]) e[i][p] = list(e[i][p]) t1 = sorted(e[i][p], key=lambda tup: tup[0]) if not visited[i]: visited[i] = True miggroup.append([i]) cnt += 1 for j in range(0, len(e)): if j != i and not visited[j]: e[j] = list(e[j]) e[j][p] = list(e[j][p]) t2 = sorted(e[j][p], key=lambda tup: tup[0]) if (t2[0][0] != t1[0][0]) or (t2[1][0] != t1[1][0]): continue for num in range(0, len(miggroup[cnt])): t1 = sorted(e[miggroup[cnt][num]][p], key=lambda tup: tup[0]) if (t1[0][1] + 1 == t2[0][1] and t1[1][1] - 1 == t2[1][1]) \ or (t1[0][1] - 1 == t2[0][1] and t1[1][1] + 1 == t2[1][1]): visited[j] = True miggroup[cnt].append(j) break return miggroup def get_absdist(domain1, domain2): """ :param domain1: :param domain2: :return: """ return abs(domain1[1] - domain2[1]) def get_closet_domain_to_target(target, domains): """ :param target: :param domains: :return: """ closet = 10000 closetd = () for i in domains: dist = get_absdist(i, target) if dist < closet: closet = dist closetd = i return closetd def get_domains_on_2sides(target1, target2, domains1, domains2): """ :param target1: :param target2: :param domains1: :param domains2: :return: """ if target1[0] == domains1[0][0]: closetd1 = get_closet_domain_to_target(target1, domains1) elif target2[0] == domains1[0][0]: closetd1 = get_closet_domain_to_target(target2, domains1) if target1[0] == domains2[0][0]: closetd2 = get_closet_domain_to_target(target1, domains2) elif target2[0] == domains2[0][0]: closetd2 = get_closet_domain_to_target(target2, domains2) return closetd1, closetd2 def get_closest_target(domains, targets): """ :return: """ domains = sorted(domains, key=lambda tup: tup[1]) mindist = 10000 mint = None for t in targets: dist = min(get_absdist(t, domains[0]), get_absdist(t, domains[len(domains) - 1])) if dist < mindist: mint = t return mint def check_continuity(a, b): for i in a: for j in b: if i + 1 == j or i - 1 == j: return i, j return None def check_bond_existence(d1, d2, l1, l2): for i in range(len(l1)): if d1 == l1[i] and d2 == l2[i]: return True return False
[((91, 8, 91, 24), 'copy.copy', 'copy.copy', ({(91, 18, 91, 23): 'edges'}, {}), '(edges)', False, 'import copy\n'), ((62, 19, 62, 60), 'itertools.combinations', 'combinations', ({(62, 32, 62, 56): 'indexlist[cursor:oldlen]', (62, 58, 62, 59): '2'}, {}), '(indexlist[cursor:oldlen], 2)', False, 'from itertools import combinations\n')]
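check_following_migration above groups bonds whose (strand, domain) endpoints advance in lockstep by one domain per step, which is how a branch-migration run is recognised. A hedged example call is sketched below; only the first element of each edge record (the pair of (strand, domain) endpoints) is exercised, and the real edge tuples elsewhere in the codebase may carry additional fields.

from util import check_following_migration   # module path per src/util/util.py; adjust as needed

edges = [
    (((0, 2), (1, 7)),),   # strand 0 / domain 2 bound to strand 1 / domain 7
    (((0, 3), (1, 6)),),   # one domain forward on strand 0, one back on strand 1
    (((0, 4), (1, 5)),),
    (((0, 9), (1, 1)),),   # unrelated bond, ends up in its own group
]
print(check_following_migration(edges))       # [[0, 1, 2], [3]]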
ebell495/nn_pruning
examples/question_answering/qa_sparse_train.py
41263ab898117a639f3f219c23a4cecc8bc0e3f3
# coding=utf-8 # Copyright 2020 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Sparse Fine-tuning the library models for question answering. """ # You can also adapt this script on your own question answering task. Pointers for this are left as comments. from nn_pruning.sparse_trainer import SparseTrainer from .qa_train import QATrainer # SparseTrainer should appear first in the base classes, as its functions must override QATrainer and its base classes (Trainer) class QASparseTrainer(SparseTrainer, QATrainer): def __init__(self, sparse_args, *args, **kwargs): QATrainer.__init__(self, *args, **kwargs) SparseTrainer.__init__(self, sparse_args)
[((27, 8, 27, 49), 'nn_pruning.sparse_trainer.SparseTrainer.__init__', 'SparseTrainer.__init__', ({(27, 31, 27, 35): 'self', (27, 37, 27, 48): 'sparse_args'}, {}), '(self, sparse_args)', False, 'from nn_pruning.sparse_trainer import SparseTrainer\n')]
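The comment above about base-class order is pure method-resolution-order mechanics: with class QASparseTrainer(SparseTrainer, QATrainer), Python searches SparseTrainer before QATrainer (and therefore before Trainer), so the sparse hooks win. A toy stand-in with plain classes, not the real Trainer API:

class Trainer:
    def training_step(self):
        return "dense step"

class QATrainer(Trainer):
    pass

class SparseTrainer:
    def training_step(self):
        return "sparse step (applies pruning masks)"

class QASparseTrainer(SparseTrainer, QATrainer):
    pass

print([c.__name__ for c in QASparseTrainer.__mro__])
# ['QASparseTrainer', 'SparseTrainer', 'QATrainer', 'Trainer', 'object']
print(QASparseTrainer().training_step())      # sparse step (applies pruning masks)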
armohamm/ironic
ironic/drivers/modules/ilo/raid.py
21093ca886ed736a7a25bf5e71e05d41e132fd2f
# Copyright 2018 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ iLO5 RAID specific methods """ from ironic_lib import metrics_utils from oslo_log import log as logging from oslo_utils import importutils from ironic.common import exception from ironic.common.i18n import _ from ironic.common import raid from ironic.common import states from ironic.conductor import utils as manager_utils from ironic import conf from ironic.drivers import base from ironic.drivers.modules import deploy_utils from ironic.drivers.modules.ilo import common as ilo_common LOG = logging.getLogger(__name__) CONF = conf.CONF METRICS = metrics_utils.get_metrics_logger(__name__) ilo_error = importutils.try_import('proliantutils.exception') class Ilo5RAID(base.RAIDInterface): """Implementation of OOB RAIDInterface for iLO5.""" def get_properties(self): """Return the properties of the interface.""" return ilo_common.REQUIRED_PROPERTIES def _set_clean_failed(self, task, msg, exc): LOG.error("RAID configuration job failed for node %(node)s. " "Message: '%(message)s'.", {'node': task.node.uuid, 'message': msg}) task.node.last_error = msg task.process_event('fail') def _set_driver_internal_true_value(self, task, *keys): driver_internal_info = task.node.driver_internal_info for key in keys: driver_internal_info[key] = True task.node.driver_internal_info = driver_internal_info task.node.save() def _set_driver_internal_false_value(self, task, *keys): driver_internal_info = task.node.driver_internal_info for key in keys: driver_internal_info[key] = False task.node.driver_internal_info = driver_internal_info task.node.save() def _pop_driver_internal_values(self, task, *keys): driver_internal_info = task.node.driver_internal_info for key in keys: driver_internal_info.pop(key, None) task.node.driver_internal_info = driver_internal_info task.node.save() def _prepare_for_read_raid(self, task, raid_step): deploy_opts = deploy_utils.build_agent_options(task.node) task.driver.boot.prepare_ramdisk(task, deploy_opts) manager_utils.node_power_action(task, states.REBOOT) if raid_step == 'create_raid': self._set_driver_internal_true_value( task, 'ilo_raid_create_in_progress') else: self._set_driver_internal_true_value( task, 'ilo_raid_delete_in_progress') self._set_driver_internal_true_value(task, 'cleaning_reboot') self._set_driver_internal_false_value(task, 'skip_current_clean_step') @METRICS.timer('Ilo5RAID.create_configuration') @base.clean_step(priority=0, abortable=False, argsinfo={ 'create_root_volume': { 'description': ( 'This specifies whether to create the root volume. ' 'Defaults to `True`.' ), 'required': False }, 'create_nonroot_volumes': { 'description': ( 'This specifies whether to create the non-root volumes. ' 'Defaults to `True`.' ), 'required': False } }) def create_configuration(self, task, create_root_volume=True, create_nonroot_volumes=True): """Create a RAID configuration on a bare metal using agent ramdisk. This method creates a RAID configuration on the given node. 
:param task: a TaskManager instance. :param create_root_volume: If True, a root volume is created during RAID configuration. Otherwise, no root volume is created. Default is True. :param create_nonroot_volumes: If True, non-root volumes are created. If False, no non-root volumes are created. Default is True. :raises: MissingParameterValue, if node.target_raid_config is missing or was found to be empty after skipping root volume and/or non-root volumes. :raises: NodeCleaningFailure, on failure to execute step. """ node = task.node target_raid_config = raid.filter_target_raid_config( node, create_root_volume=create_root_volume, create_nonroot_volumes=create_nonroot_volumes) driver_internal_info = node.driver_internal_info driver_internal_info['target_raid_config'] = target_raid_config LOG.debug("Calling OOB RAID create_configuration for node %(node)s " "with the following target RAID configuration: %(target)s", {'node': node.uuid, 'target': target_raid_config}) ilo_object = ilo_common.get_ilo_object(node) try: # Raid configuration in progress, checking status if not driver_internal_info.get('ilo_raid_create_in_progress'): ilo_object.create_raid_configuration(target_raid_config) self._prepare_for_read_raid(task, 'create_raid') return states.CLEANWAIT else: # Raid configuration is done, updating raid_config raid_conf = ( ilo_object.read_raid_configuration( raid_config=target_raid_config)) if len(raid_conf['logical_disks']): raid.update_raid_info(node, raid_conf) LOG.debug("Node %(uuid)s raid create clean step is done.", {'uuid': node.uuid}) self._pop_driver_internal_values( task, 'ilo_raid_create_in_progress', 'cleaning_reboot', 'skip_current_clean_step') node.driver_internal_info = driver_internal_info node.save() else: # Raid configuration failed msg = "Unable to create raid" self._pop_driver_internal_values( task, 'ilo_raid_create_in_progress', 'cleaning_reboot', 'skip_current_clean_step') node.driver_internal_info = driver_internal_info node.save() raise exception.NodeCleaningFailure( "Clean step create_configuration failed " "on node %(node)s with error: %(err)s" % {'node': node.uuid, 'err': msg}) except ilo_error.IloError as ilo_exception: operation = (_("Failed to create raid configuration on node %s") % node.uuid) self._pop_driver_internal_values(task, 'ilo_raid_create_in_progress', 'cleaning_reboot', 'skip_current_clean_step') node.driver_internal_info = driver_internal_info node.save() self._set_clean_failed(task, operation, ilo_exception) @METRICS.timer('Ilo5RAID.delete_configuration') @base.clean_step(priority=0, abortable=False) def delete_configuration(self, task): """Delete the RAID configuration. :param task: a TaskManager instance containing the node to act on. :raises: NodeCleaningFailure, on failure to execute step. 
""" node = task.node LOG.debug("OOB RAID delete_configuration invoked for node %s.", node.uuid) driver_internal_info = node.driver_internal_info ilo_object = ilo_common.get_ilo_object(node) try: # Raid configuration in progress, checking status if not driver_internal_info.get('ilo_raid_delete_in_progress'): ilo_object.delete_raid_configuration() self._prepare_for_read_raid(task, 'delete_raid') return states.CLEANWAIT else: # Raid configuration is done, updating raid_config raid_conf = ilo_object.read_raid_configuration() if not len(raid_conf['logical_disks']): node.raid_config = {} LOG.debug("Node %(uuid)s raid delete clean step is done.", {'uuid': node.uuid}) self._pop_driver_internal_values( task, 'ilo_raid_delete_in_progress', 'cleaning_reboot', 'skip_current_clean_step') node.driver_internal_info = driver_internal_info node.save() else: # Raid configuration failed msg = ("Unable to delete this logical disks: %s" % raid_conf['logical_disks']) self._pop_driver_internal_values( task, 'ilo_raid_delete_in_progress', 'cleaning_reboot', 'skip_current_clean_step') node.driver_internal_info = driver_internal_info node.save() raise exception.NodeCleaningFailure( "Clean step delete_configuration failed " "on node %(node)s with error: %(err)s" % {'node': node.uuid, 'err': msg}) except ilo_error.IloLogicalDriveNotFoundError: LOG.info("No logical drive found to delete on node %(node)s", {'node': node.uuid}) except ilo_error.IloError as ilo_exception: operation = (_("Failed to delete raid configuration on node %s") % node.uuid) self._pop_driver_internal_values(task, 'ilo_raid_delete_in_progress', 'cleaning_reboot', 'skip_current_clean_step') node.driver_internal_info = driver_internal_info node.save() self._set_clean_failed(task, operation, ilo_exception)
[((34, 6, 34, 33), 'oslo_log.log.getLogger', 'logging.getLogger', ({(34, 24, 34, 32): '__name__'}, {}), '(__name__)', True, 'from oslo_log import log as logging\n'), ((36, 10, 36, 52), 'ironic_lib.metrics_utils.get_metrics_logger', 'metrics_utils.get_metrics_logger', ({(36, 43, 36, 51): '__name__'}, {}), '(__name__)', False, 'from ironic_lib import metrics_utils\n'), ((38, 12, 38, 61), 'oslo_utils.importutils.try_import', 'importutils.try_import', ({(38, 35, 38, 60): '"""proliantutils.exception"""'}, {}), "('proliantutils.exception')", False, 'from oslo_utils import importutils\n'), ((90, 5, 105, 6), 'ironic.drivers.base.clean_step', 'base.clean_step', (), '', False, 'from ironic.drivers import base\n'), ((179, 5, 179, 49), 'ironic.drivers.base.clean_step', 'base.clean_step', (), '', False, 'from ironic.drivers import base\n'), ((77, 22, 77, 65), 'ironic.drivers.modules.deploy_utils.build_agent_options', 'deploy_utils.build_agent_options', ({(77, 55, 77, 64): 'task.node'}, {}), '(task.node)', False, 'from ironic.drivers.modules import deploy_utils\n'), ((79, 8, 79, 60), 'ironic.conductor.utils.node_power_action', 'manager_utils.node_power_action', ({(79, 40, 79, 44): 'task', (79, 46, 79, 59): 'states.REBOOT'}, {}), '(task, states.REBOOT)', True, 'from ironic.conductor import utils as manager_utils\n'), ((125, 29, 127, 58), 'ironic.common.raid.filter_target_raid_config', 'raid.filter_target_raid_config', (), '', False, 'from ironic.common import raid\n'), ((133, 21, 133, 52), 'ironic.drivers.modules.ilo.common.get_ilo_object', 'ilo_common.get_ilo_object', ({(133, 47, 133, 51): 'node'}, {}), '(node)', True, 'from ironic.drivers.modules.ilo import common as ilo_common\n'), ((190, 21, 190, 52), 'ironic.drivers.modules.ilo.common.get_ilo_object', 'ilo_common.get_ilo_object', ({(190, 47, 190, 51): 'node'}, {}), '(node)', True, 'from ironic.drivers.modules.ilo import common as ilo_common\n'), ((147, 20, 147, 58), 'ironic.common.raid.update_raid_info', 'raid.update_raid_info', ({(147, 42, 147, 46): 'node', (147, 48, 147, 57): 'raid_conf'}, {}), '(node, raid_conf)', False, 'from ironic.common import raid\n'), ((163, 26, 166, 56), 'ironic.common.exception.NodeCleaningFailure', 'exception.NodeCleaningFailure', ({(164, 24, 166, 55): "('Clean step create_configuration failed on node %(node)s with error: %(err)s'\n % {'node': node.uuid, 'err': msg})"}, {}), "(\n 'Clean step create_configuration failed on node %(node)s with error: %(err)s'\n % {'node': node.uuid, 'err': msg})", False, 'from ironic.common import exception\n'), ((168, 25, 168, 76), 'ironic.common.i18n._', '_', ({(168, 27, 168, 75): '"""Failed to create raid configuration on node %s"""'}, {}), "('Failed to create raid configuration on node %s')", False, 'from ironic.common.i18n import _\n'), ((219, 26, 222, 56), 'ironic.common.exception.NodeCleaningFailure', 'exception.NodeCleaningFailure', ({(220, 24, 222, 55): "('Clean step delete_configuration failed on node %(node)s with error: %(err)s'\n % {'node': node.uuid, 'err': msg})"}, {}), "(\n 'Clean step delete_configuration failed on node %(node)s with error: %(err)s'\n % {'node': node.uuid, 'err': msg})", False, 'from ironic.common import exception\n'), ((227, 25, 227, 76), 'ironic.common.i18n._', '_', ({(227, 27, 227, 75): '"""Failed to delete raid configuration on node %s"""'}, {}), "('Failed to delete raid configuration on node %s')", False, 'from ironic.common.i18n import _\n')]
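Both clean steps above are written as two-pass state machines: the first invocation submits the RAID job to the iLO, sets an ...in_progress flag in driver_internal_info and returns CLEANWAIT; after the ramdisk reboot the same step runs again, sees the flag, reads the result back and clears the bookkeeping. The sketch below reproduces only that control flow with plain dictionaries — it deliberately avoids the real ironic TaskManager and proliantutils APIs, and the target_raid_config contents are illustrative.

CLEANWAIT, DONE = "clean wait", "done"

def create_configuration(node, ilo):
    info = node.setdefault("driver_internal_info", {})
    if not info.get("ilo_raid_create_in_progress"):
        ilo.create_raid_configuration(node["target_raid_config"])
        info["ilo_raid_create_in_progress"] = True
        return CLEANWAIT                       # pass 1: job submitted, wait for reboot
    node["raid_config"] = ilo.read_raid_configuration()
    info.pop("ilo_raid_create_in_progress", None)
    return DONE                                # pass 2: result read back, flag cleared

class FakeIlo:
    def create_raid_configuration(self, cfg): self._cfg = cfg
    def read_raid_configuration(self): return self._cfg

node = {"target_raid_config": {"logical_disks": [{"raid_level": "1"}]}}
ilo = FakeIlo()
print(create_configuration(node, ilo))   # clean wait
print(create_configuration(node, ilo))   # done
print(node["raid_config"])               # {'logical_disks': [{'raid_level': '1'}]}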
groboclown/petronia
src/petronia/aid/bootstrap/__init__.py
486338023d19cee989e92f0c5692680f1a37811f
""" Common Petronia imports for bootstrap parts of an extension. This should be imported along with the `simp` module. """ from ...base.bus import ( EventBus, ListenerRegistrar, ListenerSetup, QueuePriority, ExtensionMetadataStruct, register_event, EVENT_WILDCARD, TARGET_WILDCARD, QUEUE_EVENT_NORMAL, QUEUE_EVENT_HIGH, QUEUE_EVENT_IO, QUEUE_EVENT_TYPES ) from ...base.participant import ( create_singleton_identity, NOT_PARTICIPANT, ) from ...base.events import ( # These are generally just bootstrap events. DisposeCompleteEvent, as_dispose_complete_listener, RequestDisposeEvent, as_request_dispose_listener, SystemStartedEvent, as_system_started_listener, ) from ...base.events.bus import ( EventProtectionModel, GLOBAL_EVENT_PROTECTION, INTERNAL_EVENT_PROTECTION, PRODUCE_EVENT_PROTECTION, CONSUME_EVENT_PROTECTION, REQUEST_EVENT_PROTECTION, RESPONSE_EVENT_PROTECTION, ) from ...core.extensions.api import ANY_VERSION from ...core.shutdown.api import ( SystemShutdownEvent, as_system_shutdown_listener, SystemShutdownFinalizeEvent, as_system_shutdown_finalize_listener, TARGET_ID_SYSTEM_SHUTDOWN, )
[]
zachjweiner/pyopencl
examples/dump-properties.py
4e2e4f3150c331680e6d9e36c59290411e4a0c40
import pyopencl as cl from optparse import OptionParser parser = OptionParser() parser.add_option("-s", "--short", action="store_true", help="don't print all device properties") (options, args) = parser.parse_args() def print_info(obj, info_cls): for info_name in sorted(dir(info_cls)): if not info_name.startswith("_") and info_name != "to_string": info = getattr(info_cls, info_name) try: info_value = obj.get_info(info) except: info_value = "<error>" if (info_cls == cl.device_info and info_name == "PARTITION_TYPES_EXT" and isinstance(info_value, list)): print("{}: {}".format(info_name, [ cl.device_partition_property_ext.to_string(v, "<unknown device partition property %d>") for v in info_value])) else: try: print(f"{info_name}: {info_value}") except: print("%s: <error>" % info_name) for platform in cl.get_platforms(): print(75*"=") print(platform) print(75*"=") if not options.short: print_info(platform, cl.platform_info) for device in platform.get_devices(): if not options.short: print(75*"-") print(device) if not options.short: print(75*"-") print_info(device, cl.device_info) ctx = cl.Context([device]) for mf in [ cl.mem_flags.READ_ONLY, #cl.mem_flags.READ_WRITE, #cl.mem_flags.WRITE_ONLY ]: for itype in [ cl.mem_object_type.IMAGE2D, cl.mem_object_type.IMAGE3D ]: try: formats = cl.get_supported_image_formats(ctx, mf, itype) except: formats = "<error>" else: def str_chd_type(chdtype): result = cl.channel_type.to_string(chdtype, "<unknown channel data type %d>") result = result.replace("_INT", "") result = result.replace("UNSIGNED", "U") result = result.replace("SIGNED", "S") result = result.replace("NORM", "N") result = result.replace("FLOAT", "F") return result formats = ", ".join( "{}-{}".format( cl.channel_order.to_string(iform.channel_order, "<unknown channel order 0x%x>"), str_chd_type(iform.channel_data_type)) for iform in formats) print("{} {} FORMATS: {}\n".format( cl.mem_object_type.to_string(itype), cl.mem_flags.to_string(mf), formats)) del ctx
[((4, 9, 4, 23), 'optparse.OptionParser', 'OptionParser', ({}, {}), '()', False, 'from optparse import OptionParser\n'), ((32, 16, 32, 34), 'pyopencl.get_platforms', 'cl.get_platforms', ({}, {}), '()', True, 'import pyopencl as cl\n'), ((46, 18, 46, 38), 'pyopencl.Context', 'cl.Context', ({(46, 29, 46, 37): '[device]'}, {}), '([device])', True, 'import pyopencl as cl\n'), ((57, 34, 57, 80), 'pyopencl.get_supported_image_formats', 'cl.get_supported_image_formats', ({(57, 65, 57, 68): 'ctx', (57, 70, 57, 72): 'mf', (57, 74, 57, 79): 'itype'}, {}), '(ctx, mf, itype)', True, 'import pyopencl as cl\n'), ((23, 20, 24, 65), 'pyopencl.device_partition_property_ext.to_string', 'cl.device_partition_property_ext.to_string', ({(23, 63, 23, 64): 'v', (24, 24, 24, 64): '"""<unknown device partition property %d>"""'}, {}), "(v,\n '<unknown device partition property %d>')", True, 'import pyopencl as cl\n'), ((62, 37, 63, 69), 'pyopencl.channel_type.to_string', 'cl.channel_type.to_string', ({(62, 63, 62, 70): 'chdtype', (63, 36, 63, 68): '"""<unknown channel data type %d>"""'}, {}), "(chdtype, '<unknown channel data type %d>')", True, 'import pyopencl as cl\n'), ((80, 28, 80, 63), 'pyopencl.mem_object_type.to_string', 'cl.mem_object_type.to_string', ({(80, 57, 80, 62): 'itype'}, {}), '(itype)', True, 'import pyopencl as cl\n'), ((81, 28, 81, 54), 'pyopencl.mem_flags.to_string', 'cl.mem_flags.to_string', ({(81, 51, 81, 53): 'mf'}, {}), '(mf)', True, 'import pyopencl as cl\n'), ((74, 36, 75, 71), 'pyopencl.channel_order.to_string', 'cl.channel_order.to_string', ({(74, 63, 74, 82): 'iform.channel_order', (75, 40, 75, 70): '"""<unknown channel order 0x%x>"""'}, {}), "(iform.channel_order, '<unknown channel order 0x%x>')", True, 'import pyopencl as cl\n')]
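The script above reflects over every attribute of the *_info classes; when only a handful of properties are needed, the same pyopencl calls can be used directly. A minimal sketch, assuming at least one OpenCL platform/driver is installed:

import pyopencl as cl

for platform in cl.get_platforms():
    for device in platform.get_devices():
        print(device.name)
        print("  compute units :", device.get_info(cl.device_info.MAX_COMPUTE_UNITS))
        print("  global memory :", device.get_info(cl.device_info.GLOBAL_MEM_SIZE) // 2**20, "MiB")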
jgillis/acados
interfaces/acados_template/acados_template/acados_ocp_solver.py
3119e2dda636a8358fbd52247eb0163a167cbc97
# -*- coding: future_fstrings -*- # # Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren, # Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor, # Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan, # Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl # # This file is part of acados. # # The 2-Clause BSD License # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE.; # import sys, os, json import numpy as np from ctypes import * from casadi import CasadiMeta, Function, SX from copy import deepcopy from .generate_c_code_explicit_ode import generate_c_code_explicit_ode from .generate_c_code_implicit_ode import generate_c_code_implicit_ode from .generate_c_code_gnsf import generate_c_code_gnsf from .generate_c_code_constraint import generate_c_code_constraint from .generate_c_code_nls_cost import generate_c_code_nls_cost from .generate_c_code_external_cost import generate_c_code_external_cost from .acados_ocp import AcadosOcp from .acados_model import acados_model_strip_casadi_symbolics from .utils import is_column, is_empty, casadi_length, render_template, acados_class2dict,\ format_class_dict, ocp_check_against_layout, np_array_to_list, make_model_consistent,\ set_up_imported_gnsf_model def make_ocp_dims_consistent(acados_ocp): dims = acados_ocp.dims cost = acados_ocp.cost constraints = acados_ocp.constraints model = acados_ocp.model opts = acados_ocp.solver_options # nx if is_column(model.x): dims.nx = casadi_length(model.x) else: raise Exception('model.x should be column vector!') # nu if is_empty(model.u): dims.nu = 0 else: dims.nu = casadi_length(model.u) # nz if is_empty(model.z): dims.nz = 0 else: dims.nz = casadi_length(model.z) # np if is_empty(model.p): dims.np = 0 else: dims.np = casadi_length(model.p) if acados_ocp.parameter_values.shape[0] != dims.np: raise Exception('inconsistent dimension np, regarding model.p and parameter_values.') ## cost # path if cost.cost_type == 'LINEAR_LS': ny = cost.W.shape[0] if cost.Vx.shape[0] != ny or cost.Vu.shape[0] != ny: raise Exception('inconsistent dimension ny, regarding W, Vx, Vu.' + \ f'\nGot W[{cost.W.shape}], Vx[{cost.Vx.shape}], Vu[{cost.Vu.shape}]\n') if dims.nz != 0 and cost.Vz.shape[0] != ny: raise Exception('inconsistent dimension ny, regarding W, Vx, Vu, Vz.' 
+ \ f'\nGot W[{cost.W.shape}], Vx[{cost.Vx.shape}], Vu[{cost.Vu.shape}], Vz[{cost.Vz.shape}]\n') if cost.Vx.shape[1] != dims.nx and ny != 0: raise Exception('inconsistent dimension: Vx should have nx columns.') if cost.Vu.shape[1] != dims.nu and ny != 0: raise Exception('inconsistent dimension: Vu should have nu columns.') if cost.yref.shape[0] != ny: raise Exception('inconsistent dimension: regarding W, yref.' + \ f'\nGot W[{cost.W.shape}], yref[{cost.yref.shape}]\n') dims.ny = ny elif cost.cost_type == 'NONLINEAR_LS': ny = cost.W.shape[0] if is_empty(model.cost_y_expr) and ny != 0: raise Exception('inconsistent dimension ny: regarding W, cost_y_expr.') elif casadi_length(model.cost_y_expr) != ny: raise Exception('inconsistent dimension ny: regarding W, cost_y_expr.') if cost.yref.shape[0] != ny: raise Exception('inconsistent dimension: regarding W, yref.' + \ f'\nGot W[{cost.W.shape}], yref[{cost.yref.shape}]\n') dims.ny = ny # terminal if cost.cost_type_e == 'LINEAR_LS': ny_e = cost.W_e.shape[0] if cost.Vx_e.shape[0] != ny_e: raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.' + \ f'\nGot W_e[{cost.W_e.shape}], Vx_e[{cost.Vx_e.shape}]') if cost.Vx_e.shape[1] != dims.nx and ny_e != 0: raise Exception('inconsistent dimension: Vx_e should have nx columns.') if cost.yref_e.shape[0] != ny_e: raise Exception('inconsistent dimension: regarding W_e, yref_e.') dims.ny_e = ny_e elif cost.cost_type_e == 'NONLINEAR_LS': ny_e = cost.W_e.shape[0] if is_empty(model.cost_y_expr_e) and ny_e != 0: raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.') elif casadi_length(model.cost_y_expr_e) != ny_e: raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.') if cost.yref_e.shape[0] != ny_e: raise Exception('inconsistent dimension: regarding W_e, yref_e.') dims.ny_e = ny_e ## constraints # initial if (constraints.lbx_0 == [] and constraints.ubx_0 == []): dims.nbx_0 = 0 else: this_shape = constraints.lbx_0.shape other_shape = constraints.ubx_0.shape if not this_shape == other_shape: raise Exception('lbx_0, ubx_0 have different shapes!') if not is_column(constraints.lbx_0): raise Exception('lbx_0, ubx_0 must be column vectors!') dims.nbx_0 = constraints.lbx_0.size if all(constraints.lbx_0 == constraints.ubx_0): dims.nbxe_0 = dims.nbx_0 # path nbx = constraints.idxbx.shape[0] if constraints.ubx.shape[0] != nbx or constraints.lbx.shape[0] != nbx: raise Exception('inconsistent dimension nbx, regarding idxbx, ubx, lbx.') else: dims.nbx = nbx nbu = constraints.idxbu.shape[0] if constraints.ubu.shape[0] != nbu or constraints.lbu.shape[0] != nbu: raise Exception('inconsistent dimension nbu, regarding idxbu, ubu, lbu.') else: dims.nbu = nbu ng = constraints.lg.shape[0] if constraints.ug.shape[0] != ng or constraints.C.shape[0] != ng \ or constraints.D.shape[0] != ng: raise Exception('inconsistent dimension ng, regarding lg, ug, C, D.') else: dims.ng = ng if not is_empty(model.con_h_expr): nh = casadi_length(model.con_h_expr) else: nh = 0 if constraints.uh.shape[0] != nh or constraints.lh.shape[0] != nh: raise Exception('inconsistent dimension nh, regarding lh, uh, con_h_expr.') else: dims.nh = nh if is_empty(model.con_phi_expr): dims.nphi = 0 dims.nr = 0 else: dims.nphi = casadi_length(model.con_phi_expr) if is_empty(model.con_r_expr): raise Exception('convex over nonlinear constraints: con_r_expr but con_phi_expr is nonempty') else: dims.nr = casadi_length(model.con_r_expr) # terminal nbx_e = constraints.idxbx_e.shape[0] if 
constraints.ubx_e.shape[0] != nbx_e or constraints.lbx_e.shape[0] != nbx_e: raise Exception('inconsistent dimension nbx_e, regarding idxbx_e, ubx_e, lbx_e.') else: dims.nbx_e = nbx_e ng_e = constraints.lg_e.shape[0] if constraints.ug_e.shape[0] != ng_e or constraints.C_e.shape[0] != ng_e: raise Exception('inconsistent dimension ng_e, regarding_e lg_e, ug_e, C_e.') else: dims.ng_e = ng_e if not is_empty(model.con_h_expr_e): nh_e = casadi_length(model.con_h_expr_e) else: nh_e = 0 if constraints.uh_e.shape[0] != nh_e or constraints.lh_e.shape[0] != nh_e: raise Exception('inconsistent dimension nh_e, regarding lh_e, uh_e, con_h_expr_e.') else: dims.nh_e = nh_e if is_empty(model.con_phi_expr_e): dims.nphi_e = 0 dims.nr_e = 0 else: dims.nphi_e = casadi_length(model.con_phi_expr_e) if is_empty(model.con_r_expr_e): raise Exception('convex over nonlinear constraints: con_r_expr_e but con_phi_expr_e is nonempty') else: dims.nr_e = casadi_length(model.con_r_expr_e) # Slack dimensions nsbx = constraints.idxsbx.shape[0] if is_empty(constraints.lsbx): constraints.lsbx = np.zeros((nsbx,)) elif constraints.lsbx.shape[0] != nsbx: raise Exception('inconsistent dimension nsbx, regarding idxsbx, lsbx.') if is_empty(constraints.usbx): constraints.usbx = np.zeros((nsbx,)) elif constraints.usbx.shape[0] != nsbx: raise Exception('inconsistent dimension nsbx, regarding idxsbx, usbx.') dims.nsbx = nsbx nsbu = constraints.idxsbu.shape[0] if is_empty(constraints.lsbu): constraints.lsbu = np.zeros((nsbu,)) elif constraints.lsbu.shape[0] != nsbu: raise Exception('inconsistent dimension nsbu, regarding idxsbu, lsbu.') if is_empty(constraints.usbu): constraints.usbu = np.zeros((nsbu,)) elif constraints.usbu.shape[0] != nsbu: raise Exception('inconsistent dimension nsbu, regarding idxsbu, usbu.') dims.nsbu = nsbu nsh = constraints.idxsh.shape[0] if is_empty(constraints.lsh): constraints.lsh = np.zeros((nsh,)) elif constraints.lsh.shape[0] != nsh: raise Exception('inconsistent dimension nsh, regarding idxsh, lsh.') if is_empty(constraints.ush): constraints.ush = np.zeros((nsh,)) elif constraints.ush.shape[0] != nsh: raise Exception('inconsistent dimension nsh, regarding idxsh, ush.') dims.nsh = nsh nsphi = constraints.idxsphi.shape[0] if is_empty(constraints.lsphi): constraints.lsphi = np.zeros((nsphi,)) elif constraints.lsphi.shape[0] != nsphi: raise Exception('inconsistent dimension nsphi, regarding idxsphi, lsphi.') if is_empty(constraints.usphi): constraints.usphi = np.zeros((nsphi,)) elif constraints.usphi.shape[0] != nsphi: raise Exception('inconsistent dimension nsphi, regarding idxsphi, usphi.') dims.nsphi = nsphi nsg = constraints.idxsg.shape[0] if is_empty(constraints.lsg): constraints.lsg = np.zeros((nsg,)) elif constraints.lsg.shape[0] != nsg: raise Exception('inconsistent dimension nsg, regarding idxsg, lsg.') if is_empty(constraints.usg): constraints.usg = np.zeros((nsg,)) elif constraints.usg.shape[0] != nsg: raise Exception('inconsistent dimension nsg, regarding idxsg, usg.') dims.nsg = nsg ns = nsbx + nsbu + nsh + nsg + nsphi wrong_field = "" if cost.Zl.shape[0] != ns: wrong_field = "Zl" dim = cost.Zl.shape[0] elif cost.Zu.shape[0] != ns: wrong_field = "Zu" dim = cost.Zu.shape[0] elif cost.zl.shape[0] != ns: wrong_field = "zl" dim = cost.zl.shape[0] elif cost.zu.shape[0] != ns: wrong_field = "zu" dim = cost.zu.shape[0] if wrong_field != "": raise Exception(f'Inconsistent size for field {wrong_field}, with dimension {dim}, \n\t'\ + f'Detected ns = {ns} = nsbx + nsbu + nsg + nsh + nsphi.\n\t'\ + f'With nsbx 
= {nsbx}, nsbu = {nsbu}, nsg = {nsg}, nsh = {nsh}, nsphi = {nsphi}') dims.ns = ns nsbx_e = constraints.idxsbx_e.shape[0] if is_empty(constraints.lsbx_e): constraints.lsbx_e = np.zeros((nsbx_e,)) elif constraints.lsbx_e.shape[0] != nsbx_e: raise Exception('inconsistent dimension nsbx_e, regarding idxsbx_e, lsbx_e.') if is_empty(constraints.usbx_e): constraints.usbx_e = np.zeros((nsbx_e,)) elif constraints.usbx_e.shape[0] != nsbx_e: raise Exception('inconsistent dimension nsbx_e, regarding idxsbx_e, usbx_e.') dims.nsbx_e = nsbx_e nsh_e = constraints.idxsh_e.shape[0] if is_empty(constraints.lsh_e): constraints.lsh_e = np.zeros((nsh_e,)) elif constraints.lsh_e.shape[0] != nsh_e: raise Exception('inconsistent dimension nsh_e, regarding idxsh_e, lsh_e.') if is_empty(constraints.ush_e): constraints.ush_e = np.zeros((nsh_e,)) elif constraints.ush_e.shape[0] != nsh_e: raise Exception('inconsistent dimension nsh_e, regarding idxsh_e, ush_e.') dims.nsh_e = nsh_e nsg_e = constraints.idxsg_e.shape[0] if is_empty(constraints.lsg_e): constraints.lsg_e = np.zeros((nsg_e,)) elif constraints.lsg_e.shape[0] != nsg_e: raise Exception('inconsistent dimension nsg_e, regarding idxsg_e, lsg_e.') if is_empty(constraints.usg_e): constraints.usg_e = np.zeros((nsg_e,)) elif constraints.usg_e.shape[0] != nsg_e: raise Exception('inconsistent dimension nsg_e, regarding idxsg_e, usg_e.') dims.nsg_e = nsg_e nsphi_e = constraints.idxsphi_e.shape[0] if is_empty(constraints.lsphi_e): constraints.lsphi_e = np.zeros((nsphi_e,)) elif constraints.lsphi_e.shape[0] != nsphi_e: raise Exception('inconsistent dimension nsphi_e, regarding idxsphi_e, lsphi_e.') if is_empty(constraints.usphi_e): constraints.usphi_e = np.zeros((nsphi_e,)) elif constraints.usphi_e.shape[0] != nsphi_e: raise Exception('inconsistent dimension nsphi_e, regarding idxsphi_e, usphi_e.') dims.nsphi_e = nsphi_e # terminal ns_e = nsbx_e + nsh_e + nsg_e + nsphi_e wrong_field = "" if cost.Zl_e.shape[0] != ns_e: wrong_field = "Zl_e" dim = cost.Zl_e.shape[0] elif cost.Zu_e.shape[0] != ns_e: wrong_field = "Zu_e" dim = cost.Zu_e.shape[0] elif cost.zl_e.shape[0] != ns_e: wrong_field = "zl_e" dim = cost.zl_e.shape[0] elif cost.zu_e.shape[0] != ns_e: wrong_field = "zu_e" dim = cost.zu_e.shape[0] if wrong_field != "": raise Exception(f'Inconsistent size for field {wrong_field}, with dimension {dim}, \n\t'\ + f'Detected ns_e = {ns_e} = nsbx_e + nsg_e + nsh_e + nsphi_e.\n\t'\ + f'With nsbx_e = {nsbx_e}, nsg_e = {nsg_e}, nsh_e = {nsh_e}, nsphi_e = {nsphi_e}') dims.ns_e = ns_e # discretization if is_empty(opts.time_steps) and is_empty(opts.shooting_nodes): # uniform discretization opts.time_steps = opts.tf / dims.N * np.ones((dims.N,)) elif not is_empty(opts.shooting_nodes): if np.shape(opts.shooting_nodes)[0] != dims.N+1: raise Exception('inconsistent dimension N, regarding shooting_nodes.') time_steps = np.zeros((dims.N,)) for i in range(dims.N): time_steps[i] = opts.shooting_nodes[i+1] - opts.shooting_nodes[i] opts.time_steps = time_steps elif (not is_empty(opts.time_steps)) and (not is_empty(opts.shooting_nodes)): Exception('Please provide either time_steps or shooting_nodes for nonuniform discretization') tf = np.sum(opts.time_steps) if (tf - opts.tf) / tf > 1e-15: raise Exception(f'Inconsistent discretization: {opts.tf}'\ f' = tf != sum(opts.time_steps) = {tf}.') def get_ocp_nlp_layout(): current_module = sys.modules[__name__] acados_path = os.path.dirname(current_module.__file__) with open(acados_path + '/acados_layout.json', 'r') as f: ocp_nlp_layout = json.load(f) 
return ocp_nlp_layout def ocp_formulation_json_dump(acados_ocp, json_file='acados_ocp_nlp.json'): # Load acados_ocp_nlp structure description ocp_layout = get_ocp_nlp_layout() # Copy input ocp object dictionary ocp_nlp_dict = dict(deepcopy(acados_ocp).__dict__) # TODO: maybe make one funciton with formatting for acados_struct, v in ocp_layout.items(): # skip non dict attributes if not isinstance(v, dict): continue # setattr(ocp_nlp, acados_struct, dict(getattr(acados_ocp, acados_struct).__dict__)) # Copy ocp object attributes dictionaries ocp_nlp_dict[acados_struct]=dict(getattr(acados_ocp, acados_struct).__dict__) ocp_nlp_dict = format_class_dict(ocp_nlp_dict) # strip symbolics ocp_nlp_dict['model'] = acados_model_strip_casadi_symbolics(ocp_nlp_dict['model']) # strip shooting_nodes ocp_nlp_dict['solver_options'].pop('shooting_nodes', None) dims_dict = acados_class2dict(acados_ocp.dims) ocp_check_against_layout(ocp_nlp_dict, dims_dict) with open(json_file, 'w') as f: json.dump(ocp_nlp_dict, f, default=np_array_to_list, indent=4, sort_keys=True) def ocp_formulation_json_load(json_file='acados_ocp_nlp.json'): # Load acados_ocp_nlp structure description ocp_layout = get_ocp_nlp_layout() with open(json_file, 'r') as f: ocp_nlp_json = json.load(f) ocp_nlp_dict = json2dict(ocp_nlp_json, ocp_nlp_json['dims']) # Instantiate AcadosOcp object acados_ocp = AcadosOcp() # load class dict acados_ocp.__dict__ = ocp_nlp_dict # laod class attributes dict, dims, constraints, etc for acados_struct, v in ocp_layout.items(): # skip non dict attributes if not isinstance(v, dict): continue acados_attribute = getattr(acados_ocp, acados_struct) acados_attribute.__dict__ = ocp_nlp_dict[acados_struct] setattr(acados_ocp, acados_struct, acados_attribute) return acados_ocp def ocp_generate_external_functions(acados_ocp, model): model = make_model_consistent(model) if acados_ocp.solver_options.integrator_type == 'ERK': # explicit model -- generate C code generate_c_code_explicit_ode(model) elif acados_ocp.solver_options.integrator_type == 'IRK': # implicit model -- generate C code opts = dict(generate_hess=1) generate_c_code_implicit_ode(model, opts) elif acados_ocp.solver_options.integrator_type == 'GNSF': generate_c_code_gnsf(model) else: raise Exception("ocp_generate_external_functions: unknown integrator type.") if acados_ocp.solver_options.hessian_approx == 'EXACT': opts = dict(generate_hess=1) else: opts = dict(generate_hess=0) if acados_ocp.dims.nphi > 0 or acados_ocp.dims.nh > 0: generate_c_code_constraint(model, model.name, False, opts) if acados_ocp.dims.nphi_e > 0 or acados_ocp.dims.nh_e > 0: generate_c_code_constraint(model, model.name, True, opts) # dummy matrices if not acados_ocp.cost.cost_type == 'LINEAR_LS': acados_ocp.cost.Vx = np.zeros((acados_ocp.dims.ny, acados_ocp.dims.nx)) acados_ocp.cost.Vu = np.zeros((acados_ocp.dims.ny, acados_ocp.dims.nu)) if not acados_ocp.cost.cost_type_e == 'LINEAR_LS': acados_ocp.cost.Vx_e = np.zeros((acados_ocp.dims.ny_e, acados_ocp.dims.nx)) if acados_ocp.cost.cost_type == 'NONLINEAR_LS': generate_c_code_nls_cost(model, model.name, False) elif acados_ocp.cost.cost_type == 'EXTERNAL': generate_c_code_external_cost(model, False) if acados_ocp.cost.cost_type_e == 'NONLINEAR_LS': generate_c_code_nls_cost(model, model.name, True) elif acados_ocp.cost.cost_type_e == 'EXTERNAL': generate_c_code_external_cost(model, True) def ocp_render_templates(acados_ocp, json_file): name = acados_ocp.model.name # setting up loader and environment json_path = '{cwd}/{json_file}'.format( 
cwd=os.getcwd(), json_file=json_file) if not os.path.exists(json_path): raise Exception('{} not found!'.format(json_path)) template_dir = 'c_generated_code/' ## Render templates in_file = 'main.in.c' out_file = 'main_{}.c'.format(name) render_template(in_file, out_file, template_dir, json_path) in_file = 'acados_solver.in.c' out_file = 'acados_solver_{}.c'.format(name) render_template(in_file, out_file, template_dir, json_path) in_file = 'acados_solver.in.h' out_file = 'acados_solver_{}.h'.format(name) render_template(in_file, out_file, template_dir, json_path) in_file = 'Makefile.in' out_file = 'Makefile' render_template(in_file, out_file, template_dir, json_path) in_file = 'acados_solver_sfun.in.c' out_file = 'acados_solver_sfunction_{}.c'.format(name) render_template(in_file, out_file, template_dir, json_path) in_file = 'make_sfun.in.m' out_file = 'make_sfun.m' render_template(in_file, out_file, template_dir, json_path) in_file = 'acados_sim_solver.in.c' out_file = 'acados_sim_solver_{}.c'.format(name) render_template(in_file, out_file, template_dir, json_path) in_file = 'acados_sim_solver.in.h' out_file = 'acados_sim_solver_{}.h'.format(name) render_template(in_file, out_file, template_dir, json_path) ## folder model template_dir = 'c_generated_code/{}_model/'.format(name) in_file = 'model.in.h' out_file = '{}_model.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # constraints on convex over nonlinear function if acados_ocp.constraints.constr_type == 'BGP' and acados_ocp.dims.nphi > 0: # constraints on outer function template_dir = 'c_generated_code/{}_constraints/'.format(name) in_file = 'phi_constraint.in.h' out_file = '{}_phi_constraint.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # terminal constraints on convex over nonlinear function if acados_ocp.constraints.constr_type_e == 'BGP' and acados_ocp.dims.nphi_e > 0: # terminal constraints on outer function template_dir = 'c_generated_code/{}_constraints/'.format(name) in_file = 'phi_e_constraint.in.h' out_file = '{}_phi_e_constraint.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # nonlinear constraints if acados_ocp.constraints.constr_type == 'BGH' and acados_ocp.dims.nh > 0: template_dir = 'c_generated_code/{}_constraints/'.format(name) in_file = 'h_constraint.in.h' out_file = '{}_h_constraint.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # terminal nonlinear constraints if acados_ocp.constraints.constr_type_e == 'BGH' and acados_ocp.dims.nh_e > 0: template_dir = 'c_generated_code/{}_constraints/'.format(name) in_file = 'h_e_constraint.in.h' out_file = '{}_h_e_constraint.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # nonlinear cost function if acados_ocp.cost.cost_type == 'NONLINEAR_LS': template_dir = 'c_generated_code/{}_cost/'.format(name) in_file = 'cost_y_fun.in.h' out_file = '{}_cost_y_fun.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # terminal nonlinear cost function if acados_ocp.cost.cost_type_e == 'NONLINEAR_LS': template_dir = 'c_generated_code/{}_cost/'.format(name) in_file = 'cost_y_e_fun.in.h' out_file = '{}_cost_y_e_fun.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # external cost if acados_ocp.cost.cost_type == 'EXTERNAL': template_dir = 'c_generated_code/{}_cost/'.format(name) in_file = 'external_cost.in.h' out_file = '{}_external_cost.h'.format(name) render_template(in_file, out_file, template_dir, 
json_path) # external cost - terminal if acados_ocp.cost.cost_type_e == 'EXTERNAL': template_dir = 'c_generated_code/{}_cost/'.format(name) in_file = 'external_cost_e.in.h' out_file = '{}_external_cost_e.h'.format(name) render_template(in_file, out_file, template_dir, json_path) class AcadosOcpSolver: """ class to interact with the acados ocp solver C object """ def __init__(self, acados_ocp, json_file='acados_ocp_nlp.json'): self.solver_created = False model = acados_ocp.model # make dims consistent make_ocp_dims_consistent(acados_ocp) if acados_ocp.solver_options.integrator_type == 'GNSF': set_up_imported_gnsf_model(acados_ocp) # set integrator time automatically acados_ocp.solver_options.Tsim = acados_ocp.solver_options.time_steps[0] # generate external functions ocp_generate_external_functions(acados_ocp, model) # dump to json ocp_formulation_json_dump(acados_ocp, json_file) # render templates ocp_render_templates(acados_ocp, json_file) ## Compile solver os.chdir('c_generated_code') os.system('make clean_ocp_shared_lib') os.system('make ocp_shared_lib') os.chdir('..') self.shared_lib_name = 'c_generated_code/libacados_ocp_solver_' + model.name + '.so' # get self.shared_lib = CDLL(self.shared_lib_name) self.shared_lib.acados_create() self.solver_created = True self.shared_lib.acados_get_nlp_opts.restype = c_void_p self.nlp_opts = self.shared_lib.acados_get_nlp_opts() self.shared_lib.acados_get_nlp_dims.restype = c_void_p self.nlp_dims = self.shared_lib.acados_get_nlp_dims() self.shared_lib.acados_get_nlp_config.restype = c_void_p self.nlp_config = self.shared_lib.acados_get_nlp_config() self.shared_lib.acados_get_nlp_out.restype = c_void_p self.nlp_out = self.shared_lib.acados_get_nlp_out() self.shared_lib.acados_get_nlp_in.restype = c_void_p self.nlp_in = self.shared_lib.acados_get_nlp_in() self.shared_lib.acados_get_nlp_solver.restype = c_void_p self.nlp_solver = self.shared_lib.acados_get_nlp_solver() self.acados_ocp = acados_ocp def solve(self): """ solve the ocp with current input """ status = self.shared_lib.acados_solve() return status def get(self, stage_, field_): """ get the last solution of the solver: :param stage: integer corresponding to shooting node :param field_: string in ['x', 'u', 'z', 'pi', 'lam', 't', 'sl', 'su',] .. note:: regarding lam, t: \n the inequalities are internally organized in the following order: \n [ lbu lbx lg lh lphi ubu ubx ug uh uphi; \n lsbu lsbx lsg lsh lsphi usbu usbx usg ush usphi] .. note:: pi: multipliers for dynamics equality constraints \n lam: multipliers for inequalities \n t: slack variables corresponding to evaluation of all inequalities (at the solution) \n sl: slack variables of soft lower inequality constraints \n su: slack variables of soft upper inequality constraints \n """ out_fields = ['x', 'u', 'z', 'pi', 'lam', 't'] mem_fields = ['sl', 'su'] field = field_ field = field.encode('utf-8') if (field_ not in out_fields + mem_fields): raise Exception('AcadosOcpSolver.get(): {} is an invalid argument.\ \n Possible values are {}. 
Exiting.'.format(field_, out_fields + mem_fields)) self.shared_lib.ocp_nlp_dims_get_from_attr.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p] self.shared_lib.ocp_nlp_dims_get_from_attr.restype = c_int dims = self.shared_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, \ self.nlp_dims, self.nlp_out, stage_, field) out = np.ascontiguousarray(np.zeros((dims,)), dtype=np.float64) out_data = cast(out.ctypes.data, POINTER(c_double)) if (field_ in out_fields): self.shared_lib.ocp_nlp_out_get.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p] self.shared_lib.ocp_nlp_out_get(self.nlp_config, \ self.nlp_dims, self.nlp_out, stage_, field, out_data) elif field_ in mem_fields: self.shared_lib.ocp_nlp_get_at_stage.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p] self.shared_lib.ocp_nlp_get_at_stage(self.nlp_config, \ self.nlp_dims, self.nlp_solver, stage_, field, out_data) return out def print_statistics(self): stat = self.get_stats("statistics") if self.acados_ocp.solver_options.nlp_solver_type == 'SQP': print('\niter\tres_stat\tres_eq\t\tres_ineq\tres_comp\tqp_stat\tqp_iter') if stat.shape[0]>7: print('\tqp_res_stat\tqp_res_eq\tqp_res_ineq\tqp_res_comp') for jj in range(stat.shape[1]): print('{:d}\t{:e}\t{:e}\t{:e}\t{:e}\t{:d}\t{:d}'.format( \ int(stat[0][jj]), stat[1][jj], stat[2][jj], \ stat[3][jj], stat[4][jj], int(stat[5][jj]), int(stat[6][jj]))) if stat.shape[0]>7: print('\t{:e}\t{:e}\t{:e}\t{:e}'.format( \ stat[7][jj], stat[8][jj], stat[9][jj], stat[10][jj])) print('\n') elif self.acados_ocp.solver_options.nlp_solver_type == 'SQP_RTI': print('\niter\tqp_stat\tqp_iter') if stat.shape[0]>3: print('\tqp_res_stat\tqp_res_eq\tqp_res_ineq\tqp_res_comp') for jj in range(stat.shape[1]): print('{:d}\t{:d}\t{:d}'.format( int(stat[0][jj]), int(stat[1][jj]), int(stat[2][jj]))) if stat.shape[0]>3: print('\t{:e}\t{:e}\t{:e}\t{:e}'.format( \ stat[3][jj], stat[4][jj], stat[5][jj], stat[6][jj])) print('\n') return def get_stats(self, field_): """ get the information of the last solver call: :param field_: string in ['statistics', 'time_tot', 'time_lin', 'time_sim', 'time_sim_ad', 'time_sim_la', 'time_qp', 'time_qp_solver_call', 'time_reg', 'sqp_iter'] """ fields = ['time_tot', # total cpu time previous call 'time_lin', # cpu time for linearization 'time_sim', # cpu time for integrator 'time_sim_ad', # cpu time for integrator contribution of external function calls 'time_sim_la', # cpu time for integrator contribution of linear algebra 'time_qp', # cpu time qp solution 'time_qp_solver_call', # cpu time inside qp solver (without converting the QP) 'time_qp_xcond', 'time_reg', # cpu time regularization 'sqp_iter', # number of SQP iterations 'statistics', # table with info about last iteration 'stat_m', 'stat_n', ] field = field_ field = field.encode('utf-8') if (field_ not in fields): raise Exception('AcadosOcpSolver.get_stats(): {} is not a valid argument.\ \n Possible values are {}. 
Exiting.'.format(fields, fields)) if field_ in ['sqp_iter', 'stat_m', 'stat_n']: out = np.ascontiguousarray(np.zeros((1,)), dtype=np.int64) out_data = cast(out.ctypes.data, POINTER(c_int64)) elif field_ == 'statistics': sqp_iter = self.get_stats("sqp_iter") stat_m = self.get_stats("stat_m") stat_n = self.get_stats("stat_n") min_size = min([stat_m, sqp_iter+1]) out = np.ascontiguousarray( np.zeros( (stat_n[0]+1, min_size[0]) ), dtype=np.float64) out_data = cast(out.ctypes.data, POINTER(c_double)) else: out = np.ascontiguousarray(np.zeros((1,)), dtype=np.float64) out_data = cast(out.ctypes.data, POINTER(c_double)) self.shared_lib.ocp_nlp_get.argtypes = [c_void_p, c_void_p, c_char_p, c_void_p] self.shared_lib.ocp_nlp_get(self.nlp_config, self.nlp_solver, field, out_data) return out # Note: this function should not be used anymore, better use cost_set, constraints_set def set(self, stage_, field_, value_): cost_fields = ['y_ref', 'yref'] constraints_fields = ['lbx', 'ubx', 'lbu', 'ubu'] out_fields = ['x', 'u', 'pi', 'lam', 't'] # cast value_ to avoid conversion issues value_ = value_.astype(float) field = field_ field = field.encode('utf-8') stage = c_int(stage_) # treat parameters separately if field_ is 'p': self.shared_lib.acados_update_params.argtypes = [c_int, POINTER(c_double)] self.shared_lib.acados_update_params.restype = c_int value_data = cast(value_.ctypes.data, POINTER(c_double)) self.shared_lib.acados_update_params(stage, value_data, value_.shape[0]) else: if field_ not in constraints_fields + cost_fields + out_fields: raise Exception("AcadosOcpSolver.set(): {} is not a valid argument.\ \nPossible values are {}. Exiting.".format(field, \ constraints_fields + cost_fields + out_fields + ['p'])) self.shared_lib.ocp_nlp_dims_get_from_attr.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p] self.shared_lib.ocp_nlp_dims_get_from_attr.restype = c_int dims = self.shared_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, \ self.nlp_dims, self.nlp_out, stage_, field) if value_.shape[0] != dims: msg = 'AcadosOcpSolver.set(): mismatching dimension for field "{}" '.format(field_) msg += 'with dimension {} (you have {})'.format(dims, value_.shape[0]) raise Exception(msg) value_data = cast(value_.ctypes.data, POINTER(c_double)) value_data_p = cast((value_data), c_void_p) if field_ in constraints_fields: self.shared_lib.ocp_nlp_constraints_model_set.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p] self.shared_lib.ocp_nlp_constraints_model_set(self.nlp_config, \ self.nlp_dims, self.nlp_in, stage, field, value_data_p) elif field_ in cost_fields: self.shared_lib.ocp_nlp_cost_model_set.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p] self.shared_lib.ocp_nlp_cost_model_set(self.nlp_config, \ self.nlp_dims, self.nlp_in, stage, field, value_data_p) elif field_ in out_fields: self.shared_lib.ocp_nlp_out_set.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p] self.shared_lib.ocp_nlp_out_set(self.nlp_config, \ self.nlp_dims, self.nlp_out, stage, field, value_data_p) return def cost_set(self, stage_, field_, value_): """ set numerical data in the cost module of the solver: :param stage_: integer corresponding to shooting node :param field_: string, e.g. 
'yref', 'W', 'ext_cost_num_hess' :param value_: of appropriate size """ # cast value_ to avoid conversion issues value_ = value_.astype(float) field = field_ field = field.encode('utf-8') stage = c_int(stage_) self.shared_lib.ocp_nlp_cost_dims_get_from_attr.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, POINTER(c_int)] self.shared_lib.ocp_nlp_cost_dims_get_from_attr.restype = c_int dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc) dims_data = cast(dims.ctypes.data, POINTER(c_int)) self.shared_lib.ocp_nlp_cost_dims_get_from_attr(self.nlp_config, \ self.nlp_dims, self.nlp_out, stage_, field, dims_data) value_shape = value_.shape if len(value_shape) == 1: value_shape = (value_shape[0], 0) if value_shape != tuple(dims): raise Exception('AcadosOcpSolver.cost_set(): mismatching dimension', \ ' for field "{}" with dimension {} (you have {})'.format( \ field_, tuple(dims), value_shape)) value_data = cast(value_.ctypes.data, POINTER(c_double)) value_data_p = cast((value_data), c_void_p) self.shared_lib.ocp_nlp_cost_model_set.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p] self.shared_lib.ocp_nlp_cost_model_set(self.nlp_config, \ self.nlp_dims, self.nlp_in, stage, field, value_data_p) return def constraints_set(self, stage_, field_, value_): """ set numerical data in the constraint module of the solver: Parameters: :param stage_: integer corresponding to shooting node :param field_: string, e.g. 'lbx' :param value_: of appropriate size """ # cast value_ to avoid conversion issues value_ = value_.astype(float) field = field_ field = field.encode('utf-8') stage = c_int(stage_) self.shared_lib.ocp_nlp_constraint_dims_get_from_attr.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, POINTER(c_int)] self.shared_lib.ocp_nlp_constraint_dims_get_from_attr.restype = c_int dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc) dims_data = cast(dims.ctypes.data, POINTER(c_int)) self.shared_lib.ocp_nlp_constraint_dims_get_from_attr(self.nlp_config, \ self.nlp_dims, self.nlp_out, stage_, field, dims_data) value_shape = value_.shape if len(value_shape) == 1: value_shape = (value_shape[0], 0) if value_shape != tuple(dims): raise Exception('AcadosOcpSolver.constraints_set(): mismatching dimension' \ ' for field "{}" with dimension {} (you have {})'.format(field_, tuple(dims), value_shape)) value_data = cast(value_.ctypes.data, POINTER(c_double)) value_data_p = cast((value_data), c_void_p) self.shared_lib.ocp_nlp_constraints_model_set.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p] self.shared_lib.ocp_nlp_constraints_model_set(self.nlp_config, \ self.nlp_dims, self.nlp_in, stage, field, value_data_p) return def options_set(self, field_, value_): """ set options of the solver: Parameters: :param field_: string, e.g. 'print_level', 'rti_phase', 'initialize_t_slacks', 'step_length' :param value_: of type int, float """ int_fields = ['print_level', 'rti_phase', 'initialize_t_slacks'] double_fields = ['step_length'] string_fields = ['globalization'] if field_ in int_fields: if not isinstance(value_, int): raise Exception('solver option {} must be of type int. You have {}.'.format(field_, type(value_))) else: value_ctypes = c_int(value_) elif field_ in double_fields: if not isinstance(value_, float): raise Exception('solver option {} must be of type float. 
You have {}.'.format(field_, type(value_))) else: value_ctypes = c_double(value_) elif field_ in string_fields: if not isinstance(value_, str): raise Exception('solver option {} must be of type str. You have {}.'.format(field_, type(value_))) else: value_ctypes = value_.encode('utf-8') if field_ == 'rti_phase': if value_ < 0 or value_ > 2: raise Exception('AcadosOcpSolver.solve(): argument \'rti_phase\' can ' 'take only values 0, 1, 2 for SQP-RTI-type solvers') if self.acados_ocp.solver_options.nlp_solver_type != 'SQP_RTI' and value_ > 0: raise Exception('AcadosOcpSolver.solve(): argument \'rti_phase\' can ' 'take only value 0 for SQP-type solvers') field = field_ field = field.encode('utf-8') if field_ in string_fields: self.shared_lib.ocp_nlp_solver_opts_set.argtypes = \ [c_void_p, c_void_p, c_char_p, c_char_p] self.shared_lib.ocp_nlp_solver_opts_set(self.nlp_config, \ self.nlp_opts, field, value_ctypes) else: self.shared_lib.ocp_nlp_solver_opts_set.argtypes = \ [c_void_p, c_void_p, c_char_p, c_void_p] self.shared_lib.ocp_nlp_solver_opts_set(self.nlp_config, \ self.nlp_opts, field, byref(value_ctypes)) return def __del__(self): if self.solver_created: self.shared_lib.acados_free() del self.shared_lib # NOTE: DLL cannot be easily unloaded!!! # see https://stackoverflow.com/questions/359498/how-can-i-unload-a-dll-using-ctypes-in-python # while isLoaded(self.shared_lib_name): # dlclose(handle)
[((393, 9, 393, 32), 'numpy.sum', 'np.sum', ({(393, 16, 393, 31): 'opts.time_steps'}, {}), '(opts.time_steps)', True, 'import numpy as np\n'), ((402, 18, 402, 58), 'os.path.dirname', 'os.path.dirname', ({(402, 34, 402, 57): 'current_module.__file__'}, {}), '(current_module.__file__)', False, 'import sys, os, json\n'), ((234, 27, 234, 44), 'numpy.zeros', 'np.zeros', ({(234, 36, 234, 43): '(nsbx,)'}, {}), '((nsbx,))', True, 'import numpy as np\n'), ((238, 27, 238, 44), 'numpy.zeros', 'np.zeros', ({(238, 36, 238, 43): '(nsbx,)'}, {}), '((nsbx,))', True, 'import numpy as np\n'), ((245, 27, 245, 44), 'numpy.zeros', 'np.zeros', ({(245, 36, 245, 43): '(nsbu,)'}, {}), '((nsbu,))', True, 'import numpy as np\n'), ((249, 27, 249, 44), 'numpy.zeros', 'np.zeros', ({(249, 36, 249, 43): '(nsbu,)'}, {}), '((nsbu,))', True, 'import numpy as np\n'), ((256, 26, 256, 42), 'numpy.zeros', 'np.zeros', ({(256, 35, 256, 41): '(nsh,)'}, {}), '((nsh,))', True, 'import numpy as np\n'), ((260, 26, 260, 42), 'numpy.zeros', 'np.zeros', ({(260, 35, 260, 41): '(nsh,)'}, {}), '((nsh,))', True, 'import numpy as np\n'), ((267, 28, 267, 46), 'numpy.zeros', 'np.zeros', ({(267, 37, 267, 45): '(nsphi,)'}, {}), '((nsphi,))', True, 'import numpy as np\n'), ((271, 28, 271, 46), 'numpy.zeros', 'np.zeros', ({(271, 37, 271, 45): '(nsphi,)'}, {}), '((nsphi,))', True, 'import numpy as np\n'), ((278, 26, 278, 42), 'numpy.zeros', 'np.zeros', ({(278, 35, 278, 41): '(nsg,)'}, {}), '((nsg,))', True, 'import numpy as np\n'), ((282, 26, 282, 42), 'numpy.zeros', 'np.zeros', ({(282, 35, 282, 41): '(nsg,)'}, {}), '((nsg,))', True, 'import numpy as np\n'), ((311, 29, 311, 48), 'numpy.zeros', 'np.zeros', ({(311, 38, 311, 47): '(nsbx_e,)'}, {}), '((nsbx_e,))', True, 'import numpy as np\n'), ((315, 29, 315, 48), 'numpy.zeros', 'np.zeros', ({(315, 38, 315, 47): '(nsbx_e,)'}, {}), '((nsbx_e,))', True, 'import numpy as np\n'), ((322, 28, 322, 46), 'numpy.zeros', 'np.zeros', ({(322, 37, 322, 45): '(nsh_e,)'}, {}), '((nsh_e,))', True, 'import numpy as np\n'), ((326, 28, 326, 46), 'numpy.zeros', 'np.zeros', ({(326, 37, 326, 45): '(nsh_e,)'}, {}), '((nsh_e,))', True, 'import numpy as np\n'), ((333, 28, 333, 46), 'numpy.zeros', 'np.zeros', ({(333, 37, 333, 45): '(nsg_e,)'}, {}), '((nsg_e,))', True, 'import numpy as np\n'), ((337, 28, 337, 46), 'numpy.zeros', 'np.zeros', ({(337, 37, 337, 45): '(nsg_e,)'}, {}), '((nsg_e,))', True, 'import numpy as np\n'), ((344, 30, 344, 50), 'numpy.zeros', 'np.zeros', ({(344, 39, 344, 49): '(nsphi_e,)'}, {}), '((nsphi_e,))', True, 'import numpy as np\n'), ((348, 30, 348, 50), 'numpy.zeros', 'np.zeros', ({(348, 39, 348, 49): '(nsphi_e,)'}, {}), '((nsphi_e,))', True, 'import numpy as np\n'), ((404, 25, 404, 37), 'json.load', 'json.load', ({(404, 35, 404, 36): 'f'}, {}), '(f)', False, 'import sys, os, json\n'), ((436, 8, 436, 86), 'json.dump', 'json.dump', (), '', False, 'import sys, os, json\n'), ((445, 23, 445, 35), 'json.load', 'json.load', ({(445, 33, 445, 34): 'f'}, {}), '(f)', False, 'import sys, os, json\n'), ((495, 29, 495, 79), 'numpy.zeros', 'np.zeros', ({(495, 38, 495, 78): '(acados_ocp.dims.ny, acados_ocp.dims.nx)'}, {}), '((acados_ocp.dims.ny, acados_ocp.dims.nx))', True, 'import numpy as np\n'), ((496, 29, 496, 79), 'numpy.zeros', 'np.zeros', ({(496, 38, 496, 78): '(acados_ocp.dims.ny, acados_ocp.dims.nu)'}, {}), '((acados_ocp.dims.ny, acados_ocp.dims.nu))', True, 'import numpy as np\n'), ((498, 31, 498, 83), 'numpy.zeros', 'np.zeros', ({(498, 40, 498, 82): '(acados_ocp.dims.ny_e, acados_ocp.dims.nx)'}, {}), 
'((acados_ocp.dims.ny_e, acados_ocp.dims.nx))', True, 'import numpy as np\n'), ((521, 11, 521, 36), 'os.path.exists', 'os.path.exists', ({(521, 26, 521, 35): 'json_path'}, {}), '(json_path)', False, 'import sys, os, json\n'), ((655, 8, 655, 36), 'os.chdir', 'os.chdir', ({(655, 17, 655, 35): '"""c_generated_code"""'}, {}), "('c_generated_code')", False, 'import sys, os, json\n'), ((656, 8, 656, 46), 'os.system', 'os.system', ({(656, 18, 656, 45): '"""make clean_ocp_shared_lib"""'}, {}), "('make clean_ocp_shared_lib')", False, 'import sys, os, json\n'), ((657, 8, 657, 40), 'os.system', 'os.system', ({(657, 18, 657, 39): '"""make ocp_shared_lib"""'}, {}), "('make ocp_shared_lib')", False, 'import sys, os, json\n'), ((658, 8, 658, 22), 'os.chdir', 'os.chdir', ({(658, 17, 658, 21): '""".."""'}, {}), "('..')", False, 'import sys, os, json\n'), ((379, 45, 379, 63), 'numpy.ones', 'np.ones', ({(379, 53, 379, 62): '(dims.N,)'}, {}), '((dims.N,))', True, 'import numpy as np\n'), ((385, 21, 385, 40), 'numpy.zeros', 'np.zeros', ({(385, 30, 385, 39): '(dims.N,)'}, {}), '((dims.N,))', True, 'import numpy as np\n'), ((413, 24, 413, 44), 'copy.deepcopy', 'deepcopy', ({(413, 33, 413, 43): 'acados_ocp'}, {}), '(acados_ocp)', False, 'from copy import deepcopy\n'), ((518, 12, 518, 23), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import sys, os, json\n'), ((730, 35, 730, 52), 'numpy.zeros', 'np.zeros', ({(730, 44, 730, 51): '(dims,)'}, {}), '((dims,))', True, 'import numpy as np\n'), ((906, 36, 906, 50), 'numpy.zeros', 'np.zeros', ({(906, 45, 906, 49): '(2,)'}, {}), '((2,))', True, 'import numpy as np\n'), ((951, 36, 951, 50), 'numpy.zeros', 'np.zeros', ({(951, 45, 951, 49): '(2,)'}, {}), '((2,))', True, 'import numpy as np\n'), ((804, 39, 804, 53), 'numpy.zeros', 'np.zeros', ({(804, 48, 804, 52): '(1,)'}, {}), '((1,))', True, 'import numpy as np\n'), ((382, 11, 382, 40), 'numpy.shape', 'np.shape', ({(382, 20, 382, 39): 'opts.shooting_nodes'}, {}), '(opts.shooting_nodes)', True, 'import numpy as np\n'), ((815, 24, 815, 62), 'numpy.zeros', 'np.zeros', ({(815, 34, 815, 60): '(stat_n[0] + 1, min_size[0])'}, {}), '((stat_n[0] + 1, min_size[0]))', True, 'import numpy as np\n'), ((819, 39, 819, 53), 'numpy.zeros', 'np.zeros', ({(819, 48, 819, 52): '(1,)'}, {}), '((1,))', True, 'import numpy as np\n')]
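A hedged sketch of the typical call sequence for the AcadosOcpSolver wrapper above, in the style of the upstream acados examples; export_ocp() is a placeholder for user code that returns a fully populated AcadosOcp, and the first-node bounds are assumed to cover the full state vector:

import numpy as np
from acados_template import AcadosOcp, AcadosOcpSolver

ocp = export_ocp()                       # placeholder: user-defined AcadosOcp setup
solver = AcadosOcpSolver(ocp, json_file='acados_ocp_nlp.json')

x0 = np.zeros(ocp.dims.nx)
solver.constraints_set(0, 'lbx', x0)     # pin the initial state (assumes idxbx_0 covers all states)
solver.constraints_set(0, 'ubx', x0)

status = solver.solve()
solver.print_statistics()
u0 = solver.get(0, 'u')                  # first control input
x1 = solver.get(1, 'x')                  # state at shooting node 1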
MaikWischow/Camera-Condition-Monitoring
noise/estimation/PCA/analyticNoiseEstimation_PCA.py
910f9192d6309a6803ab76c346269fa5029c38e6
import numpy as np import cv2 import sys import os import glob def im2patch(im, pch_size, stride=1): ''' Transform image to patches. Input: im: 3 x H x W or 1 X H x W image, numpy format pch_size: (int, int) tuple or integer stride: (int, int) tuple or integer ''' if isinstance(pch_size, tuple): pch_H, pch_W = pch_size elif isinstance(pch_size, int): pch_H = pch_W = pch_size else: sys.exit('The input of pch_size must be a integer or a int tuple!') if isinstance(stride, tuple): stride_H, stride_W = stride elif isinstance(stride, int): stride_H = stride_W = stride else: sys.exit('The input of stride must be a integer or a int tuple!') C, H, W = im.shape num_H = len(range(0, H-pch_H+1, stride_H)) num_W = len(range(0, W-pch_W+1, stride_W)) num_pch = num_H * num_W pch = np.zeros((C, pch_H*pch_W, num_pch), dtype=im.dtype) kk = 0 for ii in range(pch_H): for jj in range(pch_W): temp = im[:, ii:H-pch_H+ii+1:stride_H, jj:W-pch_W+jj+1:stride_W] pch[:, kk, :] = temp.reshape((C, num_pch)) kk += 1 return pch.reshape((C, pch_H, pch_W, num_pch)) def noise_estimate(im, pch_size=8): ''' Implement of noise level estimation of the following paper: Chen G , Zhu F , Heng P A . An Efficient Statistical Method for Image Noise Level Estimation[C]// 2015 IEEE International Conference on Computer Vision (ICCV). IEEE Computer Society, 2015. Input: im: the noise image, H x W x 3 or H x W numpy tensor, range [0,1] pch_size: patch_size Output: noise_level: the estimated noise level ''' if im.ndim == 3: im = im.transpose((2, 0, 1)) else: im = np.expand_dims(im, axis=0) # image to patch pch = im2patch(im, pch_size, 3) # C x pch_size x pch_size x num_pch tensor num_pch = pch.shape[3] pch = pch.reshape((-1, num_pch)) # d x num_pch matrix d = pch.shape[0] mu = pch.mean(axis=1, keepdims=True) # d x 1 X = pch - mu sigma_X = np.matmul(X, X.transpose()) / num_pch sig_value, _ = np.linalg.eigh(sigma_X) sig_value.sort() for ii in range(-1, -d-1, -1): tau = np.mean(sig_value[:ii]) if np.sum(sig_value[:ii]>tau) == np.sum(sig_value[:ii] < tau): return np.sqrt(tau) def run(imgPath, patchSize, internalNumPatches, dirOut, saveResults=True): """ Estimates the standard deviation of (additive white gaussian) noise of image patches. The noise is estimated patch by patch. Based on: "An Efficient Statistical Method for Image Noise Level Estimation" (2015) :param imgPath: Path to the input image. :param patchSize: Image patch size. :param internalNumPatches: Internal number of sub-image-patches. :param dirOut: Directory where to save the noise estimation results. :param saveResults: Whether to save the estimation results or not. 
:return: None """ # Load image img = np.array(cv2.imread(imgPath)) try: img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) img = img / 255.0 h, w = img.shape psize = min(min(patchSize, h), w) psize -= psize % 2 patch_step = psize shift_factor = 2 # Result array estimatedNoiseMap = np.zeros([h, w], dtype=np.int8) rangex = range(0, w, patch_step) rangey = range(0, h, patch_step) for start_x in rangex: for start_y in rangey: end_x = start_x + psize end_y = start_y + psize if end_x > w: end_x = w end_x = shift_factor * ((end_x) // shift_factor) start_x = end_x - psize if end_y > h: end_y = h end_y = shift_factor * ((end_y) // shift_factor) start_y = end_y - psize tileM = img[start_y:end_y, start_x:end_x] h_, w_ = tileM.shape sigma = noise_estimate(tileM, internalNumPatches) * 255.0 estimatedNoiseMap[start_y :start_y + h_, start_x : start_x + w_] = sigma if saveResults: if dirOut is not None: imgName = imgPath.split(os.sep)[-1].split(".")[0] dirOut = os.path.join(dirOut) if not os.path.exists(dirOut): os.makedirs(dirOut) noiseMapPath = os.path.join(dirOut, imgName + ".npz") if not os.path.exists(noiseMapPath): np.savez_compressed(noiseMapPath, estimatedNoiseMap) return estimatedNoiseMap except: return None # Example # if __name__ == '__main__': # dirIn = r"../../../data/udacity/img/GT" # dirOut = r"../../../data/udacity/labels_noise_patchwise/PCA" # imgFileEnding = ".jpg" # for imgPath in glob.glob(os.path.join(dirIn, "*" + imgFileEnding)): # run(imgPath, 128, 8, dirOut)
[((34, 10, 34, 61), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((70, 19, 70, 42), 'numpy.linalg.eigh', 'np.linalg.eigh', ({(70, 34, 70, 41): 'sigma_X'}, {}), '(sigma_X)', True, 'import numpy as np\n'), ((59, 13, 59, 39), 'numpy.expand_dims', 'np.expand_dims', (), '', True, 'import numpy as np\n'), ((74, 14, 74, 37), 'numpy.mean', 'np.mean', ({(74, 22, 74, 36): 'sig_value[:ii]'}, {}), '(sig_value[:ii])', True, 'import numpy as np\n'), ((91, 19, 91, 38), 'cv2.imread', 'cv2.imread', ({(91, 30, 91, 37): 'imgPath'}, {}), '(imgPath)', False, 'import cv2\n'), ((93, 14, 93, 51), 'cv2.cvtColor', 'cv2.cvtColor', ({(93, 27, 93, 30): 'img', (93, 32, 93, 50): 'cv2.COLOR_RGB2GRAY'}, {}), '(img, cv2.COLOR_RGB2GRAY)', False, 'import cv2\n'), ((103, 28, 103, 59), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((20, 8, 20, 75), 'sys.exit', 'sys.exit', ({(20, 17, 20, 74): '"""The input of pch_size must be a integer or a int tuple!"""'}, {}), "('The input of pch_size must be a integer or a int tuple!')", False, 'import sys\n'), ((27, 8, 27, 73), 'sys.exit', 'sys.exit', ({(27, 17, 27, 72): '"""The input of stride must be a integer or a int tuple!"""'}, {}), "('The input of stride must be a integer or a int tuple!')", False, 'import sys\n'), ((75, 11, 75, 37), 'numpy.sum', 'np.sum', ({(75, 18, 75, 36): '(sig_value[:ii] > tau)'}, {}), '(sig_value[:ii] > tau)', True, 'import numpy as np\n'), ((75, 41, 75, 69), 'numpy.sum', 'np.sum', ({(75, 48, 75, 68): '(sig_value[:ii] < tau)'}, {}), '(sig_value[:ii] < tau)', True, 'import numpy as np\n'), ((76, 19, 76, 31), 'numpy.sqrt', 'np.sqrt', ({(76, 27, 76, 30): 'tau'}, {}), '(tau)', True, 'import numpy as np\n'), ((132, 27, 132, 65), 'os.path.join', 'os.path.join', ({(132, 40, 132, 46): 'dirOut', (132, 48, 132, 64): "imgName + '.npz'"}, {}), "(dirOut, imgName + '.npz')", False, 'import os\n'), ((128, 25, 128, 45), 'os.path.join', 'os.path.join', ({(128, 38, 128, 44): 'dirOut'}, {}), '(dirOut)', False, 'import os\n'), ((133, 19, 133, 47), 'os.path.exists', 'os.path.exists', ({(133, 34, 133, 46): 'noiseMapPath'}, {}), '(noiseMapPath)', False, 'import os\n'), ((134, 16, 134, 68), 'numpy.savez_compressed', 'np.savez_compressed', ({(134, 36, 134, 48): 'noiseMapPath', (134, 50, 134, 67): 'estimatedNoiseMap'}, {}), '(noiseMapPath, estimatedNoiseMap)', True, 'import numpy as np\n'), ((129, 23, 129, 45), 'os.path.exists', 'os.path.exists', ({(129, 38, 129, 44): 'dirOut'}, {}), '(dirOut)', False, 'import os\n'), ((130, 20, 130, 39), 'os.makedirs', 'os.makedirs', ({(130, 32, 130, 38): 'dirOut'}, {}), '(dirOut)', False, 'import os\n')]
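A small self-contained check of the patch-based estimator above, assuming noise_estimate from that module is in scope; the data is synthetic and the recovered sigma is only expected to match approximately:

import numpy as np

# Clean gray image in [0, 1] plus Gaussian noise with a known sigma.
rng = np.random.default_rng(0)
clean = np.tile(np.linspace(0.2, 0.8, 256), (256, 1))
sigma_true = 10.0 / 255.0
noisy = np.clip(clean + rng.normal(0.0, sigma_true, clean.shape), 0.0, 1.0)

sigma_est = noise_estimate(noisy, pch_size=8)
print("true sigma: %.4f, estimated: %.4f" % (sigma_true, sigma_est))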
claudejrogers/biotite
src/biotite/application/application.py
3635bc9071506ecb85ddd9b1dbe6a430295e060e
# This source code is part of the Biotite package and is distributed # under the 3-Clause BSD License. Please see 'LICENSE.rst' for further # information. __name__ = "biotite.application" __author__ = "Patrick Kunzmann" __all__ = ["Application", "AppStateError", "TimeoutError", "VersionError", "AppState", "requires_state"] import abc import time from functools import wraps from enum import Flag, auto class AppState(Flag): """ This enum type represents the app states of an application. """ CREATED = auto() RUNNING = auto() FINISHED = auto() JOINED = auto() CANCELLED = auto() def requires_state(app_state): """ A decorator for methods of :class:`Application` subclasses that raises an :class:`AppStateError` in case the method is called, when the :class:`Application` is not in the specified :class:`AppState` `app_state`. Parameters ---------- app_state : AppState The required app state. Examples -------- Raises :class:`AppStateError` when `function` is called, if :class:`Application` is not in one of the specified states: >>> @requires_state(AppState.RUNNING | AppState.FINISHED) ... def function(self): ... pass """ def decorator(func): @wraps(func) def wrapper(*args, **kwargs): # First parameter of method is always 'self' instance = args[0] if not instance._state & app_state: raise AppStateError( f"The application is in {instance.get_app_state()} state, " f"but {app_state} state is required" ) return func(*args, **kwargs) return wrapper return decorator class Application(metaclass=abc.ABCMeta): """ This class is a wrapper around an external piece of runnable software in any sense. Subclasses of this abstract base class specify the respective kind of software and the way of interacting with it. Every :class:`Application` runs through a different app states (instances of enum :class:`AppState`) from its creation until its termination: Directly after its instantiation the app is in the *CREATED* state. In this state further parameters can be set for the application run. After the user calls the :func:`start()` method, the app state is set to *RUNNING* and the :class:`Application` type specific :func:`run()` method is called. When the application finishes the AppState changes to *FINISHED*. This is checked via the :class:`Application` type specific :func:`is_finished()` method. The user can now call the :func:`join()` method, concluding the application in the *JOINED* state and making the results of the application accessible by executing the :class:`Application` type specific :func:`evaluate()` method. Furthermore this executes the :class:`Application` type specific :func:`clean_up()` method. :func:`join()` can even be called in the *RUNNING* state: This will constantly check :func:`is_finished()` and will directly go into the *JOINED* state as soon as the application reaches the *FINISHED* state. Calling the :func:`cancel()` method while the application is *RUNNING* or *FINISHED* leaves the application in the *CANCELLED* state. This triggers the :func:`clean_up()` method, too, but there are no accessible results. If a method is called in an unsuitable app state, an :class:`AppStateError` is called. The application run behaves like an additional thread: Between the call of :func:`start()` and :func:`join()` other Python code can be executed, while the application runs in the background. """ def __init__(self): self._state = AppState.CREATED @requires_state(AppState.CREATED) def start(self): """ Start the application run and set its state to *RUNNING*. 
This can only be done from the *CREATED* state. """ self.run() self._start_time = time.time() self._state = AppState.RUNNING @requires_state(AppState.RUNNING | AppState.FINISHED) def join(self, timeout=None): """ Conclude the application run and set its state to *JOINED*. This can only be done from the *RUNNING* or *FINISHED* state. If the application is *FINISHED* the joining process happens immediately, if otherwise the application is *RUNNING*, this method waits until the application is *FINISHED*. Parameters ---------- timeout : float, optional If this parameter is specified, the :class:`Application` only waits for finishing until this value (in seconds) runs out. After this time is exceeded a :class:`TimeoutError` is raised and the application is cancelled. Raises ------ TimeoutError If the joining process exceeds the `timeout` value. """ time.sleep(self.wait_interval()) while self.get_app_state() != AppState.FINISHED: if timeout is not None and time.time()-self._start_time > timeout: self.cancel() raise TimeoutError( f"The application expired its timeout " f"({timeout:.1f} s)" ) else: time.sleep(self.wait_interval()) time.sleep(self.wait_interval()) try: self.evaluate() except AppStateError: raise except: self._state = AppState.CANCELLED raise else: self._state = AppState.JOINED self.clean_up() @requires_state(AppState.RUNNING | AppState.FINISHED) def cancel(self): """ Cancel the application when in *RUNNING* or *FINISHED* state. """ self._state = AppState.CANCELLED self.clean_up() def get_app_state(self): """ Get the current app state. Returns ------- app_state : AppState The current app state. """ if self._state == AppState.RUNNING: if self.is_finished(): self._state = AppState.FINISHED return self._state @abc.abstractmethod def run(self): """ Commence the application run. Called in :func:`start()`. PROTECTED: Override when inheriting. """ pass @abc.abstractmethod def is_finished(self): """ Check if the application has finished. PROTECTED: Override when inheriting. Returns ------- finished : bool True of the application has finished, false otherwise """ pass @abc.abstractmethod def wait_interval(self): """ The time interval of :func:`is_finished()` calls in the joining process. PROTECTED: Override when inheriting. Returns ------- interval : float Time (in seconds) between calls of :func:`is_finished()` in :func:`join()` """ pass @abc.abstractmethod def evaluate(self): """ Evaluate application results. Called in :func:`join()`. PROTECTED: Override when inheriting. """ pass def clean_up(self): """ Do clean up work after the application terminates. PROTECTED: Optionally override when inheriting. """ pass class AppStateError(Exception): """ Indicate that the application lifecycle was violated. """ pass class TimeoutError(Exception): """ Indicate that the application's timeout expired. """ pass class VersionError(Exception): """ Indicate that the application's version is invalid. """ pass
[((20, 14, 20, 20), 'enum.auto', 'auto', ({}, {}), '()', False, 'from enum import Flag, auto\n'), ((21, 14, 21, 20), 'enum.auto', 'auto', ({}, {}), '()', False, 'from enum import Flag, auto\n'), ((22, 15, 22, 21), 'enum.auto', 'auto', ({}, {}), '()', False, 'from enum import Flag, auto\n'), ((23, 13, 23, 19), 'enum.auto', 'auto', ({}, {}), '()', False, 'from enum import Flag, auto\n'), ((24, 16, 24, 22), 'enum.auto', 'auto', ({}, {}), '()', False, 'from enum import Flag, auto\n'), ((49, 9, 49, 20), 'functools.wraps', 'wraps', ({(49, 15, 49, 19): 'func'}, {}), '(func)', False, 'from functools import wraps\n'), ((114, 27, 114, 38), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((143, 39, 143, 50), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')]
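A hedged sketch of the lifecycle described in the Application docstring, using a toy subclass that is purely illustrative and not part of biotite:

import time
from biotite.application import Application, AppState, requires_state

class SleepApp(Application):
    """Toy app that waits a fixed time, to exercise the state machine."""

    def __init__(self, duration):
        super().__init__()
        self._duration = duration
        self._result = None

    def run(self):
        # Called by start(); here the "external software" is just a timer.
        self._end = time.time() + self._duration

    def is_finished(self):
        return time.time() >= self._end

    def wait_interval(self):
        return 0.1

    def evaluate(self):
        self._result = "done"

    @requires_state(AppState.JOINED)
    def get_result(self):
        return self._result

app = SleepApp(0.5)
app.start()               # CREATED -> RUNNING
app.join()                # blocks until FINISHED, then -> JOINED
print(app.get_result())   # allowed only in the JOINED state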
douch/Paddle
python/paddle/tensor/attribute.py
81c40722869935d6e897f4b1aeb6e6f67606188a
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function from ..framework import core from ..fluid.layer_helper import LayerHelper from ..fluid.data_feeder import check_variable_and_dtype # TODO: define functions to get tensor attributes from ..fluid.layers import rank # noqa: F401 from ..fluid.layers import shape # noqa: F401 import paddle from paddle import _C_ops from paddle.static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode __all__ = [] def _complex_to_real_dtype(dtype): if dtype == core.VarDesc.VarType.COMPLEX64: return core.VarDesc.VarType.FP32 elif dtype == core.VarDesc.VarType.COMPLEX128: return core.VarDesc.VarType.FP64 else: return dtype def _real_to_complex_dtype(dtype): if dtype == core.VarDesc.VarType.FP32: return core.VarDesc.VarType.COMPLEX64 elif dtype == core.VarDesc.VarType.FP64: return core.VarDesc.VarType.COMPLEX128 else: return dtype def is_complex(x): """Return whether x is a tensor of complex data type(complex64 or complex128). Args: x (Tensor): The input tensor. Returns: bool: True if the data type of the input is complex data type, otherwise false. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1 + 2j, 3 + 4j]) print(paddle.is_complex(x)) # True x = paddle.to_tensor([1.1, 1.2]) print(paddle.is_complex(x)) # False x = paddle.to_tensor([1, 2, 3]) print(paddle.is_complex(x)) # False """ if not isinstance(x, (paddle.Tensor, paddle.static.Variable)): raise TypeError("Expected Tensor, but received type of x: {}".format( type(x))) dtype = x.dtype is_complex_dtype = (dtype == core.VarDesc.VarType.COMPLEX64 or dtype == core.VarDesc.VarType.COMPLEX128) return is_complex_dtype def is_floating_point(x): """ Returns whether the dtype of `x` is one of paddle.float64, paddle.float32, paddle.float16, and paddle.bfloat16. Args: x (Tensor): The input tensor. Returns: bool: True if the dtype of `x` is floating type, otherwise false. Examples: .. code-block:: python import paddle x = paddle.arange(1., 5., dtype='float32') y = paddle.arange(1, 5, dtype='int32') print(paddle.is_floating_point(x)) # True print(paddle.is_floating_point(y)) # False """ if not isinstance(x, (paddle.Tensor, paddle.static.Variable)): raise TypeError("Expected Tensor, but received type of x: {}".format( type(x))) dtype = x.dtype is_fp_dtype = (dtype == core.VarDesc.VarType.FP32 or dtype == core.VarDesc.VarType.FP64 or dtype == core.VarDesc.VarType.FP16 or dtype == core.VarDesc.VarType.BF16) return is_fp_dtype def is_integer(x): """Return whether x is a tensor of integeral data type. Args: x (Tensor): The input tensor. Returns: bool: True if the data type of the input is integer data type, otherwise false. Examples: .. 
code-block:: python import paddle x = paddle.to_tensor([1 + 2j, 3 + 4j]) print(paddle.is_integer(x)) # False x = paddle.to_tensor([1.1, 1.2]) print(paddle.is_integer(x)) # False x = paddle.to_tensor([1, 2, 3]) print(paddle.is_integer(x)) # True """ if not isinstance(x, (paddle.Tensor, paddle.static.Variable)): raise TypeError("Expected Tensor, but received type of x: {}".format( type(x))) dtype = x.dtype is_int_dtype = (dtype == core.VarDesc.VarType.UINT8 or dtype == core.VarDesc.VarType.INT8 or dtype == core.VarDesc.VarType.INT16 or dtype == core.VarDesc.VarType.INT32 or dtype == core.VarDesc.VarType.INT64) return is_int_dtype def real(x, name=None): """ Returns a new tensor containing real values of the input tensor. Args: x (Tensor): the input tensor, its data type could be complex64 or complex128. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . Returns: Tensor: a tensor containing real values of the input tensor. Examples: .. code-block:: python import paddle x = paddle.to_tensor( [[1 + 6j, 2 + 5j, 3 + 4j], [4 + 3j, 5 + 2j, 6 + 1j]]) # Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, # [[(1+6j), (2+5j), (3+4j)], # [(4+3j), (5+2j), (6+1j)]]) real_res = paddle.real(x) # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[1., 2., 3.], # [4., 5., 6.]]) real_t = x.real() # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[1., 2., 3.], # [4., 5., 6.]]) """ if in_dygraph_mode(): return _C_ops.final_state_real(x) if _in_legacy_dygraph(): return _C_ops.real(x) check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real') helper = LayerHelper('real', **locals()) out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out}) return out def imag(x, name=None): """ Returns a new tensor containing imaginary values of input tensor. Args: x (Tensor): the input tensor, its data type could be complex64 or complex128. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . Returns: Tensor: a tensor containing imaginary values of the input tensor. Examples: .. code-block:: python import paddle x = paddle.to_tensor( [[1 + 6j, 2 + 5j, 3 + 4j], [4 + 3j, 5 + 2j, 6 + 1j]]) # Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, # [[(1+6j), (2+5j), (3+4j)], # [(4+3j), (5+2j), (6+1j)]]) imag_res = paddle.imag(x) # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[6., 5., 4.], # [3., 2., 1.]]) imag_t = x.imag() # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[6., 5., 4.], # [3., 2., 1.]]) """ if in_dygraph_mode(): return _C_ops.final_state_imag(x) if _in_legacy_dygraph(): return _C_ops.imag(x) check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag') helper = LayerHelper('imag', **locals()) out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='imag', inputs={'X': x}, outputs={'Out': out}) return out
[((190, 15, 190, 41), 'paddle._C_ops.final_state_real', '_C_ops.final_state_real', ({(190, 39, 190, 40): 'x'}, {}), '(x)', False, 'from paddle import _C_ops\n'), ((192, 15, 192, 29), 'paddle._C_ops.real', '_C_ops.real', ({(192, 27, 192, 28): 'x'}, {}), '(x)', False, 'from paddle import _C_ops\n'), ((236, 15, 236, 41), 'paddle._C_ops.final_state_imag', '_C_ops.final_state_imag', ({(236, 39, 236, 40): 'x'}, {}), '(x)', False, 'from paddle import _C_ops\n'), ((238, 15, 238, 29), 'paddle._C_ops.imag', '_C_ops.imag', ({(238, 27, 238, 28): 'x'}, {}), '(x)', False, 'from paddle import _C_ops\n')]
open-contracting/ocds-merge
ocdsmerge/exceptions.py
80c7cb380d191c75f88feefd34b607bc0de13ee1
class OCDSMergeError(Exception):
    """Base class for exceptions from within this package"""


class MissingDateKeyError(OCDSMergeError, KeyError):
    """Raised when a release is missing a 'date' key"""

    def __init__(self, key, message):
        self.key = key
        self.message = message

    def __str__(self):
        return str(self.message)


class NonObjectReleaseError(OCDSMergeError, TypeError):
    """Raised when a release is not an object"""


class NullDateValueError(OCDSMergeError, TypeError):
    """Raised when a release has a null 'date' value"""


class NonStringDateValueError(OCDSMergeError, TypeError):
    """Raised when a release has a non-string 'date' value"""


class InconsistentTypeError(OCDSMergeError, TypeError):
    """Raised when a path is a literal and an object in different releases"""


class OCDSMergeWarning(UserWarning):
    """Base class for warnings from within this package"""


class DuplicateIdValueWarning(OCDSMergeWarning):
    """Used when at least two objects in the same array have the same value for the 'id' field"""

    def __init__(self, path, id, message):
        self.path = path
        self.id = id
        self.message = message

    def __str__(self):
        return str(self.message)
[]
guardhunt/TelemterRC
appcodec.py
679f99b317ecc6cbef6e022ae861cde18594f6a0
import evdev
import time
import struct

class appcodec():
    def __init__(self):
        self.device = evdev.InputDevice("/dev/input/event2")
        self.capabilities = self.device.capabilities(verbose=True)
        self.capaRAW = self.device.capabilities(absinfo=False)
        self.config = {}
        self.state = {}

    def build(self):
        """build state dictionary for controller"""
        #build config dictionary by code and name
        for key, value in self.capabilities.items():
            for element in value:
                if type(element[0]) is tuple:
                    self.config[element[0][1]] = element[0][0]
                elif type(element[0]) is list:
                    self.config[element[1]] = element[0][0]
                elif ("SYN" in str(element[0])) or ("FF" in str(element[0])):
                    pass
                else:
                    self.config[element[1]] = element[0]

        #build state dictionary from raw codes
        for code in self.capaRAW[1]:
            self.state[self.config[code]] = 0
        for code in self.capaRAW[3]:
            self.state[self.config[code]] = 0

        print("waiting for event")
        for event in self.device.read_loop():
            if event.type == evdev.ecodes.EV_KEY or event.type == evdev.ecodes.EV_ABS:
                return(self.update_state(event))

    def update_state(self, event):
        self.state[self.config[event.code]] = event.value

        buttons1_state = 0
        buttons1_state = buttons1_state | self.state["BTN_A"]
        buttons1_state = buttons1_state | self.state["BTN_B"] << 1
        buttons1_state = buttons1_state | self.state["BTN_NORTH"] << 2
        buttons1_state = buttons1_state | self.state["BTN_WEST"] << 3

        buttons2_state = 0
        buttons2_state = buttons2_state | self.state["BTN_START"]
        buttons2_state = buttons2_state | self.state["BTN_MODE"] << 1
        buttons2_state = buttons2_state | self.state["BTN_SELECT"] << 2
        buttons2_state = buttons2_state | self.state["BTN_TR"] << 3
        buttons2_state = buttons2_state | self.state["BTN_TL"] << 4

        packet = struct.pack('6h2c', self.state["ABS_X"], self.state["ABS_Y"], self.state["ABS_RX"], self.state["ABS_RY"], self.state["ABS_HAT0X"], self.state["ABS_HAT0Y"], buttons1_state.to_bytes(1, byteorder="big"), buttons2_state.to_bytes(1, byteorder="big"))

        return packet

    def decode(self, packet):
        buttons = []

        state = packet[14:30]
        state = struct.unpack('6h2B2c', state)
        buttons1 = state[8]
        buttons2 = state[9]
        holder1 = '{0:06b}'.format(int.from_bytes(buttons1, byteorder="big"))
        holder2 = '{0:05b}'.format(int.from_bytes(buttons2, byteorder="big"))
        for i in holder1:
            buttons.append(int(i))
        for i in holder2:
            buttons.append(int(i))
        state = list(state[ :7]) + buttons
        return state
[((7, 22, 7, 60), 'evdev.InputDevice', 'evdev.InputDevice', ({(7, 40, 7, 59): '"""/dev/input/event2"""'}, {}), "('/dev/input/event2')", False, 'import evdev\n'), ((62, 16, 62, 46), 'struct.unpack', 'struct.unpack', ({(62, 30, 62, 38): '"""6h2B2c"""', (62, 40, 62, 45): 'state'}, {}), "('6h2B2c', state)", False, 'import struct\n')]
jiskra/openmv
scripts/examples/OpenMV/16-Codes/find_barcodes.py
a0f321836f77f94d8118910598dcdb79eb784d58
# Barcode Example
#
# This example shows off how easy it is to detect bar codes using the
# OpenMV Cam M7. Barcode detection does not work on the M4 Camera.

import sensor, image, time, math

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA) # High Res!
sensor.set_windowing((640, 80)) # V Res of 80 == less work (40 for 2X the speed).
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False)  # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False)  # must turn this off to prevent image washout...
clock = time.clock()

# Barcode detection can run at the full 640x480 resolution of your OpenMV Cam's
# OV7725 camera module. Barcode detection will also work in RGB565 mode but at
# a lower resolution. That said, barcode detection requires a higher resolution
# to work well so it should always be run at 640x480 in grayscale...

def barcode_name(code):
    if(code.type() == image.EAN2):
        return "EAN2"
    if(code.type() == image.EAN5):
        return "EAN5"
    if(code.type() == image.EAN8):
        return "EAN8"
    if(code.type() == image.UPCE):
        return "UPCE"
    if(code.type() == image.ISBN10):
        return "ISBN10"
    if(code.type() == image.UPCA):
        return "UPCA"
    if(code.type() == image.EAN13):
        return "EAN13"
    if(code.type() == image.ISBN13):
        return "ISBN13"
    if(code.type() == image.I25):
        return "I25"
    if(code.type() == image.DATABAR):
        return "DATABAR"
    if(code.type() == image.DATABAR_EXP):
        return "DATABAR_EXP"
    if(code.type() == image.CODABAR):
        return "CODABAR"
    if(code.type() == image.CODE39):
        return "CODE39"
    if(code.type() == image.PDF417):
        return "PDF417"
    if(code.type() == image.CODE93):
        return "CODE93"
    if(code.type() == image.CODE128):
        return "CODE128"

while(True):
    clock.tick()
    img = sensor.snapshot()
    codes = img.find_barcodes()
    for code in codes:
        img.draw_rectangle(code.rect())
        print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(), clock.fps())
        print("Barcode %s, Payload \"%s\", rotation %f (degrees), quality %d, FPS %f" % print_args)
    if not codes:
        print("FPS %f" % clock.fps())
[((8, 0, 8, 14), 'sensor.reset', 'sensor.reset', ({}, {}), '()', False, 'import sensor, image, time, math\n'), ((9, 0, 9, 38), 'sensor.set_pixformat', 'sensor.set_pixformat', ({(9, 21, 9, 37): 'sensor.GRAYSCALE'}, {}), '(sensor.GRAYSCALE)', False, 'import sensor, image, time, math\n'), ((10, 0, 10, 32), 'sensor.set_framesize', 'sensor.set_framesize', ({(10, 21, 10, 31): 'sensor.VGA'}, {}), '(sensor.VGA)', False, 'import sensor, image, time, math\n'), ((11, 0, 11, 31), 'sensor.set_windowing', 'sensor.set_windowing', ({(11, 21, 11, 30): '(640, 80)'}, {}), '((640, 80))', False, 'import sensor, image, time, math\n'), ((12, 0, 12, 31), 'sensor.skip_frames', 'sensor.skip_frames', (), '', False, 'import sensor, image, time, math\n'), ((13, 0, 13, 27), 'sensor.set_auto_gain', 'sensor.set_auto_gain', ({(13, 21, 13, 26): '(False)'}, {}), '(False)', False, 'import sensor, image, time, math\n'), ((14, 0, 14, 31), 'sensor.set_auto_whitebal', 'sensor.set_auto_whitebal', ({(14, 25, 14, 30): '(False)'}, {}), '(False)', False, 'import sensor, image, time, math\n'), ((15, 8, 15, 20), 'time.clock', 'time.clock', ({}, {}), '()', False, 'import sensor, image, time, math\n'), ((58, 10, 58, 27), 'sensor.snapshot', 'sensor.snapshot', ({}, {}), '()', False, 'import sensor, image, time, math\n')]
devaslooper/Code-Overflow
Python/factorial.py
d7d55ea0f5015bccb5c4100c4240464fcda8504a
n=int(input("Enter number "))
fact=1
for i in range(1,n+1):
    fact=fact*i
print("Factorial is ",fact)
[]
thiagofreitascarneiro/Curso-de-Python---Curso-em-Video
mundo 3/099.py
0342e482780b5a1c6f78cddd51d9bfad785c79fa
import time
# O * é para desempacotar o paramêtro. Permite atribuir inumeros parametros.
def maior(* num):
    contador = maior = 0
    print('Analisando os valores passados...')
    for v in num:
        contador = contador + 1
        print(f'{v} ', end='', flush=True)
        time.sleep(0.3)
        if contador == 1:
            maior = v
        else:
            if v > maior:
                maior = v
    print(f'Foram informado o total de {len(num)}')
    print(f'O maior valor informado foi {max(num)}')
    print(30 * '-')


maior(2, 1, 7)
maior(5, 4, 7, 9, 2)
maior(1, 4, 7, 20, 2)
maior(0)
[((9, 8, 9, 23), 'time.sleep', 'time.sleep', ({(9, 19, 9, 22): '(0.3)'}, {}), '(0.3)', False, 'import time\n')]
theycallmepeter/pytorch3d_PBR
tests/test_packed_to_padded.py
bc83c23969ff7843fc05d2da001952b368926174
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from common_testing import TestCaseMixin, get_random_cuda_device from pytorch3d.ops import packed_to_padded, padded_to_packed from pytorch3d.structures.meshes import Meshes class TestPackedToPadded(TestCaseMixin, unittest.TestCase): def setUp(self) -> None: super().setUp() torch.manual_seed(1) @staticmethod def init_meshes( num_meshes: int = 10, num_verts: int = 1000, num_faces: int = 3000, device: str = "cpu", ): device = torch.device(device) verts_list = [] faces_list = [] for _ in range(num_meshes): verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device) faces = torch.randint( num_verts, size=(num_faces, 3), dtype=torch.int64, device=device ) verts_list.append(verts) faces_list.append(faces) meshes = Meshes(verts_list, faces_list) return meshes @staticmethod def packed_to_padded_python(inputs, first_idxs, max_size, device): """ PyTorch implementation of packed_to_padded function. """ num_meshes = first_idxs.size(0) D = inputs.shape[1] if inputs.dim() == 2 else 0 if D == 0: inputs_padded = torch.zeros((num_meshes, max_size), device=device) else: inputs_padded = torch.zeros((num_meshes, max_size, D), device=device) for m in range(num_meshes): s = first_idxs[m] if m == num_meshes - 1: f = inputs.shape[0] else: f = first_idxs[m + 1] inputs_padded[m, :f] = inputs[s:f] return inputs_padded @staticmethod def padded_to_packed_python(inputs, first_idxs, num_inputs, device): """ PyTorch implementation of padded_to_packed function. """ num_meshes = inputs.size(0) D = inputs.shape[2] if inputs.dim() == 3 else 0 if D == 0: inputs_packed = torch.zeros((num_inputs,), device=device) else: inputs_packed = torch.zeros((num_inputs, D), device=device) for m in range(num_meshes): s = first_idxs[m] if m == num_meshes - 1: f = num_inputs else: f = first_idxs[m + 1] inputs_packed[s:f] = inputs[m, :f] return inputs_packed def _test_packed_to_padded_helper(self, D, device): """ Check the results from packed_to_padded and PyTorch implementations are the same. 
""" meshes = self.init_meshes(16, 100, 300, device=device) faces = meshes.faces_packed() mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces = meshes.num_faces_per_mesh().max().item() if D == 0: values = torch.rand((faces.shape[0],), device=device, requires_grad=True) else: values = torch.rand((faces.shape[0], D), device=device, requires_grad=True) values_torch = values.detach().clone() values_torch.requires_grad = True values_padded = packed_to_padded( values, mesh_to_faces_packed_first_idx, max_faces ) values_padded_torch = TestPackedToPadded.packed_to_padded_python( values_torch, mesh_to_faces_packed_first_idx, max_faces, device ) # check forward self.assertClose(values_padded, values_padded_torch) # check backward if D == 0: grad_inputs = torch.rand((len(meshes), max_faces), device=device) else: grad_inputs = torch.rand((len(meshes), max_faces, D), device=device) values_padded.backward(grad_inputs) grad_outputs = values.grad values_padded_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad grad_outputs_torch2 = TestPackedToPadded.padded_to_packed_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(0), device=device ) self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def test_packed_to_padded_flat_cpu(self): self._test_packed_to_padded_helper(0, "cpu") def test_packed_to_padded_D1_cpu(self): self._test_packed_to_padded_helper(1, "cpu") def test_packed_to_padded_D16_cpu(self): self._test_packed_to_padded_helper(16, "cpu") def test_packed_to_padded_flat_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(0, device) def test_packed_to_padded_D1_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(1, device) def test_packed_to_padded_D16_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(16, device) def _test_padded_to_packed_helper(self, D, device): """ Check the results from packed_to_padded and PyTorch implementations are the same. 
""" meshes = self.init_meshes(16, 100, 300, device=device) mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() num_faces_per_mesh = meshes.num_faces_per_mesh() max_faces = num_faces_per_mesh.max().item() if D == 0: values = torch.rand((len(meshes), max_faces), device=device) else: values = torch.rand((len(meshes), max_faces, D), device=device) for i, num in enumerate(num_faces_per_mesh): values[i, num:] = 0 values.requires_grad = True values_torch = values.detach().clone() values_torch.requires_grad = True values_packed = padded_to_packed( values, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item() ) values_packed_torch = TestPackedToPadded.padded_to_packed_python( values_torch, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item(), device, ) # check forward self.assertClose(values_packed, values_packed_torch) # check backward if D == 0: grad_inputs = torch.rand((num_faces_per_mesh.sum().item()), device=device) else: grad_inputs = torch.rand( (num_faces_per_mesh.sum().item(), D), device=device ) values_packed.backward(grad_inputs) grad_outputs = values.grad values_packed_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad grad_outputs_torch2 = TestPackedToPadded.packed_to_padded_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(1), device=device ) self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def test_padded_to_packed_flat_cpu(self): self._test_padded_to_packed_helper(0, "cpu") def test_padded_to_packed_D1_cpu(self): self._test_padded_to_packed_helper(1, "cpu") def test_padded_to_packed_D16_cpu(self): self._test_padded_to_packed_helper(16, "cpu") def test_padded_to_packed_flat_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(0, device) def test_padded_to_packed_D1_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(1, device) def test_padded_to_packed_D16_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(16, device) def test_invalid_inputs_shapes(self, device="cuda:0"): with self.assertRaisesRegex(ValueError, "input can only be 2-dimensional."): values = torch.rand((100, 50, 2), device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device) packed_to_padded(values, first_idxs, 100) with self.assertRaisesRegex(ValueError, "input can only be 3-dimensional."): values = torch.rand((100,), device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device) padded_to_packed(values, first_idxs, 20) with self.assertRaisesRegex(ValueError, "input can only be 3-dimensional."): values = torch.rand((100, 50, 2, 2), device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device) padded_to_packed(values, first_idxs, 20) @staticmethod def packed_to_padded_with_init( num_meshes: int, num_verts: int, num_faces: int, num_d: int, device: str = "cpu" ): meshes = TestPackedToPadded.init_meshes( num_meshes, num_verts, num_faces, device ) faces = meshes.faces_packed() mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces = meshes.num_faces_per_mesh().max().item() if num_d == 0: values = torch.rand((faces.shape[0],), device=meshes.device) else: values = torch.rand((faces.shape[0], num_d), device=meshes.device) torch.cuda.synchronize() def out(): packed_to_padded(values, mesh_to_faces_packed_first_idx, max_faces) torch.cuda.synchronize() return out @staticmethod def packed_to_padded_with_init_torch( 
num_meshes: int, num_verts: int, num_faces: int, num_d: int, device: str = "cpu" ): meshes = TestPackedToPadded.init_meshes( num_meshes, num_verts, num_faces, device ) faces = meshes.faces_packed() mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces = meshes.num_faces_per_mesh().max().item() if num_d == 0: values = torch.rand((faces.shape[0],), device=meshes.device) else: values = torch.rand((faces.shape[0], num_d), device=meshes.device) torch.cuda.synchronize() def out(): TestPackedToPadded.packed_to_padded_python( values, mesh_to_faces_packed_first_idx, max_faces, device ) torch.cuda.synchronize() return out
[((18, 8, 18, 28), 'torch.manual_seed', 'torch.manual_seed', ({(18, 26, 18, 27): '(1)'}, {}), '(1)', False, 'import torch\n'), ((27, 17, 27, 37), 'torch.device', 'torch.device', ({(27, 30, 27, 36): 'device'}, {}), '(device)', False, 'import torch\n'), ((37, 17, 37, 47), 'pytorch3d.structures.meshes.Meshes', 'Meshes', ({(37, 24, 37, 34): 'verts_list', (37, 36, 37, 46): 'faces_list'}, {}), '(verts_list, faces_list)', False, 'from pytorch3d.structures.meshes import Meshes\n'), ((99, 24, 101, 9), 'pytorch3d.ops.packed_to_padded', 'packed_to_padded', ({(100, 12, 100, 18): 'values', (100, 20, 100, 50): 'mesh_to_faces_packed_first_idx', (100, 52, 100, 61): 'max_faces'}, {}), '(values, mesh_to_faces_packed_first_idx, max_faces)', False, 'from pytorch3d.ops import packed_to_padded, padded_to_packed\n'), ((133, 17, 133, 41), 'common_testing.get_random_cuda_device', 'get_random_cuda_device', ({}, {}), '()', False, 'from common_testing import TestCaseMixin, get_random_cuda_device\n'), ((137, 17, 137, 41), 'common_testing.get_random_cuda_device', 'get_random_cuda_device', ({}, {}), '()', False, 'from common_testing import TestCaseMixin, get_random_cuda_device\n'), ((141, 17, 141, 41), 'common_testing.get_random_cuda_device', 'get_random_cuda_device', ({}, {}), '()', False, 'from common_testing import TestCaseMixin, get_random_cuda_device\n'), ((201, 17, 201, 41), 'common_testing.get_random_cuda_device', 'get_random_cuda_device', ({}, {}), '()', False, 'from common_testing import TestCaseMixin, get_random_cuda_device\n'), ((205, 17, 205, 41), 'common_testing.get_random_cuda_device', 'get_random_cuda_device', ({}, {}), '()', False, 'from common_testing import TestCaseMixin, get_random_cuda_device\n'), ((209, 17, 209, 41), 'common_testing.get_random_cuda_device', 'get_random_cuda_device', ({}, {}), '()', False, 'from common_testing import TestCaseMixin, get_random_cuda_device\n'), ((242, 8, 242, 32), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ({}, {}), '()', False, 'import torch\n'), ((264, 8, 264, 32), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ({}, {}), '()', False, 'import torch\n'), ((31, 20, 31, 82), 'torch.rand', 'torch.rand', (), '', False, 'import torch\n'), ((32, 20, 34, 13), 'torch.randint', 'torch.randint', (), '', False, 'import torch\n'), ((49, 28, 49, 78), 'torch.zeros', 'torch.zeros', (), '', False, 'import torch\n'), ((51, 28, 51, 81), 'torch.zeros', 'torch.zeros', (), '', False, 'import torch\n'), ((70, 28, 70, 69), 'torch.zeros', 'torch.zeros', (), '', False, 'import torch\n'), ((72, 28, 72, 71), 'torch.zeros', 'torch.zeros', (), '', False, 'import torch\n'), ((94, 21, 94, 85), 'torch.rand', 'torch.rand', (), '', False, 'import torch\n'), ((96, 21, 96, 87), 'torch.rand', 'torch.rand', (), '', False, 'import torch\n'), ((214, 21, 214, 60), 'torch.rand', 'torch.rand', (), '', False, 'import torch\n'), ((215, 25, 215, 80), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((216, 12, 216, 53), 'pytorch3d.ops.packed_to_padded', 'packed_to_padded', ({(216, 29, 216, 35): 'values', (216, 37, 216, 47): 'first_idxs', (216, 49, 216, 52): '(100)'}, {}), '(values, first_idxs, 100)', False, 'from pytorch3d.ops import packed_to_padded, padded_to_packed\n'), ((219, 21, 219, 54), 'torch.rand', 'torch.rand', (), '', False, 'import torch\n'), ((220, 25, 220, 80), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((221, 12, 221, 52), 'pytorch3d.ops.padded_to_packed', 'padded_to_packed', ({(221, 29, 221, 35): 'values', (221, 37, 221, 47): 'first_idxs', (221, 
49, 221, 51): '(20)'}, {}), '(values, first_idxs, 20)', False, 'from pytorch3d.ops import packed_to_padded, padded_to_packed\n'), ((224, 21, 224, 63), 'torch.rand', 'torch.rand', (), '', False, 'import torch\n'), ((225, 25, 225, 80), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((226, 12, 226, 52), 'pytorch3d.ops.padded_to_packed', 'padded_to_packed', ({(226, 29, 226, 35): 'values', (226, 37, 226, 47): 'first_idxs', (226, 49, 226, 51): '(20)'}, {}), '(values, first_idxs, 20)', False, 'from pytorch3d.ops import packed_to_padded, padded_to_packed\n'), ((239, 21, 239, 72), 'torch.rand', 'torch.rand', (), '', False, 'import torch\n'), ((241, 21, 241, 78), 'torch.rand', 'torch.rand', (), '', False, 'import torch\n'), ((245, 12, 245, 79), 'pytorch3d.ops.packed_to_padded', 'packed_to_padded', ({(245, 29, 245, 35): 'values', (245, 37, 245, 67): 'mesh_to_faces_packed_first_idx', (245, 69, 245, 78): 'max_faces'}, {}), '(values, mesh_to_faces_packed_first_idx, max_faces)', False, 'from pytorch3d.ops import packed_to_padded, padded_to_packed\n'), ((246, 12, 246, 36), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ({}, {}), '()', False, 'import torch\n'), ((261, 21, 261, 72), 'torch.rand', 'torch.rand', (), '', False, 'import torch\n'), ((263, 21, 263, 78), 'torch.rand', 'torch.rand', (), '', False, 'import torch\n'), ((270, 12, 270, 36), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ({}, {}), '()', False, 'import torch\n')]
HowcanoeWang/EasyRIC
easyric/tests/test_io_geotiff.py
a3420bc7b1e0f1013411565cf0e66dd2d2ba5371
import pyproj import pytest import numpy as np from easyric.io import geotiff, shp from skimage.io import imread from skimage.color import rgb2gray import matplotlib.pyplot as plt def test_prase_header_string_width(): out_dict = geotiff._prase_header_string("* 256 image_width (1H) 13503") assert out_dict['width'] == 13503 def test_prase_header_string_length(): out_dict = geotiff._prase_header_string("* 257 image_length (1H) 19866") assert out_dict['length'] == 19866 def test_prase_header_string_scale(): in_str = "* 33550 model_pixel_scale (3d) (0.0029700000000000004, 0.0029700000000000004, 0" out_dict = geotiff._prase_header_string(in_str) assert out_dict['scale'] == (0.0029700000000000004, 0.0029700000000000004) def test_prase_header_string_tie_point(): in_str = "* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823," out_dict = geotiff._prase_header_string(in_str) assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823) in_str = "* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823, 0" out_dict = geotiff._prase_header_string(in_str) assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823) def test_prase_header_string_nodata(): out_dict = geotiff._prase_header_string("* 42113 gdal_nodata (7s) b'-10000'") assert out_dict['nodata'] == -10000 def test_prase_header_string_proj_normal(capsys): in_str = "* 34737 geo_ascii_params (30s) b'WGS 84 / UTM zone 54N|WGS 84|'" out_dict = geotiff._prase_header_string(in_str) captured = capsys.readouterr() assert f"[io][geotiff][GeoCorrd] Comprehense [{in_str}]" in captured.out assert out_dict['proj'] == pyproj.CRS.from_epsg(32654) def test_prase_header_string_proj_error(capsys): # should raise error because WGS 84 / UTM ... should be full out_dict = geotiff._prase_header_string("* 34737 geo_ascii_params (30s) b'UTM zone 54N|WGS 84|'") captured = capsys.readouterr() assert '[io][geotiff][GeoCorrd] Generation failed, because [Input is not a CRS: UTM zone 54N]' in captured.out assert out_dict['proj'] == None def test_get_imarray_without_header(capsys): pass def test_get_imarray_with_header(capsys): pass def test_point_query_one_point(): point = (368023.004, 3955500.669) out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', point) np.testing.assert_almost_equal(out, np.float32(97.45558), decimal=3) def test_point_query_numpy_points(): points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]]) out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', points) expected = np.asarray([97.624344, 97.59617]) np.testing.assert_almost_equal(out, expected, decimal=3) def test_point_query_list_numpy_points(): points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]]) point = np.asarray([[368023.004, 3955500.669]]) p_list = [point, points] expected = [np.asarray([97.45558]), np.asarray([97.624344, 97.59617])] out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', p_list) assert type(expected) == type(out) np.testing.assert_almost_equal(expected[0], out[0], decimal=3) np.testing.assert_almost_equal(expected[1], out[1], decimal=3) def test_point_query_wrong_types(): # [TODO] pass def test_point_query_input_ndarray(): # [Todo] pass def test_mean_values(capsys): mean_ht = geotiff.mean_values(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif') captured = capsys.readouterr() # When not convert to float, mean_values = 97.562584 # assert mean_ht == np.float32(97.562584) 
np.testing.assert_almost_equal(mean_ht, np.float32(97.562584), decimal=3) # another case that not working in previous version: # Cannot convert np.nan to int, fixed by astype(float) mean_ht = geotiff.mean_values(r'file/tiff_test/2_12.tif') captured = capsys.readouterr() np.testing.assert_almost_equal(mean_ht, np.float(72.31657466298653), decimal=3) def test_gis2pixel2gis(): geo_head_txt = """ TIFF file: 200423_G_M600pro_transparent_mosaic_group1.tif, 411 MiB, little endian, bigtiff Series 0: 31255x19436x4, uint8, YXS, 1 pages, not mem-mappable Page 0: 31255x19436x4, uint8, 8 bit, rgb, lzw * 256 image_width (1H) 19436 * 257 image_length (1H) 31255 * 258 bits_per_sample (4H) (8, 8, 8, 8) * 259 compression (1H) 5 * 262 photometric (1H) 2 * 273 strip_offsets (31255Q) (500650, 501114, 501578, 502042, 502506, 502970, 5 * 277 samples_per_pixel (1H) 4 * 278 rows_per_strip (1H) 1 * 279 strip_byte_counts (31255Q) (464, 464, 464, 464, 464, 464, 464, 464, 464, * 284 planar_configuration (1H) 1 * 305 software (12s) b'pix4dmapper' * 317 predictor (1H) 2 * 338 extra_samples (1H) 2 * 339 sample_format (4H) (1, 1, 1, 1) * 33550 model_pixel_scale (3d) (0.001, 0.001, 0.0) * 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 484576.70205, 3862285.5109300003, * 34735 geo_key_directory (32H) (1, 1, 0, 7, 1024, 0, 1, 1, 1025, 0, 1, 1, 1026 * 34737 geo_ascii_params (30s) b'WGS 84 / UTM zone 53N|WGS 84|' """ gis_coord = np.asarray([[ 484593.67474654, 3862259.42413431], [ 484593.41064743, 3862259.92582402], [ 484593.64841806, 3862260.06515117], [ 484593.93077419, 3862259.55455913], [ 484593.67474654, 3862259.42413431]]) header = geotiff._prase_header_string(geo_head_txt) expected_pixel = np.asarray([[16972, 26086], [16708, 25585], [16946, 25445], [17228, 25956], [16972, 26086]]) pixel_coord = geotiff.geo2pixel(gis_coord, header) np.testing.assert_almost_equal(pixel_coord, expected_pixel) gis_revert = geotiff.pixel2geo(pixel_coord, header) np.testing.assert_almost_equal(gis_revert, gis_coord, decimal=3) def test_is_roi_type(): roi1 = np.asarray([[123, 456], [456, 789]]) roi2 = [roi1, roi1] roi_wrong_1 = (123, 345) roi_wrong_2 = [123, 456] roi_wrong_3 = [[123, 345], [456, 789]] roi1_out = geotiff._is_roi_type(roi1) assert roi1_out == [roi1] roi2_out = geotiff._is_roi_type(roi2) assert roi2_out == roi2 with pytest.raises(TypeError) as errinfo: roi_w1_out = geotiff._is_roi_type(roi_wrong_1) assert 'Only numpy.ndarray points and list contains numpy.ndarray points are supported' in str(errinfo.value) with pytest.raises(TypeError) as errinfo: roi_w2_out = geotiff._is_roi_type(roi_wrong_2) assert 'Only list contains numpy.ndarray points are supported' in str(errinfo.value) with pytest.raises(TypeError) as errinfo: roi_w3_out = geotiff._is_roi_type(roi_wrong_3) assert 'Only list contains numpy.ndarray points are supported' in str(errinfo.value) def test_imarray_clip_2d_rgb_rgba(): photo_path = 'file/pix4d.diy/photos/DJI_0174.JPG' roi = np.asarray([[2251, 1223], [2270, 1270], [2227, 1263], [2251, 1223]]) fig, ax = plt.subplots(1,3, figsize=(12,4)) # ----------------------------------------------- imarray_rgb = imread(photo_path) assert imarray_rgb.shape == (3456, 4608, 3) im_out_rgb, offsets_rgb = geotiff.imarray_clip(imarray_rgb, roi) ax[1].imshow(im_out_rgb / 255) ax[1].set_title('rgb') # ----------------------------------------------- imarray_2d = rgb2gray(imarray_rgb) assert imarray_2d.shape == (3456, 4608) im_out_2d, offsets_2d = geotiff.imarray_clip(imarray_2d, roi) ax[0].imshow(im_out_2d, cmap='gray') ax[0].set_title('gray') 
# ----------------------------------------------- imarray_rgba = np.dstack((imarray_rgb, np.ones((3456, 4608)) * 255)) assert imarray_rgba.shape == (3456, 4608, 4) im_out_rgba, offsets = geotiff.imarray_clip(imarray_rgba, roi) ax[2].imshow(im_out_rgba/255) ax[2].set_title('rgba') plt.show() def test_clip_roi_pixel(): poly = shp.read_shp2d('file/shp_test/test.shp') poly_pixel = geotiff.geo2pixel(poly['0'], geotiff.get_header('file/tiff_test/2_12.tif')) imarray, offset = geotiff.clip_roi(poly_pixel, 'file/tiff_test/2_12.tif', is_geo=False) assert len(imarray) == 1 def test_clip_roi_geo(): poly = shp.read_shp2d('file/shp_test/test.shp') imarray, offset = geotiff.clip_roi(poly['0'], 'file/tiff_test/2_12.tif', is_geo=True) assert len(imarray) == 1
[((10, 15, 10, 75), 'easyric.io.geotiff._prase_header_string', 'geotiff._prase_header_string', ({(10, 44, 10, 74): '"""* 256 image_width (1H) 13503"""'}, {}), "('* 256 image_width (1H) 13503')", False, 'from easyric.io import geotiff, shp\n'), ((15, 15, 15, 76), 'easyric.io.geotiff._prase_header_string', 'geotiff._prase_header_string', ({(15, 44, 15, 75): '"""* 257 image_length (1H) 19866"""'}, {}), "('* 257 image_length (1H) 19866')", False, 'from easyric.io import geotiff, shp\n'), ((21, 15, 21, 51), 'easyric.io.geotiff._prase_header_string', 'geotiff._prase_header_string', ({(21, 44, 21, 50): 'in_str'}, {}), '(in_str)', False, 'from easyric.io import geotiff, shp\n'), ((27, 15, 27, 51), 'easyric.io.geotiff._prase_header_string', 'geotiff._prase_header_string', ({(27, 44, 27, 50): 'in_str'}, {}), '(in_str)', False, 'from easyric.io import geotiff, shp\n'), ((31, 15, 31, 51), 'easyric.io.geotiff._prase_header_string', 'geotiff._prase_header_string', ({(31, 44, 31, 50): 'in_str'}, {}), '(in_str)', False, 'from easyric.io import geotiff, shp\n'), ((36, 15, 36, 81), 'easyric.io.geotiff._prase_header_string', 'geotiff._prase_header_string', ({(36, 44, 36, 80): '"""* 42113 gdal_nodata (7s) b\'-10000\'"""'}, {}), '("* 42113 gdal_nodata (7s) b\'-10000\'")', False, 'from easyric.io import geotiff, shp\n'), ((42, 15, 42, 51), 'easyric.io.geotiff._prase_header_string', 'geotiff._prase_header_string', ({(42, 44, 42, 50): 'in_str'}, {}), '(in_str)', False, 'from easyric.io import geotiff, shp\n'), ((51, 15, 51, 101), 'easyric.io.geotiff._prase_header_string', 'geotiff._prase_header_string', ({(51, 44, 51, 100): '"""* 34737 geo_ascii_params (30s) b\'UTM zone 54N|WGS 84|\'"""'}, {}), '(\n "* 34737 geo_ascii_params (30s) b\'UTM zone 54N|WGS 84|\'")', False, 'from easyric.io import geotiff, shp\n'), ((67, 10, 67, 97), 'easyric.io.geotiff.point_query', 'geotiff.point_query', ({(67, 30, 67, 89): '"""file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif"""', (67, 91, 67, 96): 'point'}, {}), "('file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif',\n point)", False, 'from easyric.io import geotiff, shp\n'), ((72, 13, 72, 79), 'numpy.asarray', 'np.asarray', ({(72, 24, 72, 78): '[[368022.581, 3955501.054], [368024.032, 3955500.465]]'}, {}), '([[368022.581, 3955501.054], [368024.032, 3955500.465]])', True, 'import numpy as np\n'), ((73, 10, 73, 98), 'easyric.io.geotiff.point_query', 'geotiff.point_query', ({(73, 30, 73, 89): '"""file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif"""', (73, 91, 73, 97): 'points'}, {}), "('file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif',\n points)", False, 'from easyric.io import geotiff, shp\n'), ((74, 15, 74, 48), 'numpy.asarray', 'np.asarray', ({(74, 26, 74, 47): '[97.624344, 97.59617]'}, {}), '([97.624344, 97.59617])', True, 'import numpy as np\n'), ((76, 4, 76, 60), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (), '', True, 'import numpy as np\n'), ((80, 13, 80, 79), 'numpy.asarray', 'np.asarray', ({(80, 24, 80, 78): '[[368022.581, 3955501.054], [368024.032, 3955500.465]]'}, {}), '([[368022.581, 3955501.054], [368024.032, 3955500.465]])', True, 'import numpy as np\n'), ((81, 12, 81, 51), 'numpy.asarray', 'np.asarray', ({(81, 23, 81, 50): '[[368023.004, 3955500.669]]'}, {}), '([[368023.004, 3955500.669]])', True, 'import numpy as np\n'), ((85, 10, 85, 98), 'easyric.io.geotiff.point_query', 'geotiff.point_query', ({(85, 30, 85, 89): '"""file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif"""', (85, 91, 85, 97): 'p_list'}, 
{}), "('file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif',\n p_list)", False, 'from easyric.io import geotiff, shp\n'), ((88, 4, 88, 66), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (), '', True, 'import numpy as np\n'), ((89, 4, 89, 66), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (), '', True, 'import numpy as np\n'), ((100, 14, 100, 94), 'easyric.io.geotiff.mean_values', 'geotiff.mean_values', ({(100, 34, 100, 93): '"""file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif"""'}, {}), "('file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif')", False, 'from easyric.io import geotiff, shp\n'), ((108, 14, 108, 61), 'easyric.io.geotiff.mean_values', 'geotiff.mean_values', ({(108, 34, 108, 60): '"""file/tiff_test/2_12.tif"""'}, {}), "('file/tiff_test/2_12.tif')", False, 'from easyric.io import geotiff, shp\n'), ((138, 16, 142, 66), 'numpy.asarray', 'np.asarray', ({(138, 27, 142, 65): '[[484593.67474654, 3862259.42413431], [484593.41064743, 3862259.92582402],\n [484593.64841806, 3862260.06515117], [484593.93077419, 3862259.55455913\n ], [484593.67474654, 3862259.42413431]]'}, {}), '([[484593.67474654, 3862259.42413431], [484593.41064743, \n 3862259.92582402], [484593.64841806, 3862260.06515117], [\n 484593.93077419, 3862259.55455913], [484593.67474654, 3862259.42413431]])', True, 'import numpy as np\n'), ((144, 13, 144, 55), 'easyric.io.geotiff._prase_header_string', 'geotiff._prase_header_string', ({(144, 42, 144, 54): 'geo_head_txt'}, {}), '(geo_head_txt)', False, 'from easyric.io import geotiff, shp\n'), ((146, 21, 150, 49), 'numpy.asarray', 'np.asarray', ({(146, 32, 150, 48): '[[16972, 26086], [16708, 25585], [16946, 25445], [17228, 25956], [16972, 26086]\n ]'}, {}), '([[16972, 26086], [16708, 25585], [16946, 25445], [17228, 25956],\n [16972, 26086]])', True, 'import numpy as np\n'), ((152, 18, 152, 54), 'easyric.io.geotiff.geo2pixel', 'geotiff.geo2pixel', ({(152, 36, 152, 45): 'gis_coord', (152, 47, 152, 53): 'header'}, {}), '(gis_coord, header)', False, 'from easyric.io import geotiff, shp\n'), ((154, 4, 154, 63), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(154, 35, 154, 46): 'pixel_coord', (154, 48, 154, 62): 'expected_pixel'}, {}), '(pixel_coord, expected_pixel)', True, 'import numpy as np\n'), ((156, 17, 156, 55), 'easyric.io.geotiff.pixel2geo', 'geotiff.pixel2geo', ({(156, 35, 156, 46): 'pixel_coord', (156, 48, 156, 54): 'header'}, {}), '(pixel_coord, header)', False, 'from easyric.io import geotiff, shp\n'), ((158, 4, 158, 68), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (), '', True, 'import numpy as np\n'), ((162, 11, 162, 47), 'numpy.asarray', 'np.asarray', ({(162, 22, 162, 46): '[[123, 456], [456, 789]]'}, {}), '([[123, 456], [456, 789]])', True, 'import numpy as np\n'), ((169, 15, 169, 41), 'easyric.io.geotiff._is_roi_type', 'geotiff._is_roi_type', ({(169, 36, 169, 40): 'roi1'}, {}), '(roi1)', False, 'from easyric.io import geotiff, shp\n'), ((172, 15, 172, 41), 'easyric.io.geotiff._is_roi_type', 'geotiff._is_roi_type', ({(172, 36, 172, 40): 'roi2'}, {}), '(roi2)', False, 'from easyric.io import geotiff, shp\n'), ((190, 10, 190, 78), 'numpy.asarray', 'np.asarray', ({(190, 21, 190, 77): '[[2251, 1223], [2270, 1270], [2227, 1263], [2251, 1223]]'}, {}), '([[2251, 1223], [2270, 1270], [2227, 1263], [2251, 1223]])', True, 'import numpy as np\n'), ((192, 14, 192, 47), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as 
plt\n'), ((194, 18, 194, 36), 'skimage.io.imread', 'imread', ({(194, 25, 194, 35): 'photo_path'}, {}), '(photo_path)', False, 'from skimage.io import imread\n'), ((197, 30, 197, 68), 'easyric.io.geotiff.imarray_clip', 'geotiff.imarray_clip', ({(197, 51, 197, 62): 'imarray_rgb', (197, 64, 197, 67): 'roi'}, {}), '(imarray_rgb, roi)', False, 'from easyric.io import geotiff, shp\n'), ((203, 17, 203, 38), 'skimage.color.rgb2gray', 'rgb2gray', ({(203, 26, 203, 37): 'imarray_rgb'}, {}), '(imarray_rgb)', False, 'from skimage.color import rgb2gray\n'), ((206, 28, 206, 65), 'easyric.io.geotiff.imarray_clip', 'geotiff.imarray_clip', ({(206, 49, 206, 59): 'imarray_2d', (206, 61, 206, 64): 'roi'}, {}), '(imarray_2d, roi)', False, 'from easyric.io import geotiff, shp\n'), ((215, 27, 215, 66), 'easyric.io.geotiff.imarray_clip', 'geotiff.imarray_clip', ({(215, 48, 215, 60): 'imarray_rgba', (215, 62, 215, 65): 'roi'}, {}), '(imarray_rgba, roi)', False, 'from easyric.io import geotiff, shp\n'), ((219, 4, 219, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((223, 11, 223, 51), 'easyric.io.shp.read_shp2d', 'shp.read_shp2d', ({(223, 26, 223, 50): '"""file/shp_test/test.shp"""'}, {}), "('file/shp_test/test.shp')", False, 'from easyric.io import geotiff, shp\n'), ((225, 22, 225, 91), 'easyric.io.geotiff.clip_roi', 'geotiff.clip_roi', (), '', False, 'from easyric.io import geotiff, shp\n'), ((230, 11, 230, 51), 'easyric.io.shp.read_shp2d', 'shp.read_shp2d', ({(230, 26, 230, 50): '"""file/shp_test/test.shp"""'}, {}), "('file/shp_test/test.shp')", False, 'from easyric.io import geotiff, shp\n'), ((231, 22, 231, 89), 'easyric.io.geotiff.clip_roi', 'geotiff.clip_roi', (), '', False, 'from easyric.io import geotiff, shp\n'), ((46, 31, 46, 58), 'pyproj.CRS.from_epsg', 'pyproj.CRS.from_epsg', ({(46, 52, 46, 57): '(32654)'}, {}), '(32654)', False, 'import pyproj\n'), ((68, 40, 68, 60), 'numpy.float32', 'np.float32', ({(68, 51, 68, 59): '(97.45558)'}, {}), '(97.45558)', True, 'import numpy as np\n'), ((84, 16, 84, 38), 'numpy.asarray', 'np.asarray', ({(84, 27, 84, 37): '[97.45558]'}, {}), '([97.45558])', True, 'import numpy as np\n'), ((84, 40, 84, 73), 'numpy.asarray', 'np.asarray', ({(84, 51, 84, 72): '[97.624344, 97.59617]'}, {}), '([97.624344, 97.59617])', True, 'import numpy as np\n'), ((104, 44, 104, 65), 'numpy.float32', 'np.float32', ({(104, 55, 104, 64): '(97.562584)'}, {}), '(97.562584)', True, 'import numpy as np\n'), ((110, 44, 110, 71), 'numpy.float', 'np.float', ({(110, 53, 110, 70): '(72.31657466298653)'}, {}), '(72.31657466298653)', True, 'import numpy as np\n'), ((175, 9, 175, 33), 'pytest.raises', 'pytest.raises', ({(175, 23, 175, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((176, 21, 176, 54), 'easyric.io.geotiff._is_roi_type', 'geotiff._is_roi_type', ({(176, 42, 176, 53): 'roi_wrong_1'}, {}), '(roi_wrong_1)', False, 'from easyric.io import geotiff, shp\n'), ((179, 9, 179, 33), 'pytest.raises', 'pytest.raises', ({(179, 23, 179, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((180, 21, 180, 54), 'easyric.io.geotiff._is_roi_type', 'geotiff._is_roi_type', ({(180, 42, 180, 53): 'roi_wrong_2'}, {}), '(roi_wrong_2)', False, 'from easyric.io import geotiff, shp\n'), ((183, 9, 183, 33), 'pytest.raises', 'pytest.raises', ({(183, 23, 183, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((184, 21, 184, 54), 'easyric.io.geotiff._is_roi_type', 'geotiff._is_roi_type', ({(184, 42, 184, 53): 
'roi_wrong_3'}, {}), '(roi_wrong_3)', False, 'from easyric.io import geotiff, shp\n'), ((224, 46, 224, 91), 'easyric.io.geotiff.get_header', 'geotiff.get_header', ({(224, 65, 224, 90): '"""file/tiff_test/2_12.tif"""'}, {}), "('file/tiff_test/2_12.tif')", False, 'from easyric.io import geotiff, shp\n'), ((212, 43, 212, 64), 'numpy.ones', 'np.ones', ({(212, 51, 212, 63): '(3456, 4608)'}, {}), '((3456, 4608))', True, 'import numpy as np\n')]
matecsaj/ebay_rest
src/ebay_rest/api/buy_marketplace_insights/models/item_location.py
dd23236f39e05636eff222f99df1e3699ce47d4a
# coding: utf-8 """ Marketplace Insights API <a href=\"https://developer.ebay.com/api-docs/static/versioning.html#limited\" target=\"_blank\"> <img src=\"/cms/img/docs/partners-api.svg\" class=\"legend-icon partners-icon\" title=\"Limited Release\" alt=\"Limited Release\" />(Limited Release)</a> The Marketplace Insights API provides the ability to search for sold items on eBay by keyword, GTIN, category, and product and returns the of sales history of those items. # noqa: E501 OpenAPI spec version: v1_beta.2.2 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class ItemLocation(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'address_line1': 'str', 'address_line2': 'str', 'city': 'str', 'country': 'str', 'county': 'str', 'postal_code': 'str', 'state_or_province': 'str' } attribute_map = { 'address_line1': 'addressLine1', 'address_line2': 'addressLine2', 'city': 'city', 'country': 'country', 'county': 'county', 'postal_code': 'postalCode', 'state_or_province': 'stateOrProvince' } def __init__(self, address_line1=None, address_line2=None, city=None, country=None, county=None, postal_code=None, state_or_province=None): # noqa: E501 """ItemLocation - a model defined in Swagger""" # noqa: E501 self._address_line1 = None self._address_line2 = None self._city = None self._country = None self._county = None self._postal_code = None self._state_or_province = None self.discriminator = None if address_line1 is not None: self.address_line1 = address_line1 if address_line2 is not None: self.address_line2 = address_line2 if city is not None: self.city = city if country is not None: self.country = country if county is not None: self.county = county if postal_code is not None: self.postal_code = postal_code if state_or_province is not None: self.state_or_province = state_or_province @property def address_line1(self): """Gets the address_line1 of this ItemLocation. # noqa: E501 The first line of the street address. # noqa: E501 :return: The address_line1 of this ItemLocation. # noqa: E501 :rtype: str """ return self._address_line1 @address_line1.setter def address_line1(self, address_line1): """Sets the address_line1 of this ItemLocation. The first line of the street address. # noqa: E501 :param address_line1: The address_line1 of this ItemLocation. # noqa: E501 :type: str """ self._address_line1 = address_line1 @property def address_line2(self): """Gets the address_line2 of this ItemLocation. # noqa: E501 The second line of the street address. This field may contain such values as an apartment or suite number. # noqa: E501 :return: The address_line2 of this ItemLocation. # noqa: E501 :rtype: str """ return self._address_line2 @address_line2.setter def address_line2(self, address_line2): """Sets the address_line2 of this ItemLocation. The second line of the street address. This field may contain such values as an apartment or suite number. # noqa: E501 :param address_line2: The address_line2 of this ItemLocation. # noqa: E501 :type: str """ self._address_line2 = address_line2 @property def city(self): """Gets the city of this ItemLocation. # noqa: E501 The city in which the item is located. # noqa: E501 :return: The city of this ItemLocation. 
# noqa: E501 :rtype: str """ return self._city @city.setter def city(self, city): """Sets the city of this ItemLocation. The city in which the item is located. # noqa: E501 :param city: The city of this ItemLocation. # noqa: E501 :type: str """ self._city = city @property def country(self): """Gets the country of this ItemLocation. # noqa: E501 The two-letter <a href=\"https://www.iso.org/iso-3166-country-codes.html\">ISO 3166</a> standard code that indicates the country in which the item is located. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa: E501 :return: The country of this ItemLocation. # noqa: E501 :rtype: str """ return self._country @country.setter def country(self, country): """Sets the country of this ItemLocation. The two-letter <a href=\"https://www.iso.org/iso-3166-country-codes.html\">ISO 3166</a> standard code that indicates the country in which the item is located. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa: E501 :param country: The country of this ItemLocation. # noqa: E501 :type: str """ self._country = country @property def county(self): """Gets the county of this ItemLocation. # noqa: E501 The county in which the item is located. # noqa: E501 :return: The county of this ItemLocation. # noqa: E501 :rtype: str """ return self._county @county.setter def county(self, county): """Sets the county of this ItemLocation. The county in which the item is located. # noqa: E501 :param county: The county of this ItemLocation. # noqa: E501 :type: str """ self._county = county @property def postal_code(self): """Gets the postal_code of this ItemLocation. # noqa: E501 The postal code (or zip code in US) where the item is located.<br /> <br /><span class=\"tablenote\"> <b> Note: </b>Beginning in late January 2020, the displayed postal code will be masked to all users. Different countries will mask postal/zip codes in slightly different ways, but an example would be <code>951**</code>.</span> # noqa: E501 :return: The postal_code of this ItemLocation. # noqa: E501 :rtype: str """ return self._postal_code @postal_code.setter def postal_code(self, postal_code): """Sets the postal_code of this ItemLocation. The postal code (or zip code in US) where the item is located.<br /> <br /><span class=\"tablenote\"> <b> Note: </b>Beginning in late January 2020, the displayed postal code will be masked to all users. Different countries will mask postal/zip codes in slightly different ways, but an example would be <code>951**</code>.</span> # noqa: E501 :param postal_code: The postal_code of this ItemLocation. # noqa: E501 :type: str """ self._postal_code = postal_code @property def state_or_province(self): """Gets the state_or_province of this ItemLocation. # noqa: E501 The state or province in which the item is located. # noqa: E501 :return: The state_or_province of this ItemLocation. # noqa: E501 :rtype: str """ return self._state_or_province @state_or_province.setter def state_or_province(self, state_or_province): """Sets the state_or_province of this ItemLocation. The state or province in which the item is located. # noqa: E501 :param state_or_province: The state_or_province of this ItemLocation. 
# noqa: E501 :type: str """ self._state_or_province = state_or_province def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ItemLocation, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ItemLocation): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
[((240, 23, 240, 56), 'six.iteritems', 'six.iteritems', ({(240, 37, 240, 55): 'self.swagger_types'}, {}), '(self.swagger_types)', False, 'import six\n')]
aadishgoel2013/Algos-with-Python
fractionalKnapsack.py
19541607c8ede9a76a8cbbe047e01343080cfd5b
# Fractional Knapsack

wt = [40,50,30,10,10,40,30]
pro = [30,20,20,25,5,35,15]
n = len(wt)

data = [ (i,pro[i],wt[i]) for i in range(n) ]
bag = 100

data.sort(key=lambda x: x[1]/x[2], reverse=True)

profit=0
ans=[]
i=0
while i<n:
    if data[i][2]<=bag:
        bag-=data[i][2]
        ans.append(data[i][0])
        profit+=data[i][1]
        i+=1
    else:
        break

if i<n:
    ans.append(data[i][0])
    profit += (bag*data[i][1])/data[i][2]

print(profit,ans)
[]
CrackerCat/xed
pysrc/classifier.py
428712c28e831573579b7f749db63d3a58dcdbd9
#!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
from __future__ import print_function
import re

import genutil
import codegen


def _emit_function(fe, isa_sets, name):
    fo = codegen.function_object_t('xed_classify_{}'.format(name))
    fo.add_arg('const xed_decoded_inst_t* d')
    fo.add_code_eol(' const xed_isa_set_enum_t isa_set = xed_decoded_inst_get_isa_set(d)')

    # FIXME: 2017-07-14 optimization: could use a static array for faster checking, smaller code
    switch = codegen.c_switch_generator_t('isa_set', fo)
    isa_sets_sorted = sorted(isa_sets)
    for c in isa_sets_sorted:
        switch.add_case('XED_ISA_SET_{}'.format(c.upper()),[],do_break=False)
    if len(isa_sets) > 0:
        switch.add('return 1;')
    switch.add_default(['return 0;'], do_break=False)
    switch.finish()
    fo.emit_file_emitter(fe)


def work(agi):
    sse_isa_sets = set([])
    avx_isa_sets = set([])
    avx512_isa_sets = set([])
    avx512_kmask_op = set([])

    for generator in agi.generator_list:
        for ii in generator.parser_output.instructions:
            if genutil.field_check(ii, 'iclass'):
                if re.search('AVX512',ii.isa_set):
                    avx512_isa_sets.add(ii.isa_set)
                    if re.search('KOP',ii.isa_set):
                        avx512_kmask_op.add(ii.isa_set)
                elif re.search('AVX',ii.isa_set) or ii.isa_set in ['F16C', 'FMA']:
                    avx_isa_sets.add(ii.isa_set)
                elif re.search('SSE',ii.isa_set) or ii.isa_set in ['AES','PCLMULQDQ']:
                    # Exclude MMX instructions that come in with SSE2 &
                    # SSSE3. The several purely MMX instr in SSE are
                    # "SSE-opcodes" with memop operands. One can look for
                    # those with SSE2MMX and SSSE3MMX xed isa_sets.
                    #
                    # Also exclude the SSE_PREFETCH operations; Those are
                    # just memops.
                    if (not re.search('MMX',ii.isa_set) and not re.search('PREFETCH',ii.isa_set)
                        and not re.search('X87',ii.isa_set) and not re.search('MWAIT',ii.isa_set)):
                        sse_isa_sets.add(ii.isa_set)

    fe = agi.open_file('xed-classifiers.c') # xed_file_emitter_t
    _emit_function(fe, avx512_isa_sets, 'avx512')
    _emit_function(fe, avx512_kmask_op, 'avx512_maskop')
    _emit_function(fe, avx_isa_sets, 'avx')
    _emit_function(fe, sse_isa_sets, 'sse')
    fe.close()
    return
[((33, 13, 33, 56), 'codegen.c_switch_generator_t', 'codegen.c_switch_generator_t', ({(33, 42, 33, 51): '"""isa_set"""', (33, 53, 33, 55): 'fo'}, {}), "('isa_set', fo)", False, 'import codegen\n'), ((52, 12, 52, 45), 'genutil.field_check', 'genutil.field_check', ({(52, 32, 52, 34): 'ii', (52, 36, 52, 44): '"""iclass"""'}, {}), "(ii, 'iclass')", False, 'import genutil\n'), ((53, 16, 53, 46), 're.search', 're.search', ({(53, 26, 53, 34): '"""AVX512"""', (53, 35, 53, 45): 'ii.isa_set'}, {}), "('AVX512', ii.isa_set)", False, 'import re\n'), ((55, 20, 55, 47), 're.search', 're.search', ({(55, 30, 55, 35): '"""KOP"""', (55, 36, 55, 46): 'ii.isa_set'}, {}), "('KOP', ii.isa_set)", False, 'import re\n'), ((57, 18, 57, 45), 're.search', 're.search', ({(57, 28, 57, 33): '"""AVX"""', (57, 34, 57, 44): 'ii.isa_set'}, {}), "('AVX', ii.isa_set)", False, 'import re\n'), ((59, 18, 59, 45), 're.search', 're.search', ({(59, 28, 59, 33): '"""SSE"""', (59, 34, 59, 44): 'ii.isa_set'}, {}), "('SSE', ii.isa_set)", False, 'import re\n'), ((67, 25, 67, 52), 're.search', 're.search', ({(67, 35, 67, 40): '"""MMX"""', (67, 41, 67, 51): 'ii.isa_set'}, {}), "('MMX', ii.isa_set)", False, 'import re\n'), ((67, 61, 67, 93), 're.search', 're.search', ({(67, 71, 67, 81): '"""PREFETCH"""', (67, 82, 67, 92): 'ii.isa_set'}, {}), "('PREFETCH', ii.isa_set)", False, 'import re\n'), ((68, 29, 68, 56), 're.search', 're.search', ({(68, 39, 68, 44): '"""X87"""', (68, 45, 68, 55): 'ii.isa_set'}, {}), "('X87', ii.isa_set)", False, 'import re\n'), ((68, 65, 68, 94), 're.search', 're.search', ({(68, 75, 68, 82): '"""MWAIT"""', (68, 83, 68, 93): 'ii.isa_set'}, {}), "('MWAIT', ii.isa_set)", False, 'import re\n')]
lanz/Tenable.io-SDK-for-Python
tests/integration/api/test_target_groups.py
e81a61c369ac103d1524b0898153a569536a131e
import pytest from tenable_io.api.target_groups import TargetListEditRequest from tenable_io.api.models import TargetGroup, TargetGroupList @pytest.mark.vcr() def test_target_groups_create(new_target_group): assert isinstance(new_target_group, TargetGroup), u'The `create` method did not return type `TargetGroup`.' @pytest.mark.vcr() def test_target_groups_details(client, new_target_group): target_group = new_target_group details = client.target_groups_api.details(target_group.id) assert isinstance(details, TargetGroup), u'The `details` method did not return type `TargetGroup`.' assert details.id == target_group.id, u'Expected the `details` response to match the requested target group.' @pytest.mark.vcr() def test_target_groups_list(client): target_groups = client.target_groups_api.list() assert isinstance(target_groups, TargetGroupList), u'The `details` method did not return type `TargetGroup`.' for group in target_groups.target_groups: assert isinstance(group, TargetGroup), u'Expected a list of type `TargetGroup`.' @pytest.mark.vcr() def test_target_groups_delete(client, new_target_group): assert client.target_groups_api.delete(new_target_group.id), u'The target group was not deleted.' @pytest.mark.vcr() def test_target_groups_edit(client, new_target_group): target_group = new_target_group edited_name = 'test_target_group_edit' edited_group = client.target_groups_api.edit(TargetListEditRequest(name=edited_name), target_group.id) assert isinstance(edited_group, TargetGroup), u'The `edit` method did not return type `TargetGroup`.' assert edited_group.id == target_group.id, u'Expected the edited target group to match the requested target group.' assert edited_group.name == edited_name, u'Expected the name to be updated.'
[((7, 1, 7, 18), 'pytest.mark.vcr', 'pytest.mark.vcr', ({}, {}), '()', False, 'import pytest\n'), ((12, 1, 12, 18), 'pytest.mark.vcr', 'pytest.mark.vcr', ({}, {}), '()', False, 'import pytest\n'), ((20, 1, 20, 18), 'pytest.mark.vcr', 'pytest.mark.vcr', ({}, {}), '()', False, 'import pytest\n'), ((28, 1, 28, 18), 'pytest.mark.vcr', 'pytest.mark.vcr', ({}, {}), '()', False, 'import pytest\n'), ((33, 1, 33, 18), 'pytest.mark.vcr', 'pytest.mark.vcr', ({}, {}), '()', False, 'import pytest\n'), ((37, 49, 37, 88), 'tenable_io.api.target_groups.TargetListEditRequest', 'TargetListEditRequest', (), '', False, 'from tenable_io.api.target_groups import TargetListEditRequest\n')]
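The apis fields in this dump pair each extracted call with a (start_line, start_col, end_line, end_col) span, the dotted callee name, and further call metadata. The actual extraction pipeline is not shown here; the snippet below is a rough sketch, assuming Python 3.8+ ast end-position attributes, of how such span-annotated call records could be produced. extract_calls and dotted are invented names, and only the span and dotted name are reproduced, not the remaining fields.

import ast

def extract_calls(source):
    """Yield ((lineno, col, end_lineno, end_col), dotted_name) for each call
    whose callee is a plain name or an attribute chain (Python 3.8+)."""
    def dotted(node):
        if isinstance(node, ast.Name):
            return node.id
        if isinstance(node, ast.Attribute):
            base = dotted(node.value)
            return base + '.' + node.attr if base else None
        return None

    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call):
            name = dotted(node.func)
            if name:
                yield ((node.lineno, node.col_offset,
                        node.end_lineno, node.end_col_offset), name)

# Illustration (CPython 3.8+):
# list(extract_calls("import pytest\n@pytest.mark.vcr()\ndef f(): pass\n"))
# -> [((2, 1, 2, 18), 'pytest.mark.vcr')], matching the span style seen above.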
tasercake/nnAudio
Installation/nnAudio/Spectrogram.py
5edc37b7b73674598d533261314429b875ba285d
""" Module containing all the spectrogram classes """ # 0.2.0 import torch import torch.nn as nn from torch.nn.functional import conv1d, conv2d, fold import numpy as np from time import time from nnAudio.librosa_functions import * from nnAudio.utils import * sz_float = 4 # size of a float epsilon = 10e-8 # fudge factor for normalization ### --------------------------- Spectrogram Classes ---------------------------### class STFT(torch.nn.Module): """This function is to calculate the short-time Fourier transform (STFT) of the input signal. Input signal should be in either of the following shapes.\n 1. ``(len_audio)``\n 2. ``(num_audio, len_audio)``\n 3. ``(num_audio, 1, len_audio)`` The correct shape will be inferred automatically if the input follows these 3 shapes. Most of the arguments follow the convention from librosa. This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``. Parameters ---------- n_fft : int The window size. Default value is 2048. freq_bins : int Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins. hop_length : int The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``. window : str The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to scipy documentation for possible windowing functions. The default value is 'hann'. freq_scale : 'linear', 'log', or 'no' Determine the spacing between each frequency bin. When `linear` or `log` is used, the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will start at 0Hz and end at Nyquist frequency with linear spacing. center : bool Putting the STFT keneral at the center of the time-step or not. If ``False``, the time index is the beginning of the STFT kernel, if ``True``, the time index is the center of the STFT kernel. Default value if ``True``. pad_mode : str The padding method. Default value is 'reflect'. inverse : bool To activate the iSTFT module or not. By default, it is False to save GPU memory. fmin : int The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument does nothing. fmax : int The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument does nothing. sr : int The sampling rate for the input audio. It is used to calucate the correct ``fmin`` and ``fmax``. Setting the correct sampling rate is very important for calculating the correct frequency. trainable : bool Determine if the STFT kenrels are trainable or not. If ``True``, the gradients for STFT kernels will also be caluclated and the STFT kernels will be updated during model training. Default value is ``False`` output_format : str Control the spectrogram output type, either ``Magnitude``, ``Complex``, or ``Phase``. The output_format can also be changed during the ``forward`` method. verbose : bool If ``True``, it shows layer information. If ``False``, it suppresses all prints device : str Choose which device to initialize this layer. Default value is 'cpu' Returns ------- spectrogram : torch.tensor It returns a tensor of spectrograms. 
``shape = (num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; ``shape = (num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``; Examples -------- >>> spec_layer = Spectrogram.STFT() >>> specs = spec_layer(x) """ def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann', freq_scale='no', center=True, pad_mode='reflect', iSTFT=False, fmin=50, fmax=6000, sr=22050, trainable=False, output_format="Complex", verbose=True): super().__init__() # Trying to make the default setting same as librosa if win_length==None: win_length = n_fft if hop_length==None: hop_length = int(win_length // 4) self.output_format = output_format self.trainable = trainable self.stride = hop_length self.center = center self.pad_mode = pad_mode self.n_fft = n_fft self.freq_bins = freq_bins self.trainable = trainable self.pad_amount = self.n_fft // 2 self.window = window self.win_length = win_length self.iSTFT = iSTFT self.trainable = trainable start = time() # Create filter windows for stft kernel_sin, kernel_cos, self.bins2freq, self.bin_list, window_mask = create_fourier_kernels(n_fft, win_length=win_length, freq_bins=freq_bins, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr, verbose=verbose) kernel_sin = torch.tensor(kernel_sin, dtype=torch.float) kernel_cos = torch.tensor(kernel_cos, dtype=torch.float) # In this way, the inverse kernel and the forward kernel do not share the same memory... kernel_sin_inv = torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0) kernel_cos_inv = torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0) if iSTFT: self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1)) self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1)) # Making all these variables nn.Parameter, so that the model can be used with nn.Parallel # self.kernel_sin = torch.nn.Parameter(self.kernel_sin, requires_grad=self.trainable) # self.kernel_cos = torch.nn.Parameter(self.kernel_cos, requires_grad=self.trainable) # Applying window functions to the Fourier kernels window_mask = torch.tensor(window_mask) wsin = kernel_sin * window_mask wcos = kernel_cos * window_mask if self.trainable==False: self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos) if self.trainable==True: wsin = torch.nn.Parameter(wsin, requires_grad=self.trainable) wcos = torch.nn.Parameter(wcos, requires_grad=self.trainable) self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos) # Prepare the shape of window mask so that it can be used later in inverse self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1)) if verbose==True: print("STFT kernels created, time used = {:.4f} seconds".format(time()-start)) else: pass def forward(self, x, output_format=None): """ Convert a batch of waveforms to spectrograms. Parameters ---------- x : torch tensor Input signal should be in either of the following shapes.\n 1. ``(len_audio)``\n 2. ``(num_audio, len_audio)``\n 3. ``(num_audio, 1, len_audio)`` It will be automatically broadcast to the right shape output_format : str Control the type of spectrogram to be return. Can be either ``Magnitude`` or ``Complex`` or ``Phase``. Default value is ``Complex``. 
""" output_format = output_format or self.output_format self.num_samples = x.shape[-1] x = broadcast_dim(x) if self.center: if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.pad_amount, 0) elif self.pad_mode == 'reflect': if self.num_samples < self.pad_amount: raise AssertionError("Signal length shorter than reflect padding length (n_fft // 2).") padding = nn.ReflectionPad1d(self.pad_amount) x = padding(x) spec_imag = conv1d(x, self.wsin, stride=self.stride) spec_real = conv1d(x, self.wcos, stride=self.stride) # Doing STFT by using conv1d # remove redundant parts spec_real = spec_real[:, :self.freq_bins, :] spec_imag = spec_imag[:, :self.freq_bins, :] if output_format=='Magnitude': spec = spec_real.pow(2) + spec_imag.pow(2) if self.trainable==True: return torch.sqrt(spec+1e-8) # prevent Nan gradient when sqrt(0) due to output=0 else: return torch.sqrt(spec) elif output_format=='Complex': return torch.stack((spec_real,-spec_imag), -1) # Remember the minus sign for imaginary part elif output_format=='Phase': return torch.atan2(-spec_imag+0.0,spec_real) # +0.0 removes -0.0 elements, which leads to error in calculating phase def inverse(self, X, onesided=True, length=None, refresh_win=True): """ This function is same as the :func:`~nnAudio.Spectrogram.iSTFT` class, which is to convert spectrograms back to waveforms. It only works for the complex value spectrograms. If you have the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. Parameters ---------- onesided : bool If your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``, else use ``onesided=False`` length : int To make sure the inverse STFT has the same output length of the original waveform, please set `length` as your intended waveform length. By default, ``length=None``, which will remove ``n_fft//2`` samples from the start and the end of the output. refresh_win : bool Recalculating the window sum square. If you have an input with fixed number of timesteps, you can increase the speed by setting ``refresh_win=False``. Else please keep ``refresh_win=True`` """ if (hasattr(self, 'kernel_sin_inv') != True) or (hasattr(self, 'kernel_cos_inv') != True): raise NameError("Please activate the iSTFT module by setting `iSTFT=True` if you want to use `inverse`") assert X.dim()==4 , "Inverse iSTFT only works for complex number," \ "make sure our tensor is in the shape of (batch, freq_bins, timesteps, 2)."\ "\nIf you have a magnitude spectrogram, please consider using Griffin-Lim." if onesided: X = extend_fbins(X) # extend freq X_real, X_imag = X[:, :, :, 0], X[:, :, :, 1] # broadcast dimensions to support 2D convolution X_real_bc = X_real.unsqueeze(1) X_imag_bc = X_imag.unsqueeze(1) a1 = conv2d(X_real_bc, self.kernel_cos_inv, stride=(1,1)) b2 = conv2d(X_imag_bc, self.kernel_sin_inv, stride=(1,1)) # compute real and imag part. 
signal lies in the real part real = a1 - b2 real = real.squeeze(-2)*self.window_mask # Normalize the amplitude with n_fft real /= (self.n_fft) # Overlap and Add algorithm to connect all the frames real = overlap_add(real, self.stride) # Prepare the window sumsqure for division # Only need to create this window once to save time # Unless the input spectrograms have different time steps if hasattr(self, 'w_sum')==False or refresh_win==True: self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten() self.nonzero_indices = (self.w_sum>1e-10) else: pass real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices]) # Remove padding if length is None: if self.center: real = real[:, self.pad_amount:-self.pad_amount] else: if self.center: real = real[:, self.pad_amount:self.pad_amount + length] else: real = real[:, :length] return real def extra_repr(self) -> str: return 'n_fft={}, Fourier Kernel size={}, iSTFT={}, trainable={}'.format( self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable ) class MelSpectrogram(torch.nn.Module): """This function is to calculate the Melspectrogram of the input signal. Input signal should be in either of the following shapes.\n 1. ``(len_audio)``\n 2. ``(num_audio, len_audio)``\n 3. ``(num_audio, 1, len_audio)`` The correct shape will be inferred automatically if the input follows these 3 shapes. Most of the arguments follow the convention from librosa. This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``. Parameters ---------- sr : int The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``. Setting the correct sampling rate is very important for calculating the correct frequency. n_fft : int The window size for the STFT. Default value is 2048 n_mels : int The number of Mel filter banks. The filter banks maps the n_fft to mel bins. Default value is 128. hop_length : int The hop (or stride) size. Default value is 512. window : str The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to scipy documentation for possible windowing functions. The default value is 'hann'. center : bool Putting the STFT keneral at the center of the time-step or not. If ``False``, the time index is the beginning of the STFT kernel, if ``True``, the time index is the center of the STFT kernel. Default value if ``True``. pad_mode : str The padding method. Default value is 'reflect'. htk : bool When ``False`` is used, the Mel scale is quasi-logarithmic. When ``True`` is used, the Mel scale is logarithmic. The default value is ``False``. fmin : int The starting frequency for the lowest Mel filter bank. fmax : int The ending frequency for the highest Mel filter bank. trainable_mel : bool Determine if the Mel filter banks are trainable or not. If ``True``, the gradients for Mel filter banks will also be calculated and the Mel filter banks will be updated during model training. Default value is ``False``. trainable_STFT : bool Determine if the STFT kenrels are trainable or not. If ``True``, the gradients for STFT kernels will also be caluclated and the STFT kernels will be updated during model training. Default value is ``False``. verbose : bool If ``True``, it shows layer information. If ``False``, it suppresses all prints. device : str Choose which device to initialize this layer. Default value is 'cpu'. Returns ------- spectrogram : torch.tensor It returns a tensor of spectrograms. 
shape = ``(num_samples, freq_bins,time_steps)``. Examples -------- >>> spec_layer = Spectrogram.MelSpectrogram() >>> specs = spec_layer(x) """ def __init__(self, sr=22050, n_fft=2048, n_mels=128, hop_length=512, window='hann', center=True, pad_mode='reflect', power=2.0, htk=False, fmin=0.0, fmax=None, norm=1, trainable_mel=False, trainable_STFT=False, verbose=True, **kwargs): super().__init__() self.stride = hop_length self.center = center self.pad_mode = pad_mode self.n_fft = n_fft self.power = power self.trainable_mel = trainable_mel self.trainable_STFT = trainable_STFT self.verbose = verbose # Preparing for the stft layer. No need for center self.stft = STFT(n_fft=n_fft, freq_bins=None, hop_length=hop_length, window=window, freq_scale='no', center=center, pad_mode=pad_mode, sr=sr, trainable=trainable_STFT, output_format="Magnitude", verbose=verbose, **kwargs) # Create filter windows for stft start = time() # Creating kernel for mel spectrogram start = time() mel_basis = mel(sr, n_fft, n_mels, fmin, fmax, htk=htk, norm=norm) mel_basis = torch.tensor(mel_basis) if verbose==True: print("STFT filter created, time used = {:.4f} seconds".format(time()-start)) print("Mel filter created, time used = {:.4f} seconds".format(time()-start)) else: pass if trainable_mel: # Making everything nn.Parameter, so that this model can support nn.DataParallel mel_basis = torch.nn.Parameter(mel_basis, requires_grad=trainable_mel) self.register_parameter('mel_basis', mel_basis) else: self.register_buffer('mel_basis', mel_basis) # if trainable_mel==True: # self.mel_basis = torch.nn.Parameter(self.mel_basis) # if trainable_STFT==True: # self.wsin = torch.nn.Parameter(self.wsin) # self.wcos = torch.nn.Parameter(self.wcos) def forward(self, x): """ Convert a batch of waveforms to Mel spectrograms. Parameters ---------- x : torch tensor Input signal should be in either of the following shapes.\n 1. ``(len_audio)``\n 2. ``(num_audio, len_audio)``\n 3. 
``(num_audio, 1, len_audio)`` It will be automatically broadcast to the right shape """ x = broadcast_dim(x) spec = self.stft(x, output_format='Magnitude')**self.power melspec = torch.matmul(self.mel_basis, spec) return melspec def extra_repr(self) -> str: return 'Mel filter banks size = {}, trainable_mel={}'.format( (*self.mel_basis.shape,), self.trainable_mel, self.trainable_STFT ) def to_stft(self, melspec, max_steps=1000, loss_threshold=1e-8, grad_threshold=1e-7, random_start=False, sgd_kwargs=None, eps=1e-12, return_extras=False, verbose=None): """ Best-attempt spectrogram inversion """ def loss_fn(pred, target): pred = pred.unsqueeze(1) if pred.ndim == 3 else pred target = target.unsqueeze(1) if target.ndim == 3 else target loss = (pred - target).pow(2).sum(-2).mean() return loss verbose = verbose or self.verbose # SGD arguments default_sgd_kwargs = dict(lr=1e3, momentum=0.9) if sgd_kwargs: default_sgd_kwargs.update(sgd_kwargs) sgd_kwargs = default_sgd_kwargs mel_basis = self.mel_basis.detach() shape = melspec.shape batch_size, n_mels, time = shape[0], shape[-2], shape[-1] _, n_freq = mel_basis.shape melspec = melspec.detach().view(-1, n_mels, time) if random_start: pred_stft_shape = (batch_size, n_freq, time) pred_stft = torch.zeros(*pred_stft_shape, dtype=torch.float32, device=mel_basis.device).normal_().clamp_(eps) else: pred_stft = (torch.pinverse(mel_basis) @ melspec).clamp(eps) pred_stft = nn.Parameter(pred_stft, requires_grad=True) sgd_kwargs["lr"] = sgd_kwargs["lr"] * batch_size optimizer = torch.optim.SGD([pred_stft], **sgd_kwargs) losses = [] for i in range(max_steps): optimizer.zero_grad() pred_mel = mel_basis @ pred_stft loss = loss_fn(pred_mel, melspec) losses.append(loss.item()) loss.backward() optimizer.step() # Check conditions if not loss.isfinite(): raise OverflowError("Overflow encountered in Mel -> STFT optimization") if loss_threshold and loss < loss_threshold: if verbose: print(f"Target error of {loss_threshold} reached. Stopping optimization.") break if grad_threshold and pred_stft.grad.max() < grad_threshold: if verbose: print(f"Target max gradient of {grad_threshold} reached. Stopping optimization.") break pred_stft = pred_stft.detach().clamp(eps) ** 0.5 pred_stft = pred_stft.view((*shape[:-2], n_freq, time)) if return_extras: return pred_stft, pred_mel.detach(), losses return pred_stft def inverse(self, melspec, mel_inversion_params=None, stft_inversion_params=None): default_mel_inversion_params = {} default_stft_inversion_params = {} mel_inversion_params = mel_inversion_params or {} stft_inversion_params = stft_inversion_params or {} if mel_inversion_params: mel_inversion_params = {**default_mel_inversion_params, **mel_inversion_params} if stft_inversion_params: stft_inversion_params = {**default_stft_inversion_params, **stft_inversion_params} recon_stft = self.to_stft(melspec, **mel_inversion_params) recon_audio = self.stft.inverse(recon_stft, **stft_inversion_params) return recon_audio class MFCC(torch.nn.Module): """This function is to calculate the Mel-frequency cepstral coefficients (MFCCs) of the input signal. This algorithm first extracts Mel spectrograms from the audio clips, then the discrete cosine transform is calcuated to obtain the final MFCCs. Therefore, the Mel spectrogram part can be made trainable using ``trainable_mel`` and ``trainable_STFT``. It only support type-II DCT at the moment. Input signal should be in either of the following shapes.\n 1. ``(len_audio)``\n 2. ``(num_audio, len_audio)``\n 3. 
``(num_audio, 1, len_audio)`` The correct shape will be inferred autommatically if the input follows these 3 shapes. Most of the arguments follow the convention from librosa. This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``. Parameters ---------- sr : int The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``. Setting the correct sampling rate is very important for calculating the correct frequency. n_mfcc : int The number of Mel-frequency cepstral coefficients norm : string The default value is 'ortho'. Normalization for DCT basis **kwargs Other arguments for Melspectrogram such as n_fft, n_mels, hop_length, and window Returns ------- MFCCs : torch.tensor It returns a tensor of MFCCs. shape = ``(num_samples, n_mfcc, time_steps)``. Examples -------- >>> spec_layer = Spectrogram.MFCC() >>> mfcc = spec_layer(x) """ def __init__(self, sr=22050, n_mfcc=20, norm='ortho', verbose=True, ref=1.0, amin=1e-10, top_db=80.0, **kwargs): super().__init__() self.melspec_layer = MelSpectrogram(sr=sr, verbose=verbose, **kwargs) self.m_mfcc = n_mfcc # attributes that will be used for _power_to_db if amin <= 0: raise ParameterError('amin must be strictly positive') amin = torch.tensor([amin]) ref = torch.abs(torch.tensor([ref])) self.register_buffer('amin', amin) self.register_buffer('ref', ref) self.top_db = top_db self.n_mfcc = n_mfcc def _power_to_db(self, S): ''' Refer to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db for the original implmentation. ''' log_spec = 10.0 * torch.log10(torch.max(S, self.amin)) log_spec -= 10.0 * torch.log10(torch.max(self.amin, self.ref)) if self.top_db is not None: if self.top_db < 0: raise ParameterError('top_db must be non-negative') # make the dim same as log_spec so that it can be broadcasted batch_wise_max = log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1) log_spec = torch.max(log_spec, batch_wise_max - self.top_db) return log_spec def _dct(self, x, norm=None): ''' Refer to https://github.com/zh217/torch-dct for the original implmentation. ''' x = x.permute(0,2,1) # make freq the last axis, since dct applies to the frequency axis x_shape = x.shape N = x_shape[-1] v = torch.cat([x[:, :, ::2], x[:, :, 1::2].flip([2])], dim=2) Vc = torch.rfft(v, 1, onesided=False) # TODO: Can make the W_r and W_i trainable here k = - torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi / (2 * N) W_r = torch.cos(k) W_i = torch.sin(k) V = Vc[:, :, :, 0] * W_r - Vc[:, :, :, 1] * W_i if norm == 'ortho': V[:, :, 0] /= np.sqrt(N) * 2 V[:, :, 1:] /= np.sqrt(N / 2) * 2 V = 2 * V return V.permute(0,2,1) # swapping back the time axis and freq axis def forward(self, x): """ Convert a batch of waveforms to MFCC. Parameters ---------- x : torch tensor Input signal should be in either of the following shapes.\n 1. ``(len_audio)``\n 2. ``(num_audio, len_audio)``\n 3. ``(num_audio, 1, len_audio)`` It will be automatically broadcast to the right shape """ x = self.melspec_layer(x) x = self._power_to_db(x) x = self._dct(x, norm='ortho')[:,:self.m_mfcc,:] return x def extra_repr(self) -> str: return 'n_mfcc = {}'.format( (self.n_mfcc) ) class CQT1992(torch.nn.Module): """ This alogrithm uses the method proposed in [1]. Please refer to :func:`~nnAudio.Spectrogram.CQT1992v2` for a more computational and memory efficient version. [1] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a constant Q transform.” (1992). 
This function is to calculate the CQT of the input signal. Input signal should be in either of the following shapes.\n 1. ``(len_audio)``\n 2. ``(num_audio, len_audio)``\n 3. ``(num_audio, 1, len_audio)`` The correct shape will be inferred autommatically if the input follows these 3 shapes. Most of the arguments follow the convention from librosa. This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``. Parameters ---------- sr : int The sampling rate for the input audio. It is used to calucate the correct ``fmin`` and ``fmax``. Setting the correct sampling rate is very important for calculating the correct frequency. hop_length : int The hop (or stride) size. Default value is 512. fmin : float The frequency for the lowest CQT bin. Default is 32.70Hz, which coresponds to the note C0. fmax : float The frequency for the highest CQT bin. Default is ``None``, therefore the higest CQT bin is inferred from the ``n_bins`` and ``bins_per_octave``. If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins`` will be calculated automatically. Default is ``None`` n_bins : int The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``. bins_per_octave : int Number of bins per octave. Default is 12. trainable_STFT : bool Determine if the time to frequency domain transformation kernel for the input audio is trainable or not. Default is ``False`` trainable_CQT : bool Determine if the frequency domain CQT kernel is trainable or not. Default is ``False`` norm : int Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization. Default is ``1``, which is same as the normalization used in librosa. window : str The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to scipy documentation for possible windowing functions. The default value is 'hann'. center : bool Putting the CQT keneral at the center of the time-step or not. If ``False``, the time index is the beginning of the CQT kernel, if ``True``, the time index is the center of the CQT kernel. Default value if ``True``. pad_mode : str The padding method. Default value is 'reflect'. trainable : bool Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels will also be caluclated and the CQT kernels will be updated during model training. Default value is ``False``. output_format : str Determine the return type. ``Magnitude`` will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``; ``Complex`` will return the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``; ``Phase`` will return the phase of the STFT reuslt, shape = ``(num_samples, freq_bins,time_steps, 2)``. The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'. verbose : bool If ``True``, it shows layer information. If ``False``, it suppresses all prints Returns ------- spectrogram : torch.tensor It returns a tensor of spectrograms. 
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``; Examples -------- >>> spec_layer = Spectrogram.CQT1992v2() >>> specs = spec_layer(x) """ def __init__(self, sr=22050, hop_length=512, fmin=220, fmax=None, n_bins=84, trainable_STFT=False, trainable_CQT=False, bins_per_octave=12, output_format='Complex', norm=1, window='hann', center=True, pad_mode='reflect'): super().__init__() # norm arg is not functioning self.hop_length = hop_length self.center = center self.pad_mode = pad_mode self.norm = norm self.output_format = output_format # creating kernels for CQT Q = 1/(2**(1/bins_per_octave)-1) print("Creating CQT kernels ...", end='\r') start = time() cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q, sr, fmin, n_bins, bins_per_octave, norm, window, fmax) self.register_buffer('lenghts', lenghts) cqt_kernels = fft(cqt_kernels)[:,:self.kernel_width//2+1] print("CQT kernels created, time used = {:.4f} seconds".format(time()-start)) # creating kernels for stft # self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width # Trying to normalize as librosa # self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width print("Creating STFT kernels ...", end='\r') start = time() kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.kernel_width, window='ones', freq_scale='no') # Converting kernels from numpy arrays to torch tensors wsin = torch.tensor(kernel_sin * window) wcos = torch.tensor(kernel_cos * window) cqt_kernels_real = torch.tensor(cqt_kernels.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(cqt_kernels.imag.astype(np.float32)) if trainable_STFT: wsin = torch.nn.Parameter(wsin, requires_grad=trainable_kernels) wcos = torch.nn.Parameter(wcos, requires_grad=trainable_kernels) self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos) else: self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos) if trainable_CQT: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels) self.register_parameter('cqt_kernels_real', cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) print("STFT kernels created, time used = {:.4f} seconds".format(time()-start)) def forward(self, x, output_format=None): """ Convert a batch of waveforms to CQT spectrograms. Parameters ---------- x : torch tensor Input signal should be in either of the following shapes.\n 1. ``(len_audio)``\n 2. ``(num_audio, len_audio)``\n 3. 
``(num_audio, 1, len_audio)`` It will be automatically broadcast to the right shape """ output_format = output_format or self.output_format x = broadcast_dim(x) if self.center: if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.kernel_width//2, 0) elif self.pad_mode == 'reflect': padding = nn.ReflectionPad1d(self.kernel_width//2) x = padding(x) # STFT fourier_real = conv1d(x, self.wcos, stride=self.hop_length) fourier_imag = conv1d(x, self.wsin, stride=self.hop_length) # CQT CQT_real, CQT_imag = complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag), (fourier_real, fourier_imag)) CQT = torch.stack((CQT_real,-CQT_imag),-1) if self.norm: CQT = CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': # Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) elif output_format=='Complex': return CQT elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real)) return torch.stack((phase_real,phase_imag), -1) def extra_repr(self) -> str: return 'STFT kernel size = {}, CQT kernel size = {}'.format( (*self.wcos.shape,), (*self.cqt_kernels_real.shape,) ) class CQT2010(torch.nn.Module): """ This algorithm is using the resampling method proposed in [1]. Instead of convoluting the STFT results with a gigantic CQT kernel covering the full frequency spectrum, we make a small CQT kernel covering only the top octave. Then we keep downsampling the input audio by a factor of 2 to convoluting it with the small CQT kernel. Everytime the input audio is downsampled, the CQT relative to the downsampled input is equavalent to the next lower octave. The kernel creation process is still same as the 1992 algorithm. Therefore, we can reuse the code from the 1992 alogrithm [2] [1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010). [2] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a constant Q transform.” (1992). early downsampling factor is to downsample the input audio to reduce the CQT kernel size. The result with and without early downsampling are more or less the same except in the very low frequency region where freq < 40Hz. 
""" def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect', trainable_STFT=False, trainable_CQT=False, output_format='Complex', earlydownsample=True, verbose=True): super().__init__() self.norm = norm # Now norm is used to normalize the final CQT result by dividing n_fft # basis_norm is for normalizing basis self.hop_length = hop_length self.pad_mode = pad_mode self.n_bins = n_bins self.output_format = output_format self.earlydownsample = earlydownsample # TODO: activate early downsampling later if possible # This will be used to calculate filter_cutoff and creating CQT kernels Q = 1/(2**(1/bins_per_octave)-1) # Creating lowpass filter and make it a torch tensor if verbose==True: print("Creating low pass filter ...", end='\r') start = time() lowpass_filter = torch.tensor(create_lowpass_filter( band_center = 0.5, kernelLength=256, transitionBandwidth=0.001 ) ) # Broadcast the tensor to the shape that fits conv1d self.register_buffer('lowpass_filter', lowpass_filter[None,None,:]) if verbose==True: print("Low pass filter created, time used = {:.4f} seconds".format(time()-start)) # Calculate num of filter requires for the kernel # n_octaves determines how many resampling requires for the CQT n_filters = min(bins_per_octave, n_bins) self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave)) # print("n_octaves = ", self.n_octaves) # Calculate the lowest frequency bin for the top octave kernel self.fmin_t = fmin*2**(self.n_octaves-1) remainder = n_bins % bins_per_octave # print("remainder = ", remainder) if remainder==0: # Calculate the top bin frequency fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave) else: # Calculate the top bin frequency fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave) self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting the top minium bins if fmax_t > sr/2: raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, \ please reduce the n_bins'.format(fmax_t)) if self.earlydownsample == True: # Do early downsampling if this argument is True if verbose==True: print("Creating early downsampling filter ...", end='\r') start = time() sr, self.hop_length, self.downsample_factor, early_downsample_filter, \ self.earlydownsample = get_early_downsample_params(sr, hop_length, fmax_t, Q, self.n_octaves, verbose) self.register_buffer('early_downsample_filter', early_downsample_filter) if verbose==True: print("Early downsampling filter created, \ time used = {:.4f} seconds".format(time()-start)) else: self.downsample_factor=1. 
# Preparing CQT kernels if verbose==True: print("Creating CQT kernels ...", end='\r') start = time() # print("Q = {}, fmin_t = {}, n_filters = {}".format(Q, self.fmin_t, n_filters)) basis, self.n_fft, _ = create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False) # This is for the normalization in the end freqs = fmin * 2.0 ** (np.r_[0:n_bins] / np.float(bins_per_octave)) lenghts = np.ceil(Q * sr / freqs) lenghts = torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis=basis fft_basis = fft(basis)[:,:self.n_fft//2+1] # Convert CQT kenral from time domain to freq domain # These cqt_kernel is already in the frequency domain cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32)) if verbose==True: print("CQT kernels created, time used = {:.4f} seconds".format(time()-start)) # print("Getting cqt kernel done, n_fft = ",self.n_fft) # Preparing kernels for Short-Time Fourier Transform (STFT) # We set the frequency range in the CQT filter instead of here. if verbose==True: print("Creating STFT kernels ...", end='\r') start = time() kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.n_fft, window='ones', freq_scale='no') wsin = kernel_sin * window wcos = kernel_cos * window wsin = torch.tensor(wsin) wcos = torch.tensor(wcos) if verbose==True: print("STFT kernels created, time used = {:.4f} seconds".format(time()-start)) if trainable_STFT: wsin = torch.nn.Parameter(wsin, requires_grad=trainable_kernels) wcos = torch.nn.Parameter(wcos, requires_grad=trainable_kernels) self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos) else: self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos) if trainable_CQT: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels) self.register_parameter('cqt_kernels_real', cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) # If center==True, the STFT window will be put in the middle, and paddings at the beginning # and ending are required. if self.pad_mode == 'constant': self.padding = nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode == 'reflect': self.padding = nn.ReflectionPad1d(self.n_fft//2) def forward(self,x, output_format=None): """ Convert a batch of waveforms to CQT spectrograms. Parameters ---------- x : torch tensor Input signal should be in either of the following shapes.\n 1. ``(len_audio)``\n 2. ``(num_audio, len_audio)``\n 3. 
``(num_audio, 1, len_audio)`` It will be automatically broadcast to the right shape """ output_format = output_format or self.output_format x = broadcast_dim(x) if self.earlydownsample==True: x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor) hop = self.hop_length CQT = get_cqt_complex(x, self.wcos, self.wsin, hop, self.padding) # Getting the top octave CQT x_down = x # Preparing a new variable for downsampling for i in range(self.n_octaves-1): hop = hop//2 x_down = downsampling_by_2(x_down, self.lowpass_filter) CQT1 = get_cqt_complex(x_down, self.wcos, self.wsin, hop, self.padding) CQT = torch.cat((CQT1, CQT),1) CQT = CQT[:,-self.n_bins:,:] # Removing unwanted top bins if self.norm: CQT = CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) # Normalizing the output with the downsampling factor, 2**(self.n_octaves-1) # is make it same mag as 1992 CQT = CQT*self.downsample_factor if output_format=='Magnitude': # Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) elif output_format=='Complex': return CQT elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag), -1) def extra_repr(self) -> str: return 'STFT kernel size = {}, CQT kernel size = {}'.format( (*self.wcos.shape,), (*self.cqt_kernels_real.shape,) ) class CQT1992v2(torch.nn.Module): """This function is to calculate the CQT of the input signal. Input signal should be in either of the following shapes.\n 1. ``(len_audio)``\n 2. ``(num_audio, len_audio)``\n 3. ``(num_audio, 1, len_audio)`` The correct shape will be inferred autommatically if the input follows these 3 shapes. Most of the arguments follow the convention from librosa. This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``. This alogrithm uses the method proposed in [1]. I slightly modify it so that it runs faster than the original 1992 algorithm, that is why I call it version 2. [1] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a constant Q transform.” (1992). Parameters ---------- sr : int The sampling rate for the input audio. It is used to calucate the correct ``fmin`` and ``fmax``. Setting the correct sampling rate is very important for calculating the correct frequency. hop_length : int The hop (or stride) size. Default value is 512. fmin : float The frequency for the lowest CQT bin. Default is 32.70Hz, which coresponds to the note C0. fmax : float The frequency for the highest CQT bin. Default is ``None``, therefore the higest CQT bin is inferred from the ``n_bins`` and ``bins_per_octave``. If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins`` will be calculated automatically. Default is ``None`` n_bins : int The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``. bins_per_octave : int Number of bins per octave. Default is 12. norm : int Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization. Default is ``1``, which is same as the normalization used in librosa. window : str The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to scipy documentation for possible windowing functions. The default value is 'hann'. center : bool Putting the CQT keneral at the center of the time-step or not. 
If ``False``, the time index is the beginning of the CQT kernel, if ``True``, the time index is the center of the CQT kernel. Default value if ``True``. pad_mode : str The padding method. Default value is 'reflect'. trainable : bool Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels will also be caluclated and the CQT kernels will be updated during model training. Default value is ``False``. output_format : str Determine the return type. ``Magnitude`` will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``; ``Complex`` will return the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``; ``Phase`` will return the phase of the STFT reuslt, shape = ``(num_samples, freq_bins,time_steps, 2)``. The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'. verbose : bool If ``True``, it shows layer information. If ``False``, it suppresses all prints Returns ------- spectrogram : torch.tensor It returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``; Examples -------- >>> spec_layer = Spectrogram.CQT1992v2() >>> specs = spec_layer(x) """ def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=1, window='hann', center=True, pad_mode='reflect', trainable=False, output_format='Magnitude', verbose=True): super().__init__() # norm arg is not functioning self.trainable = trainable self.hop_length = hop_length self.center = center self.pad_mode = pad_mode self.output_format = output_format # creating kernels for CQT Q = 1/(2**(1/bins_per_octave)-1) if verbose==True: print("Creating CQT kernels ...", end='\r') start = time() cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q, sr, fmin, n_bins, bins_per_octave, norm, window, fmax) self.register_buffer('lenghts', lenghts) cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1) cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1) if trainable: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels) self.register_parameter('cqt_kernels_real', cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) if verbose==True: print("CQT kernels created, time used = {:.4f} seconds".format(time()-start)) def forward(self,x, output_format=None): """ Convert a batch of waveforms to CQT spectrograms. Parameters ---------- x : torch tensor Input signal should be in either of the following shapes.\n 1. ``(len_audio)``\n 2. ``(num_audio, len_audio)``\n 3. 
``(num_audio, 1, len_audio)`` It will be automatically broadcast to the right shape """ output_format = output_format or self.output_format x = broadcast_dim(x) if self.center: if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.kernel_width//2, 0) elif self.pad_mode == 'reflect': padding = nn.ReflectionPad1d(self.kernel_width//2) x = padding(x) # CQT CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) * \ torch.sqrt(self.lenghts.view(-1,1)) CQT_imag = -conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) * \ torch.sqrt(self.lenghts.view(-1,1)) if output_format=='Magnitude': if self.trainable==False: # Getting CQT Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) else: CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8) return CQT elif output_format=='Complex': return torch.stack((CQT_real,CQT_imag),-1) elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real)) return torch.stack((phase_real,phase_imag), -1) def forward_manual(self,x): """ Method for debugging """ x = broadcast_dim(x) if self.center: if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.kernel_width//2, 0) elif self.pad_mode == 'reflect': padding = nn.ReflectionPad1d(self.kernel_width//2) x = padding(x) # CQT CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) CQT_imag = conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) # Getting CQT Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) return CQT*torch.sqrt(self.lenghts.view(-1,1)) class CQT2010v2(torch.nn.Module): """This function is to calculate the CQT of the input signal. Input signal should be in either of the following shapes.\n 1. ``(len_audio)``\n 2. ``(num_audio, len_audio)``\n 3. ``(num_audio, 1, len_audio)`` The correct shape will be inferred autommatically if the input follows these 3 shapes. Most of the arguments follow the convention from librosa. This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``. This alogrithm uses the resampling method proposed in [1]. Instead of convoluting the STFT results with a gigantic CQT kernel covering the full frequency spectrum, we make a small CQT kernel covering only the top octave. Then we keep downsampling the input audio by a factor of 2 to convoluting it with the small CQT kernel. Everytime the input audio is downsampled, the CQT relative to the downsampled input is equivalent to the next lower octave. The kernel creation process is still same as the 1992 algorithm. Therefore, we can reuse the code from the 1992 alogrithm [2] [1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010). [2] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a constant Q transform.” (1992). Early downsampling factor is to downsample the input audio to reduce the CQT kernel size. The result with and without early downsampling are more or less the same except in the very low frequency region where freq < 40Hz. Parameters ---------- sr : int The sampling rate for the input audio. It is used to calucate the correct ``fmin`` and ``fmax``. Setting the correct sampling rate is very important for calculating the correct frequency. hop_length : int The hop (or stride) size. Default value is 512. fmin : float The frequency for the lowest CQT bin. Default is 32.70Hz, which coresponds to the note C0. fmax : float The frequency for the highest CQT bin. 
Default is ``None``, therefore the higest CQT bin is inferred from the ``n_bins`` and ``bins_per_octave``. If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins`` will be calculated automatically. Default is ``None`` n_bins : int The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``. bins_per_octave : int Number of bins per octave. Default is 12. norm : bool Normalization for the CQT result. basis_norm : int Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization. Default is ``1``, which is same as the normalization used in librosa. window : str The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to scipy documentation for possible windowing functions. The default value is 'hann' pad_mode : str The padding method. Default value is 'reflect'. trainable : bool Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels will also be caluclated and the CQT kernels will be updated during model training. Default value is ``False`` output_format : str Determine the return type. 'Magnitude' will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins, time_steps)``; 'Complex' will return the STFT result in complex number, shape = ``(num_samples, freq_bins, time_steps, 2)``; 'Phase' will return the phase of the STFT reuslt, shape = ``(num_samples, freq_bins,time_steps, 2)``. The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'. verbose : bool If ``True``, it shows layer information. If ``False``, it suppresses all prints. device : str Choose which device to initialize this layer. Default value is 'cpu'. Returns ------- spectrogram : torch.tensor It returns a tensor of spectrograms. 
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``; Examples -------- >>> spec_layer = Spectrogram.CQT2010v2() >>> specs = spec_layer(x) """ # To DO: # need to deal with the filter and other tensors def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect', earlydownsample=True, trainable=False, output_format='Magnitude', verbose=True): super().__init__() self.norm = norm # Now norm is used to normalize the final CQT result by dividing n_fft # basis_norm is for normalizing basis self.hop_length = hop_length self.pad_mode = pad_mode self.n_bins = n_bins self.earlydownsample = earlydownsample # We will activate early downsampling later if possible self.trainable = trainable self.output_format = output_format # It will be used to calculate filter_cutoff and creating CQT kernels Q = 1/(2**(1/bins_per_octave)-1) # Creating lowpass filter and make it a torch tensor if verbose==True: print("Creating low pass filter ...", end='\r') start = time() # self.lowpass_filter = torch.tensor( # create_lowpass_filter( # band_center = 0.50, # kernelLength=256, # transitionBandwidth=0.001)) lowpass_filter = torch.tensor(create_lowpass_filter( band_center = 0.50, kernelLength=256, transitionBandwidth=0.001) ) # Broadcast the tensor to the shape that fits conv1d self.register_buffer('lowpass_filter', lowpass_filter[None,None,:]) if verbose==True: print("Low pass filter created, time used = {:.4f} seconds".format(time()-start)) # Caluate num of filter requires for the kernel # n_octaves determines how many resampling requires for the CQT n_filters = min(bins_per_octave, n_bins) self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave)) if verbose==True: print("num_octave = ", self.n_octaves) # Calculate the lowest frequency bin for the top octave kernel self.fmin_t = fmin*2**(self.n_octaves-1) remainder = n_bins % bins_per_octave # print("remainder = ", remainder) if remainder==0: # Calculate the top bin frequency fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave) else: # Calculate the top bin frequency fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave) self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting the top minium bins if fmax_t > sr/2: raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, \ please reduce the n_bins'.format(fmax_t)) if self.earlydownsample == True: # Do early downsampling if this argument is True if verbose==True: print("Creating early downsampling filter ...", end='\r') start = time() sr, self.hop_length, self.downsample_factor, early_downsample_filter, \ self.earlydownsample = get_early_downsample_params(sr, hop_length, fmax_t, Q, self.n_octaves, verbose) self.register_buffer('early_downsample_filter', early_downsample_filter) if verbose==True: print("Early downsampling filter created, \ time used = {:.4f} seconds".format(time()-start)) else: self.downsample_factor=1. 
# Preparing CQT kernels if verbose==True: print("Creating CQT kernels ...", end='\r') start = time() basis, self.n_fft, lenghts = create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False) # For normalization in the end freqs = fmin * 2.0 ** (np.r_[0:n_bins] / np.float(bins_per_octave)) lenghts = np.ceil(Q * sr / freqs) lenghts = torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis = basis # These cqt_kernel is already in the frequency domain cqt_kernels_real = torch.tensor(basis.real.astype(np.float32)).unsqueeze(1) cqt_kernels_imag = torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1) if trainable: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels) self.register_parameter('cqt_kernels_real', cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) if verbose==True: print("CQT kernels created, time used = {:.4f} seconds".format(time()-start)) # print("Getting cqt kernel done, n_fft = ",self.n_fft) # If center==True, the STFT window will be put in the middle, and paddings at the beginning # and ending are required. if self.pad_mode == 'constant': self.padding = nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode == 'reflect': self.padding = nn.ReflectionPad1d(self.n_fft//2) def forward(self,x,output_format=None): """ Convert a batch of waveforms to CQT spectrograms. Parameters ---------- x : torch tensor Input signal should be in either of the following shapes.\n 1. ``(len_audio)``\n 2. ``(num_audio, len_audio)``\n 3. ``(num_audio, 1, len_audio)`` It will be automatically broadcast to the right shape """ output_format = output_format or self.output_format x = broadcast_dim(x) if self.earlydownsample==True: x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor) hop = self.hop_length CQT = get_cqt_complex(x, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) # Getting the top octave CQT x_down = x # Preparing a new variable for downsampling for i in range(self.n_octaves-1): hop = hop//2 x_down = downsampling_by_2(x_down, self.lowpass_filter) CQT1 = get_cqt_complex(x_down, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) CQT = torch.cat((CQT1, CQT),1) CQT = CQT[:,-self.n_bins:,:] # Removing unwanted bottom bins # print("downsample_factor = ",self.downsample_factor) # print(CQT.shape) # print(self.lenghts.view(-1,1).shape) # Normalizing the output with the downsampling factor, 2**(self.n_octaves-1) is make it # same mag as 1992 CQT = CQT*self.downsample_factor # Normalize again to get same result as librosa CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': if self.trainable==False: # Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) else: return torch.sqrt(CQT.pow(2).sum(-1)+1e-8) elif output_format=='Complex': return CQT elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag), -1) class CQT(CQT1992v2): """An abbreviation for :func:`~nnAudio.Spectrogram.CQT1992v2`. 
Please refer to the :func:`~nnAudio.Spectrogram.CQT1992v2` documentation""" pass # The section below is for developing purpose # Please don't use the following classes # class DFT(torch.nn.Module): """ Experimental feature before `torch.fft` was made avaliable. The inverse function only works for 1 single frame. i.e. input shape = (batch, n_fft, 1) """ def __init__(self, n_fft=2048, freq_bins=None, hop_length=512, window='hann', freq_scale='no', center=True, pad_mode='reflect', fmin=50, fmax=6000, sr=22050): super().__init__() self.stride = hop_length self.center = center self.pad_mode = pad_mode self.n_fft = n_fft # Create filter windows for stft wsin, wcos, self.bins2freq = create_fourier_kernels(n_fft=n_fft, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr) self.wsin = torch.tensor(wsin, dtype=torch.float) self.wcos = torch.tensor(wcos, dtype=torch.float) def forward(self,x): """ Convert a batch of waveforms to spectrums. Parameters ---------- x : torch tensor Input signal should be in either of the following shapes.\n 1. ``(len_audio)``\n 2. ``(num_audio, len_audio)``\n 3. ``(num_audio, 1, len_audio)`` It will be automatically broadcast to the right shape """ x = broadcast_dim(x) if self.center: if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode == 'reflect': padding = nn.ReflectionPad1d(self.n_fft//2) x = padding(x) imag = conv1d(x, self.wsin, stride=self.stride) real = conv1d(x, self.wcos, stride=self.stride) return (real, -imag) def inverse(self,x_real,x_imag): """ Convert a batch of waveforms to CQT spectrograms. Parameters ---------- x_real : torch tensor Real part of the signal. x_imag : torch tensor Imaginary part of the signal. """ x_real = broadcast_dim(x_real) x_imag = broadcast_dim(x_imag) x_real.transpose_(1,2) # Prepare the right shape to do inverse x_imag.transpose_(1,2) # Prepare the right shape to do inverse # if self.center: # if self.pad_mode == 'constant': # padding = nn.ConstantPad1d(self.n_fft//2, 0) # elif self.pad_mode == 'reflect': # padding = nn.ReflectionPad1d(self.n_fft//2) # x_real = padding(x_real) # x_imag = padding(x_imag) # Watch out for the positive and negative signs # ifft = e^(+2\pi*j)*X # ifft(X_real) = (a1, a2) # ifft(X_imag)*1j = (b1, b2)*1j # = (-b2, b1) a1 = conv1d(x_real, self.wcos, stride=self.stride) a2 = conv1d(x_real, self.wsin, stride=self.stride) b1 = conv1d(x_imag, self.wcos, stride=self.stride) b2 = conv1d(x_imag, self.wsin, stride=self.stride) imag = a2+b1 real = a1-b2 return (real/self.n_fft, imag/self.n_fft) class iSTFT(torch.nn.Module): """This class is to convert spectrograms back to waveforms. It only works for the complex value spectrograms. If you have the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. The parameters (e.g. n_fft, window) need to be the same as the STFT in order to obtain the correct inverse. If trainability is not required, it is recommended to use the ``inverse`` method under the ``STFT`` class to save GPU/RAM memory. When ``trainable=True`` and ``freq_scale!='no'``, there is no guarantee that the inverse is perfect, please use with extra care. Parameters ---------- n_fft : int The window size. Default value is 2048. freq_bins : int Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins Please make sure the value is the same as the forward STFT. hop_length : int The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``. 
Please make sure the value is the same as the forward STFT. window : str The windowing function for iSTFT. It uses ``scipy.signal.get_window``, please refer to scipy documentation for possible windowing functions. The default value is 'hann'. Please make sure the value is the same as the forward STFT. freq_scale : 'linear', 'log', or 'no' Determine the spacing between each frequency bin. When `linear` or `log` is used, the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will start at 0Hz and end at Nyquist frequency with linear spacing. Please make sure the value is the same as the forward STFT. center : bool Putting the iSTFT keneral at the center of the time-step or not. If ``False``, the time index is the beginning of the iSTFT kernel, if ``True``, the time index is the center of the iSTFT kernel. Default value if ``True``. Please make sure the value is the same as the forward STFT. fmin : int The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument does nothing. Please make sure the value is the same as the forward STFT. fmax : int The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument does nothing. Please make sure the value is the same as the forward STFT. sr : int The sampling rate for the input audio. It is used to calucate the correct ``fmin`` and ``fmax``. Setting the correct sampling rate is very important for calculating the correct frequency. trainable_kernels : bool Determine if the STFT kenrels are trainable or not. If ``True``, the gradients for STFT kernels will also be caluclated and the STFT kernels will be updated during model training. Default value is ``False``. trainable_window : bool Determine if the window function is trainable or not. Default value is ``False``. verbose : bool If ``True``, it shows layer information. If ``False``, it suppresses all prints. device : str Choose which device to initialize this layer. Default value is 'cpu'. Returns ------- spectrogram : torch.tensor It returns a batch of waveforms. 
Examples -------- >>> spec_layer = Spectrogram.iSTFT() >>> specs = spec_layer(x) """ def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann', freq_scale='no', center=True, fmin=50, fmax=6000, sr=22050, trainable_kernels=False, trainable_window=False, verbose=True, refresh_win=True): super().__init__() # Trying to make the default setting same as librosa if win_length==None: win_length = n_fft if hop_length==None: hop_length = int(win_length // 4) self.n_fft = n_fft self.win_length = win_length self.stride = hop_length self.center = center self.pad_amount = self.n_fft // 2 self.refresh_win = refresh_win start = time() # Create the window function and prepare the shape for batch-wise-time-wise multiplication # Create filter windows for inverse kernel_sin, kernel_cos, _, _, window_mask = create_fourier_kernels(n_fft, win_length=win_length, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr, verbose=False) window_mask = get_window(window,int(win_length), fftbins=True) # For inverse, the Fourier kernels do not need to be windowed window_mask = torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1) # kernel_sin and kernel_cos have the shape (freq_bins, 1, n_fft, 1) to support 2D Conv kernel_sin = torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1) kernel_cos = torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1) # Decide if the Fourier kernels are trainable if trainable_kernels: # Making all these variables trainable kernel_sin = torch.nn.Parameter(kernel_sin, requires_grad=trainable_kernels) kernel_cos = torch.nn.Parameter(kernel_cos, requires_grad=trainable_kernels) self.register_parameter('kernel_sin', kernel_sin) self.register_parameter('kernel_cos', kernel_cos) else: self.register_buffer('kernel_sin', kernel_sin) self.register_buffer('kernel_cos', kernel_cos) # Decide if the window function is trainable if trainable_window: window_mask = torch.nn.Parameter(window_mask, requires_grad=trainable_window) self.register_parameter('window_mask', window_mask) else: self.register_buffer('window_mask', window_mask) if verbose==True: print("iSTFT kernels created, time used = {:.4f} seconds".format(time()-start)) else: pass def forward(self, X, onesided=False, length=None, refresh_win=None): """ If your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``, else use ``onesided=False`` To make sure the inverse STFT has the same output length of the original waveform, please set `length` as your intended waveform length. By default, ``length=None``, which will remove ``n_fft//2`` samples from the start and the end of the output. If your input spectrograms X are of the same length, please use ``refresh_win=None`` to increase computational speed. """ if refresh_win==None: refresh_win=self.refresh_win assert X.dim()==4 , "Inverse iSTFT only works for complex number," \ "make sure our tensor is in the shape of (batch, freq_bins, timesteps, 2)" # If the input spectrogram contains only half of the n_fft # Use extend_fbins function to get back another half if onesided: X = extend_fbins(X) # extend freq X_real, X_imag = X[:, :, :, 0], X[:, :, :, 1] # broadcast dimensions to support 2D convolution X_real_bc = X_real.unsqueeze(1) X_imag_bc = X_imag.unsqueeze(1) a1 = conv2d(X_real_bc, self.kernel_cos, stride=(1,1)) b2 = conv2d(X_imag_bc, self.kernel_sin, stride=(1,1)) # compute real and imag part. 
signal lies in the real part real = a1 - b2 real = real.squeeze(-2)*self.window_mask # Normalize the amplitude with n_fft real /= (self.n_fft) # Overlap and Add algorithm to connect all the frames real = overlap_add(real, self.stride) # Prepare the window sumsqure for division # Only need to create this window once to save time # Unless the input spectrograms have different time steps if hasattr(self, 'w_sum')==False or refresh_win==True: self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten() self.nonzero_indices = (self.w_sum>1e-10) else: pass real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices]) # Remove padding if length is None: if self.center: real = real[:, self.pad_amount:-self.pad_amount] else: if self.center: real = real[:, self.pad_amount:self.pad_amount + length] else: real = real[:, :length] return real class Griffin_Lim(torch.nn.Module): """ Converting Magnitude spectrograms back to waveforms based on the "fast Griffin-Lim"[1]. This Griffin Lim is a direct clone from librosa.griffinlim. [1] Perraudin, N., Balazs, P., & Søndergaard, P. L. “A fast Griffin-Lim algorithm,” IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (pp. 1-4), Oct. 2013. Parameters ---------- n_fft : int The window size. Default value is 2048. n_iter=32 : int The number of iterations for Griffin-Lim. The default value is ``32`` hop_length : int The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``. Please make sure the value is the same as the forward STFT. window : str The windowing function for iSTFT. It uses ``scipy.signal.get_window``, please refer to scipy documentation for possible windowing functions. The default value is 'hann'. Please make sure the value is the same as the forward STFT. center : bool Putting the iSTFT keneral at the center of the time-step or not. If ``False``, the time index is the beginning of the iSTFT kernel, if ``True``, the time index is the center of the iSTFT kernel. Default value if ``True``. Please make sure the value is the same as the forward STFT. momentum : float The momentum for the update rule. The default value is ``0.99``. device : str Choose which device to initialize this layer. Default value is 'cpu' """ def __init__(self, n_fft, n_iter=32, hop_length=None, win_length=None, window='hann', center=True, pad_mode='reflect', momentum=0.99, device='cpu'): super().__init__() self.n_fft = n_fft self.win_length = win_length self.n_iter = n_iter self.center = center self.pad_mode = pad_mode self.momentum = momentum self.device = device if win_length==None: self.win_length=n_fft else: self.win_length=win_length if hop_length==None: self.hop_length = n_fft//4 else: self.hop_length = hop_length # Creating window function for stft and istft later self.w = torch.tensor(get_window(window, int(self.win_length), fftbins=True), device=device).float() def forward(self, S): """ Convert a batch of magnitude spectrograms to waveforms. 
Parameters ---------- S : torch tensor Spectrogram of the shape ``(batch, n_fft//2+1, timesteps)`` """ assert S.dim()==3 , "Please make sure your input is in the shape of (batch, freq_bins, timesteps)" # Initializing Random Phase rand_phase = torch.randn(*S.shape, device=self.device) angles = torch.empty((*S.shape,2), device=self.device) angles[:, :,:,0] = torch.cos(2 * np.pi * rand_phase) angles[:,:,:,1] = torch.sin(2 * np.pi * rand_phase) # Initializing the rebuilt magnitude spectrogram rebuilt = torch.zeros(*angles.shape, device=self.device) for _ in range(self.n_iter): tprev = rebuilt # Saving previous rebuilt magnitude spec # spec2wav conversion # print(f'win_length={self.win_length}\tw={self.w.shape}') inverse = torch.istft(S.unsqueeze(-1) * angles, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, center=self.center) # wav2spec conversion rebuilt = torch.stft(inverse, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, pad_mode=self.pad_mode) # Phase update rule angles[:,:,:] = rebuilt[:,:,:] - (self.momentum / (1 + self.momentum)) * tprev[:,:,:] # Phase normalization angles = angles.div(torch.sqrt(angles.pow(2).sum(-1)).unsqueeze(-1) + 1e-16) # normalizing the phase # Using the final phase to reconstruct the waveforms inverse = torch.istft(S.unsqueeze(-1) * angles, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, center=self.center) return inverse
[((127, 16, 127, 22), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((143, 21, 143, 64), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((144, 21, 144, 64), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((161, 22, 161, 47), 'torch.tensor', 'torch.tensor', ({(161, 35, 161, 46): 'window_mask'}, {}), '(window_mask)', False, 'import torch\n'), ((218, 20, 218, 60), 'torch.nn.functional.conv1d', 'conv1d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((219, 20, 219, 60), 'torch.nn.functional.conv1d', 'conv1d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((277, 13, 277, 65), 'torch.nn.functional.conv2d', 'conv2d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((278, 13, 278, 65), 'torch.nn.functional.conv2d', 'conv2d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((417, 16, 417, 22), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((420, 16, 420, 22), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((422, 20, 422, 43), 'torch.tensor', 'torch.tensor', ({(422, 33, 422, 42): 'mel_basis'}, {}), '(mel_basis)', False, 'import torch\n'), ((460, 18, 460, 52), 'torch.matmul', 'torch.matmul', ({(460, 31, 460, 45): 'self.mel_basis', (460, 47, 460, 51): 'spec'}, {}), '(self.mel_basis, spec)', False, 'import torch\n'), ((496, 20, 496, 63), 'torch.nn.Parameter', 'nn.Parameter', (), '', True, 'import torch.nn as nn\n'), ((499, 20, 499, 62), 'torch.optim.SGD', 'torch.optim.SGD', ({(499, 36, 499, 47): '[pred_stft]'}, {}), '([pred_stft], **sgd_kwargs)', False, 'import torch\n'), ((593, 15, 593, 35), 'torch.tensor', 'torch.tensor', ({(593, 28, 593, 34): '[amin]'}, {}), '([amin])', False, 'import torch\n'), ((627, 13, 627, 45), 'torch.rfft', 'torch.rfft', (), '', False, 'import torch\n'), ((631, 14, 631, 26), 'torch.cos', 'torch.cos', ({(631, 24, 631, 25): 'k'}, {}), '(k)', False, 'import torch\n'), ((632, 14, 632, 26), 'torch.sin', 'torch.sin', ({(632, 24, 632, 25): 'k'}, {}), '(k)', False, 'import torch\n'), ((783, 16, 783, 22), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((802, 16, 802, 22), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((808, 15, 808, 48), 'torch.tensor', 'torch.tensor', ({(808, 28, 808, 47): 'kernel_sin * window'}, {}), '(kernel_sin * window)', False, 'import torch\n'), ((809, 15, 809, 48), 'torch.tensor', 'torch.tensor', ({(809, 28, 809, 47): 'kernel_cos * window'}, {}), '(kernel_cos * window)', False, 'import torch\n'), ((859, 23, 859, 67), 'torch.nn.functional.conv1d', 'conv1d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((860, 23, 860, 67), 'torch.nn.functional.conv1d', 'conv1d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((866, 14, 866, 50), 'torch.stack', 'torch.stack', ({(866, 26, 866, 46): '(CQT_real, -CQT_imag)', (866, 47, 866, 49): '-1'}, {}), '((CQT_real, -CQT_imag), -1)', False, 'import torch\n'), ((932, 16, 932, 22), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((992, 16, 992, 22), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((1004, 18, 1004, 41), 'numpy.ceil', 'np.ceil', ({(1004, 26, 1004, 40): 'Q * sr / freqs'}, {}), '(Q * sr / freqs)', True, 'import numpy as np\n'), ((1026, 16, 1026, 22), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((1031, 15, 1031, 33), 
'torch.tensor', 'torch.tensor', ({(1031, 28, 1031, 32): 'wsin'}, {}), '(wsin)', False, 'import torch\n'), ((1032, 15, 1032, 33), 'torch.tensor', 'torch.tensor', ({(1032, 28, 1032, 32): 'wcos'}, {}), '(wcos)', False, 'import torch\n'), ((1224, 16, 1224, 22), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((1313, 19, 1313, 75), 'torch.nn.functional.conv1d', 'conv1d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((1314, 19, 1314, 75), 'torch.nn.functional.conv1d', 'conv1d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((1442, 16, 1442, 22), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((1505, 16, 1505, 22), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((1516, 18, 1516, 41), 'numpy.ceil', 'np.ceil', ({(1516, 26, 1516, 40): 'Q * sr / freqs'}, {}), '(Q * sr / freqs)', True, 'import numpy as np\n'), ((1637, 20, 1637, 57), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((1638, 20, 1638, 57), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((1662, 15, 1662, 55), 'torch.nn.functional.conv1d', 'conv1d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((1663, 15, 1663, 55), 'torch.nn.functional.conv1d', 'conv1d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((1700, 13, 1700, 58), 'torch.nn.functional.conv1d', 'conv1d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((1701, 13, 1701, 58), 'torch.nn.functional.conv1d', 'conv1d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((1702, 13, 1702, 58), 'torch.nn.functional.conv1d', 'conv1d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((1703, 13, 1703, 58), 'torch.nn.functional.conv1d', 'conv1d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((1808, 16, 1808, 22), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((1885, 13, 1885, 61), 'torch.nn.functional.conv2d', 'conv2d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((1886, 13, 1886, 61), 'torch.nn.functional.conv2d', 'conv2d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((2008, 21, 2008, 62), 'torch.randn', 'torch.randn', (), '', False, 'import torch\n'), ((2009, 17, 2009, 62), 'torch.empty', 'torch.empty', (), '', False, 'import torch\n'), ((2010, 27, 2010, 60), 'torch.cos', 'torch.cos', ({(2010, 37, 2010, 59): '2 * np.pi * rand_phase'}, {}), '(2 * np.pi * rand_phase)', False, 'import torch\n'), ((2011, 26, 2011, 59), 'torch.sin', 'torch.sin', ({(2011, 36, 2011, 58): '2 * np.pi * rand_phase'}, {}), '(2 * np.pi * rand_phase)', False, 'import torch\n'), ((2014, 18, 2014, 64), 'torch.zeros', 'torch.zeros', (), '', False, 'import torch\n'), ((170, 19, 170, 73), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((171, 19, 171, 73), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((432, 24, 432, 82), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((594, 24, 594, 43), 'torch.tensor', 'torch.tensor', ({(594, 37, 594, 42): '[ref]'}, {}), '([ref])', False, 'import torch\n'), ((614, 23, 614, 72), 'torch.max', 'torch.max', ({(614, 33, 614, 41): 'log_spec', (614, 43, 614, 71): 'batch_wise_max - self.top_db'}, {}), '(log_spec, batch_wise_max - self.top_db)', False, 'import torch\n'), ((815, 19, 815, 76), 
'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((816, 19, 816, 76), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((824, 31, 824, 100), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((825, 31, 825, 100), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((972, 20, 972, 26), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((1038, 19, 1038, 76), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((1039, 19, 1039, 76), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((1047, 31, 1047, 100), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((1048, 31, 1048, 100), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((1058, 27, 1058, 61), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', ({(1058, 44, 1058, 57): 'self.n_fft // 2', (1058, 59, 1058, 60): '0'}, {}), '(self.n_fft // 2, 0)', True, 'import torch.nn as nn\n'), ((1089, 18, 1089, 42), 'torch.cat', 'torch.cat', ({(1089, 28, 1089, 39): '(CQT1, CQT)', (1089, 40, 1089, 41): '1'}, {}), '((CQT1, CQT), 1)', False, 'import torch\n'), ((1240, 31, 1240, 100), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((1241, 31, 1241, 100), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((1277, 19, 1277, 75), 'torch.nn.functional.conv1d', 'conv1d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((1486, 20, 1486, 26), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((1526, 31, 1526, 100), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((1527, 31, 1527, 100), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((1542, 27, 1542, 61), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', ({(1542, 44, 1542, 57): 'self.n_fft // 2', (1542, 59, 1542, 60): '0'}, {}), '(self.n_fft // 2, 0)', True, 'import torch.nn as nn\n'), ((1574, 18, 1574, 42), 'torch.cat', 'torch.cat', ({(1574, 28, 1574, 39): '(CQT1, CQT)', (1574, 40, 1574, 41): '1'}, {}), '((CQT1, CQT), 1)', False, 'import torch\n'), ((1834, 25, 1834, 88), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((1835, 25, 1835, 88), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((1845, 26, 1845, 89), 'torch.nn.Parameter', 'torch.nn.Parameter', (), '', False, 'import torch\n'), ((2028, 22, 2033, 56), 'torch.stft', 'torch.stft', (), '', False, 'import torch\n'), ((210, 26, 210, 62), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', ({(210, 43, 210, 58): 'self.pad_amount', (210, 60, 210, 61): '0'}, {}), '(self.pad_amount, 0)', True, 'import torch.nn as nn\n'), ((228, 23, 228, 44), 'torch.sqrt', 'torch.sqrt', ({(228, 34, 228, 43): '(spec + 1e-08)'}, {}), '(spec + 1e-08)', False, 'import torch\n'), ((230, 23, 230, 39), 'torch.sqrt', 'torch.sqrt', ({(230, 34, 230, 38): 'spec'}, {}), '(spec)', False, 'import torch\n'), ((233, 19, 233, 58), 'torch.stack', 'torch.stack', ({(233, 31, 233, 53): '(spec_real, -spec_imag)', (233, 55, 233, 57): '(-1)'}, {}), '((spec_real, -spec_imag), -1)', False, 'import torch\n'), ((606, 38, 606, 61), 'torch.max', 'torch.max', ({(606, 48, 606, 49): 'S', (606, 51, 606, 60): 'self.amin'}, {}), '(S, self.amin)', False, 'import torch\n'), ((607, 39, 607, 69), 'torch.max', 'torch.max', ({(607, 49, 607, 58): 'self.amin', (607, 60, 607, 68): 
'self.ref'}, {}), '(self.amin, self.ref)', False, 'import torch\n'), ((637, 26, 637, 36), 'numpy.sqrt', 'np.sqrt', ({(637, 34, 637, 35): 'N'}, {}), '(N)', True, 'import numpy as np\n'), ((638, 27, 638, 41), 'numpy.sqrt', 'np.sqrt', ({(638, 35, 638, 40): '(N / 2)'}, {}), '(N / 2)', True, 'import numpy as np\n'), ((852, 26, 852, 67), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', ({(852, 43, 852, 63): 'self.kernel_width // 2', (852, 65, 852, 66): '0'}, {}), '(self.kernel_width // 2, 0)', True, 'import torch.nn as nn\n'), ((1005, 18, 1005, 39), 'torch.tensor', 'torch.tensor', ({(1005, 31, 1005, 38): 'lenghts'}, {}), '(lenghts)', False, 'import torch\n'), ((1060, 27, 1060, 60), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', ({(1060, 46, 1060, 59): 'self.n_fft // 2'}, {}), '(self.n_fft // 2)', True, 'import torch.nn as nn\n'), ((1236, 27, 1236, 57), 'torch.tensor', 'torch.tensor', ({(1236, 40, 1236, 56): 'cqt_kernels.real'}, {}), '(cqt_kernels.real)', False, 'import torch\n'), ((1237, 27, 1237, 57), 'torch.tensor', 'torch.tensor', ({(1237, 40, 1237, 56): 'cqt_kernels.imag'}, {}), '(cqt_kernels.imag)', False, 'import torch\n'), ((1270, 26, 1270, 67), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', ({(1270, 43, 1270, 63): 'self.kernel_width // 2', (1270, 65, 1270, 66): '0'}, {}), '(self.kernel_width // 2, 0)', True, 'import torch.nn as nn\n'), ((1279, 20, 1279, 76), 'torch.nn.functional.conv1d', 'conv1d', (), '', False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((1291, 19, 1291, 54), 'torch.stack', 'torch.stack', ({(1291, 31, 1291, 50): '(CQT_real, CQT_imag)', (1291, 51, 1291, 53): '(-1)'}, {}), '((CQT_real, CQT_imag), -1)', False, 'import torch\n'), ((1306, 26, 1306, 67), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', ({(1306, 43, 1306, 63): 'self.kernel_width // 2', (1306, 65, 1306, 66): '0'}, {}), '(self.kernel_width // 2, 0)', True, 'import torch.nn as nn\n'), ((1517, 18, 1517, 39), 'torch.tensor', 'torch.tensor', ({(1517, 31, 1517, 38): 'lenghts'}, {}), '(lenghts)', False, 'import torch\n'), ((1544, 27, 1544, 60), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', ({(1544, 46, 1544, 59): 'self.n_fft // 2'}, {}), '(self.n_fft // 2)', True, 'import torch.nn as nn\n'), ((1656, 26, 1656, 60), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', ({(1656, 43, 1656, 56): 'self.n_fft // 2', (1656, 58, 1656, 59): '0'}, {}), '(self.n_fft // 2, 0)', True, 'import torch.nn as nn\n'), ((1828, 21, 1828, 64), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((1829, 21, 1829, 64), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((215, 26, 215, 61), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', ({(215, 45, 215, 60): 'self.pad_amount'}, {}), '(self.pad_amount)', True, 'import torch.nn as nn\n'), ((236, 19, 236, 56), 'torch.atan2', 'torch.atan2', ({(236, 31, 236, 45): '(-spec_imag + 0.0)', (236, 46, 236, 55): 'spec_real'}, {}), '(-spec_imag + 0.0, spec_real)', False, 'import torch\n'), ((795, 71, 795, 77), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((832, 72, 832, 78), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((854, 26, 854, 66), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', ({(854, 45, 854, 65): 'self.kernel_width // 2'}, {}), '(self.kernel_width // 2)', True, 'import torch.nn as nn\n'), ((883, 19, 883, 59), 'torch.stack', 'torch.stack', ({(883, 31, 883, 54): '(phase_real, phase_imag)', (883, 56, 883, 58): '(-1)'}, {}), '((phase_real, phase_imag), -1)', False, 'import torch\n'), ((1003, 49, 1003, 
74), 'numpy.float', 'np.float', ({(1003, 58, 1003, 73): 'bins_per_octave'}, {}), '(bins_per_octave)', True, 'import numpy as np\n'), ((1113, 19, 1113, 59), 'torch.stack', 'torch.stack', ({(1113, 31, 1113, 54): '(phase_real, phase_imag)', (1113, 56, 1113, 58): '(-1)'}, {}), '((phase_real, phase_imag), -1)', False, 'import torch\n'), ((1272, 26, 1272, 66), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', ({(1272, 45, 1272, 65): 'self.kernel_width // 2'}, {}), '(self.kernel_width // 2)', True, 'import torch.nn as nn\n'), ((1296, 19, 1296, 59), 'torch.stack', 'torch.stack', ({(1296, 31, 1296, 54): '(phase_real, phase_imag)', (1296, 56, 1296, 58): '(-1)'}, {}), '((phase_real, phase_imag), -1)', False, 'import torch\n'), ((1308, 26, 1308, 66), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', ({(1308, 45, 1308, 65): 'self.kernel_width // 2'}, {}), '(self.kernel_width // 2)', True, 'import torch.nn as nn\n'), ((1515, 49, 1515, 74), 'numpy.float', 'np.float', ({(1515, 58, 1515, 73): 'bins_per_octave'}, {}), '(bins_per_octave)', True, 'import numpy as np\n'), ((1600, 19, 1600, 59), 'torch.stack', 'torch.stack', ({(1600, 31, 1600, 54): '(phase_real, phase_imag)', (1600, 56, 1600, 58): '(-1)'}, {}), '((phase_real, phase_imag), -1)', False, 'import torch\n'), ((1658, 26, 1658, 59), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', ({(1658, 45, 1658, 58): 'self.n_fft // 2'}, {}), '(self.n_fft // 2)', True, 'import torch.nn as nn\n'), ((182, 76, 182, 82), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((425, 75, 425, 81), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((426, 74, 426, 80), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((495, 25, 495, 50), 'torch.pinverse', 'torch.pinverse', ({(495, 40, 495, 49): 'mel_basis'}, {}), '(mel_basis)', False, 'import torch\n'), ((630, 14, 630, 61), 'torch.arange', 'torch.arange', (), '', False, 'import torch\n'), ((881, 35, 881, 65), 'torch.atan2', 'torch.atan2', ({(881, 47, 881, 55): 'CQT_imag', (881, 56, 881, 64): 'CQT_real'}, {}), '(CQT_imag, CQT_real)', False, 'import torch\n'), ((882, 35, 882, 65), 'torch.atan2', 'torch.atan2', ({(882, 47, 882, 55): 'CQT_imag', (882, 56, 882, 64): 'CQT_real'}, {}), '(CQT_imag, CQT_real)', False, 'import torch\n'), ((944, 79, 944, 85), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((1017, 75, 1017, 81), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((1035, 76, 1035, 82), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((1111, 35, 1111, 73), 'torch.atan2', 'torch.atan2', ({(1111, 47, 1111, 59): 'CQT[:, :, :, (1)]', (1111, 60, 1111, 72): 'CQT[:, :, :, (0)]'}, {}), '(CQT[:, :, :, (1)], CQT[:, :, :, (0)])', False, 'import torch\n'), ((1112, 35, 1112, 73), 'torch.atan2', 'torch.atan2', ({(1112, 47, 1112, 59): 'CQT[:, :, :, (1)]', (1112, 60, 1112, 72): 'CQT[:, :, :, (0)]'}, {}), '(CQT[:, :, :, (1)], CQT[:, :, :, (0)])', False, 'import torch\n'), ((1249, 75, 1249, 81), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((1294, 35, 1294, 65), 'torch.atan2', 'torch.atan2', ({(1294, 47, 1294, 55): 'CQT_imag', (1294, 56, 1294, 64): 'CQT_real'}, {}), '(CQT_imag, CQT_real)', False, 'import torch\n'), ((1295, 35, 1295, 65), 'torch.atan2', 'torch.atan2', ({(1295, 47, 1295, 55): 'CQT_imag', (1295, 56, 1295, 64): 'CQT_real'}, {}), '(CQT_imag, CQT_real)', False, 'import torch\n'), ((1457, 79, 1457, 85), 'time.time', 'time', ({}, {}), '()', False, 'from time import 
time\n'), ((1536, 75, 1536, 81), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((1598, 35, 1598, 73), 'torch.atan2', 'torch.atan2', ({(1598, 47, 1598, 59): 'CQT[:, :, :, (1)]', (1598, 60, 1598, 72): 'CQT[:, :, :, (0)]'}, {}), '(CQT[:, :, :, (1)], CQT[:, :, :, (0)])', False, 'import torch\n'), ((1599, 35, 1599, 73), 'torch.atan2', 'torch.atan2', ({(1599, 47, 1599, 59): 'CQT[:, :, :, (1)]', (1599, 60, 1599, 72): 'CQT[:, :, :, (0)]'}, {}), '(CQT[:, :, :, (1)], CQT[:, :, :, (0)])', False, 'import torch\n'), ((1825, 22, 1825, 47), 'torch.tensor', 'torch.tensor', ({(1825, 35, 1825, 46): 'window_mask'}, {}), '(window_mask)', False, 'import torch\n'), ((1852, 77, 1852, 83), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((493, 24, 493, 99), 'torch.zeros', 'torch.zeros', (), '', False, 'import torch\n'), ((984, 63, 984, 69), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((1498, 59, 1498, 65), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n')]
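# A minimal, hedged usage sketch for the Griffin_Lim layer defined in the file above.
# It assumes this file is nnAudio's Spectrogram module (as its own docstrings suggest),
# that the torch version matches what the module was written against, and the concrete
# values (n_fft=2048, 100 frames) are illustrative only. The constructor arguments and
# the (batch, n_fft//2+1, timesteps) input shape come straight from the class definition.
import torch
from nnAudio import Spectrogram  # assumed module name

griffin_lim = Spectrogram.Griffin_Lim(n_fft=2048, n_iter=32, hop_length=512, device="cpu")
magnitude = torch.rand(1, 2048 // 2 + 1, 100)  # fake magnitude spectrogram, shape (batch, n_fft//2+1, timesteps)
waveform = griffin_lim(magnitude)               # reconstructed audio, shape (1, num_samples)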
hui-won/KoBART_Project
train.py
105608997473abc669d777c588d56382efb524c6
import argparse import logging import os import numpy as np import pandas as pd import pytorch_lightning as pl import torch from pytorch_lightning import loggers as pl_loggers from torch.utils.data import DataLoader, Dataset from dataset import KoBARTSummaryDataset from transformers import BartForConditionalGeneration, PreTrainedTokenizerFast from transformers.optimization import AdamW, get_cosine_schedule_with_warmup from kobart import get_pytorch_kobart_model, get_kobart_tokenizer parser = argparse.ArgumentParser(description='KoBART translation') parser.add_argument('--checkpoint_path', type=str, help='checkpoint path') logger = logging.getLogger() logger.setLevel(logging.INFO) class ArgsBase(): @staticmethod def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--train_file', type=str, default='data/train.tsv', help='train file') parser.add_argument('--test_file', type=str, default='data/test.tsv', help='test file') parser.add_argument('--batch_size', type=int, default=28, help='') parser.add_argument('--max_len', type=int, default=512, help='max seq len') return parser class KobartSummaryModule(pl.LightningDataModule): def __init__(self, train_file, test_file, tok, max_len=512, batch_size=8, num_workers=5): super().__init__() self.batch_size = batch_size self.max_len = max_len self.train_file_path = train_file self.test_file_path = test_file if tok is None: self.tok = get_kobart_tokenizer() else: self.tok = tok self.num_workers = num_workers @staticmethod def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--num_workers', type=int, default=5, help='num of worker for dataloader') return parser # OPTIONAL, called for every GPU/machine (assigning state is OK) def setup(self, stage): # split dataset self.train = KoBARTSummaryDataset(self.train_file_path, self.tok, self.max_len) self.test = KoBARTSummaryDataset(self.test_file_path, self.tok, self.max_len) def train_dataloader(self): train = DataLoader(self.train, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True) return train def val_dataloader(self): val = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return val def test_dataloader(self): test = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return test class Base(pl.LightningModule): def __init__(self, hparams, **kwargs) -> None: super(Base, self).__init__() self.hparams = hparams @staticmethod def add_model_specific_args(parent_parser): # add model specific args parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--batch-size', type=int, default=14, help='batch size for training (default: 96)') parser.add_argument('--lr', type=float, default=3e-5, help='The initial learning rate') parser.add_argument('--warmup_ratio', type=float, default=0.1, help='warmup ratio') parser.add_argument('--model_path', type=str, default=None, help='kobart model path') return parser def configure_optimizers(self): # Prepare optimizer param_optimizer = list(self.model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any( nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any( nd in n for nd in no_decay)], 'weight_decay': 0.0} 
] optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.lr, correct_bias=False) # warm up lr num_workers = (self.hparams.gpus if self.hparams.gpus is not None else 1) * (self.hparams.num_nodes if self.hparams.num_nodes is not None else 1) data_len = len(self.train_dataloader().dataset) logging.info(f'number of workers {num_workers}, data length {data_len}') num_train_steps = int(data_len / (self.hparams.batch_size * num_workers) * self.hparams.max_epochs) logging.info(f'num_train_steps : {num_train_steps}') num_warmup_steps = int(num_train_steps * self.hparams.warmup_ratio) logging.info(f'num_warmup_steps : {num_warmup_steps}') scheduler = get_cosine_schedule_with_warmup( optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_train_steps) lr_scheduler = {'scheduler': scheduler, 'monitor': 'loss', 'interval': 'step', 'frequency': 1} return [optimizer], [lr_scheduler] class KoBARTConditionalGeneration(Base): def __init__(self, hparams, **kwargs): super(KoBARTConditionalGeneration, self).__init__(hparams, **kwargs) self.model = BartForConditionalGeneration.from_pretrained(get_pytorch_kobart_model()) self.model.train() self.bos_token = '<s>' self.eos_token = '</s>' self.pad_token_id = 0 self.tokenizer = get_kobart_tokenizer() def forward(self, inputs): attention_mask = inputs['input_ids'].ne(self.pad_token_id).float() decoder_attention_mask = inputs['decoder_input_ids'].ne(self.pad_token_id).float() return self.model(input_ids=inputs['input_ids'], attention_mask=attention_mask, decoder_input_ids=inputs['decoder_input_ids'], decoder_attention_mask=decoder_attention_mask, labels=inputs['labels'], return_dict=True) def training_step(self, batch, batch_idx): outs = self(batch) loss = outs.loss self.log('train_loss', loss, prog_bar=True) return loss def validation_step(self, batch, batch_idx): outs = self(batch) loss = outs['loss'] return (loss) def validation_epoch_end(self, outputs): losses = [] for loss in outputs: losses.append(loss) self.log('val_loss', torch.stack(losses).mean(), prog_bar=True) if __name__ == '__main__': parser = Base.add_model_specific_args(parser) parser = ArgsBase.add_model_specific_args(parser) parser = KobartSummaryModule.add_model_specific_args(parser) parser = pl.Trainer.add_argparse_args(parser) args = parser.parse_args() logging.info(args) model = KoBARTConditionalGeneration(args) dm = KobartSummaryModule(args.train_file, args.test_file, None, max_len=args.max_len, batch_size=args.batch_size, num_workers=args.num_workers) checkpoint_callback = pl.callbacks.ModelCheckpoint(monitor='val_loss', dirpath=args.default_root_dir, filename='model_chp/{epoch:02d}-{val_loss:.3f}', verbose=True, save_last=True, mode='min', save_top_k=-1, prefix='kobart_translation') tb_logger = pl_loggers.TensorBoardLogger(os.path.join(args.default_root_dir, 'tb_logs')) lr_logger = pl.callbacks.LearningRateMonitor() trainer = pl.Trainer.from_argparse_args(args, logger=tb_logger, callbacks=[checkpoint_callback, lr_logger]) trainer.fit(model, dm)
[((15, 9, 15, 66), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((21, 9, 21, 28), 'logging.getLogger', 'logging.getLogger', ({}, {}), '()', False, 'import logging\n'), ((210, 13, 210, 49), 'pytorch_lightning.Trainer.add_argparse_args', 'pl.Trainer.add_argparse_args', ({(210, 42, 210, 48): 'parser'}, {}), '(parser)', True, 'import pytorch_lightning as pl\n'), ((212, 4, 212, 22), 'logging.info', 'logging.info', ({(212, 17, 212, 21): 'args'}, {}), '(args)', False, 'import logging\n'), ((223, 26, 230, 83), 'pytorch_lightning.callbacks.ModelCheckpoint', 'pl.callbacks.ModelCheckpoint', (), '', True, 'import pytorch_lightning as pl\n'), ((232, 16, 232, 50), 'pytorch_lightning.callbacks.LearningRateMonitor', 'pl.callbacks.LearningRateMonitor', ({}, {}), '()', True, 'import pytorch_lightning as pl\n'), ((233, 14, 234, 87), 'pytorch_lightning.Trainer.from_argparse_args', 'pl.Trainer.from_argparse_args', (), '', True, 'import pytorch_lightning as pl\n'), ((28, 17, 29, 52), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((70, 17, 71, 52), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((81, 21, 83, 46), 'dataset.KoBARTSummaryDataset', 'KoBARTSummaryDataset', ({(81, 42, 81, 62): 'self.train_file_path', (82, 33, 82, 41): 'self.tok', (83, 33, 83, 45): 'self.max_len'}, {}), '(self.train_file_path, self.tok, self.max_len)', False, 'from dataset import KoBARTSummaryDataset\n'), ((84, 20, 86, 45), 'dataset.KoBARTSummaryDataset', 'KoBARTSummaryDataset', ({(84, 41, 84, 60): 'self.test_file_path', (85, 32, 85, 40): 'self.tok', (86, 32, 86, 44): 'self.max_len'}, {}), '(self.test_file_path, self.tok, self.max_len)', False, 'from dataset import KoBARTSummaryDataset\n'), ((89, 16, 91, 70), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, Dataset\n'), ((95, 14, 97, 69), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, Dataset\n'), ((101, 15, 103, 70), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, Dataset\n'), ((115, 17, 116, 52), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((149, 20, 150, 65), 'transformers.optimization.AdamW', 'AdamW', (), '', False, 'from transformers.optimization import AdamW, get_cosine_schedule_with_warmup\n'), ((154, 8, 154, 80), 'logging.info', 'logging.info', ({(154, 21, 154, 79): 'f"""number of workers {num_workers}, data length {data_len}"""'}, {}), "(f'number of workers {num_workers}, data length {data_len}')", False, 'import logging\n'), ((156, 8, 156, 60), 'logging.info', 'logging.info', ({(156, 21, 156, 59): 'f"""num_train_steps : {num_train_steps}"""'}, {}), "(f'num_train_steps : {num_train_steps}')", False, 'import logging\n'), ((158, 8, 158, 62), 'logging.info', 'logging.info', ({(158, 21, 158, 61): 'f"""num_warmup_steps : {num_warmup_steps}"""'}, {}), "(f'num_warmup_steps : {num_warmup_steps}')", False, 'import logging\n'), ((159, 20, 161, 82), 'transformers.optimization.get_cosine_schedule_with_warmup', 'get_cosine_schedule_with_warmup', (), '', False, 'from transformers.optimization import AdamW, get_cosine_schedule_with_warmup\n'), ((176, 25, 176, 47), 'kobart.get_kobart_tokenizer', 'get_kobart_tokenizer', ({}, {}), '()', False, 'from kobart import get_pytorch_kobart_model, get_kobart_tokenizer\n'), ((231, 45, 231, 91), 'os.path.join', 
'os.path.join', ({(231, 58, 231, 79): 'args.default_root_dir', (231, 81, 231, 90): '"""tb_logs"""'}, {}), "(args.default_root_dir, 'tb_logs')", False, 'import os\n'), ((63, 23, 63, 45), 'kobart.get_kobart_tokenizer', 'get_kobart_tokenizer', ({}, {}), '()', False, 'from kobart import get_pytorch_kobart_model, get_kobart_tokenizer\n'), ((171, 66, 171, 92), 'kobart.get_pytorch_kobart_model', 'get_pytorch_kobart_model', ({}, {}), '()', False, 'from kobart import get_pytorch_kobart_model, get_kobart_tokenizer\n'), ((204, 29, 204, 48), 'torch.stack', 'torch.stack', ({(204, 41, 204, 47): 'losses'}, {}), '(losses)', False, 'import torch\n')]
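# A hedged launch sketch for the train.py script above. --train_file/--test_file/
# --batch_size/--max_len come from ArgsBase and KobartSummaryModule, --lr/--warmup_ratio
# from Base, and --gpus/--max_epochs/--default_root_dir are the standard
# pytorch_lightning Trainer flags added by pl.Trainer.add_argparse_args; the concrete
# values are examples only.
#
#   python train.py --train_file data/train.tsv --test_file data/test.tsv \
#                   --batch_size 16 --max_len 512 --lr 3e-5 \
#                   --gpus 1 --max_epochs 3 --default_root_dir logs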
RavensburgOP/core
homeassistant/components/shelly/sensor.py
0ea76e848b182ca0ebb0fdb54558f7f733898ad7
"""Sensor for Shelly.""" from __future__ import annotations from datetime import timedelta import logging from typing import Final, cast import aioshelly from homeassistant.components import sensor from homeassistant.components.sensor import SensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( CONCENTRATION_PARTS_PER_MILLION, DEGREE, ELECTRIC_CURRENT_AMPERE, ELECTRIC_POTENTIAL_VOLT, ENERGY_KILO_WATT_HOUR, LIGHT_LUX, PERCENTAGE, POWER_WATT, SIGNAL_STRENGTH_DECIBELS_MILLIWATT, ) from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import StateType from homeassistant.util import dt from . import ShellyDeviceWrapper from .const import LAST_RESET_NEVER, LAST_RESET_UPTIME, SHAIR_MAX_WORK_HOURS from .entity import ( BlockAttributeDescription, RestAttributeDescription, ShellyBlockAttributeEntity, ShellyRestAttributeEntity, ShellySleepingBlockAttributeEntity, async_setup_entry_attribute_entities, async_setup_entry_rest, ) from .utils import get_device_uptime, temperature_unit _LOGGER: Final = logging.getLogger(__name__) SENSORS: Final = { ("device", "battery"): BlockAttributeDescription( name="Battery", unit=PERCENTAGE, device_class=sensor.DEVICE_CLASS_BATTERY, state_class=sensor.STATE_CLASS_MEASUREMENT, removal_condition=lambda settings, _: settings.get("external_power") == 1, ), ("device", "deviceTemp"): BlockAttributeDescription( name="Device Temperature", unit=temperature_unit, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), ("emeter", "current"): BlockAttributeDescription( name="Current", unit=ELECTRIC_CURRENT_AMPERE, value=lambda value: value, device_class=sensor.DEVICE_CLASS_CURRENT, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("light", "power"): BlockAttributeDescription( name="Power", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), ("device", "power"): BlockAttributeDescription( name="Power", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("emeter", "power"): BlockAttributeDescription( name="Power", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("emeter", "voltage"): BlockAttributeDescription( name="Voltage", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("emeter", "powerFactor"): BlockAttributeDescription( name="Power Factor", unit=PERCENTAGE, value=lambda value: round(value * 100, 1), device_class=sensor.DEVICE_CLASS_POWER_FACTOR, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("relay", "power"): BlockAttributeDescription( name="Power", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("roller", "rollerPower"): BlockAttributeDescription( name="Power", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("device", "energy"): BlockAttributeDescription( name="Energy", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60 / 1000, 2), 
device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), ("emeter", "energy"): BlockAttributeDescription( name="Energy", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), ("emeter", "energyReturned"): BlockAttributeDescription( name="Energy Returned", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), ("light", "energy"): BlockAttributeDescription( name="Energy", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, last_reset=LAST_RESET_UPTIME, ), ("relay", "energy"): BlockAttributeDescription( name="Energy", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), ("roller", "rollerEnergy"): BlockAttributeDescription( name="Energy", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), ("sensor", "concentration"): BlockAttributeDescription( name="Gas Concentration", unit=CONCENTRATION_PARTS_PER_MILLION, icon="mdi:gauge", state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("sensor", "extTemp"): BlockAttributeDescription( name="Temperature", unit=temperature_unit, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp != 999), ), ("sensor", "humidity"): BlockAttributeDescription( name="Humidity", unit=PERCENTAGE, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_HUMIDITY, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp != 999), ), ("sensor", "luminosity"): BlockAttributeDescription( name="Luminosity", unit=LIGHT_LUX, device_class=sensor.DEVICE_CLASS_ILLUMINANCE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("sensor", "tilt"): BlockAttributeDescription( name="Tilt", unit=DEGREE, icon="mdi:angle-acute", state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("relay", "totalWorkTime"): BlockAttributeDescription( name="Lamp Life", unit=PERCENTAGE, icon="mdi:progress-wrench", value=lambda value: round(100 - (value / 3600 / SHAIR_MAX_WORK_HOURS), 1), extra_state_attributes=lambda block: { "Operational hours": round(block.totalWorkTime / 3600, 1) }, ), ("adc", "adc"): BlockAttributeDescription( name="ADC", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("sensor", "sensorOp"): BlockAttributeDescription( name="Operation", icon="mdi:cog-transfer", value=lambda value: value, extra_state_attributes=lambda block: {"self_test": block.selfTest}, ), } REST_SENSORS: Final = { "rssi": RestAttributeDescription( name="RSSI", unit=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, value=lambda status, _: status["wifi_sta"]["rssi"], device_class=sensor.DEVICE_CLASS_SIGNAL_STRENGTH, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), "uptime": RestAttributeDescription( name="Uptime", 
value=get_device_uptime, device_class=sensor.DEVICE_CLASS_TIMESTAMP, default_enabled=False, ), } async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up sensors for device.""" if config_entry.data["sleep_period"]: await async_setup_entry_attribute_entities( hass, config_entry, async_add_entities, SENSORS, ShellySleepingSensor ) else: await async_setup_entry_attribute_entities( hass, config_entry, async_add_entities, SENSORS, ShellySensor ) await async_setup_entry_rest( hass, config_entry, async_add_entities, REST_SENSORS, ShellyRestSensor ) class ShellySensor(ShellyBlockAttributeEntity, SensorEntity): """Represent a shelly sensor.""" def __init__( self, wrapper: ShellyDeviceWrapper, block: aioshelly.Block, attribute: str, description: BlockAttributeDescription, ) -> None: """Initialize sensor.""" super().__init__(wrapper, block, attribute, description) self._last_value: float | None = None if description.last_reset == LAST_RESET_NEVER: self._attr_last_reset = dt.utc_from_timestamp(0) elif description.last_reset == LAST_RESET_UPTIME: self._attr_last_reset = ( dt.utcnow() - timedelta(seconds=wrapper.device.status["uptime"]) ).replace(second=0, microsecond=0) @property def state(self) -> StateType: """Return value of sensor.""" if ( self.description.last_reset == LAST_RESET_UPTIME and self.attribute_value is not None ): value = cast(float, self.attribute_value) if self._last_value and self._last_value > value: self._attr_last_reset = dt.utcnow().replace(second=0, microsecond=0) _LOGGER.info("Energy reset detected for entity %s", self.name) self._last_value = value return self.attribute_value @property def state_class(self) -> str | None: """State class of sensor.""" return self.description.state_class @property def unit_of_measurement(self) -> str | None: """Return unit of sensor.""" return cast(str, self._unit) class ShellyRestSensor(ShellyRestAttributeEntity, SensorEntity): """Represent a shelly REST sensor.""" @property def state(self) -> StateType: """Return value of sensor.""" return self.attribute_value @property def state_class(self) -> str | None: """State class of sensor.""" return self.description.state_class @property def unit_of_measurement(self) -> str | None: """Return unit of sensor.""" return self.description.unit class ShellySleepingSensor(ShellySleepingBlockAttributeEntity, SensorEntity): """Represent a shelly sleeping sensor.""" @property def state(self) -> StateType: """Return value of sensor.""" if self.block is not None: return self.attribute_value return self.last_state @property def state_class(self) -> str | None: """State class of sensor.""" return self.description.state_class @property def unit_of_measurement(self) -> str | None: """Return unit of sensor.""" return cast(str, self._unit)
[((42, 17, 42, 44), 'logging.getLogger', 'logging.getLogger', ({(42, 35, 42, 43): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((307, 15, 307, 36), 'typing.cast', 'cast', ({(307, 20, 307, 23): 'str', (307, 25, 307, 35): 'self._unit'}, {}), '(str, self._unit)', False, 'from typing import Final, cast\n'), ((348, 15, 348, 36), 'typing.cast', 'cast', ({(348, 20, 348, 23): 'str', (348, 25, 348, 35): 'self._unit'}, {}), '(str, self._unit)', False, 'from typing import Final, cast\n'), ((276, 36, 276, 60), 'homeassistant.util.dt.utc_from_timestamp', 'dt.utc_from_timestamp', ({(276, 58, 276, 59): '0'}, {}), '(0)', False, 'from homeassistant.util import dt\n'), ((289, 20, 289, 53), 'typing.cast', 'cast', ({(289, 25, 289, 30): 'float', (289, 32, 289, 52): 'self.attribute_value'}, {}), '(float, self.attribute_value)', False, 'from typing import Final, cast\n'), ((178, 32, 178, 64), 'typing.cast', 'cast', ({(178, 37, 178, 41): 'bool', (178, 43, 178, 63): '(block.extTemp != 999)'}, {}), '(bool, block.extTemp != 999)', False, 'from typing import Final, cast\n'), ((186, 32, 186, 64), 'typing.cast', 'cast', ({(186, 37, 186, 41): 'bool', (186, 43, 186, 63): '(block.extTemp != 999)'}, {}), '(bool, block.extTemp != 999)', False, 'from typing import Final, cast\n'), ((292, 40, 292, 51), 'homeassistant.util.dt.utcnow', 'dt.utcnow', ({}, {}), '()', False, 'from homeassistant.util import dt\n'), ((279, 16, 279, 27), 'homeassistant.util.dt.utcnow', 'dt.utcnow', ({}, {}), '()', False, 'from homeassistant.util import dt\n'), ((279, 30, 279, 80), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import timedelta\n')]
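# A hedged illustration of how one entry of the SENSORS mapping above behaves. It
# assumes a Home Assistant checkout is importable and that BlockAttributeDescription
# (defined in the .entity module, not shown in this file) simply exposes its keyword
# arguments as attributes.
from homeassistant.components.shelly.sensor import SENSORS

desc = SENSORS[("sensor", "extTemp")]
print(desc.name)           # "Temperature"
print(desc.value(21.347))  # 21.3 -- the raw block reading rounded to one decimal place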
zcqian/biothings.api
tests/web/config.py
61c0300317cf2ac7db8310b5b5741ad9b08c4163
""" Web settings to override for testing. """ import os from biothings.web.settings.default import QUERY_KWARGS # ***************************************************************************** # Elasticsearch Variables # ***************************************************************************** ES_INDEX = 'bts_test' ES_DOC_TYPE = 'gene' ES_SCROLL_SIZE = 60 # ***************************************************************************** # User Input Control # ***************************************************************************** # use a smaller size for testing QUERY_KWARGS['GET']['facet_size']['default'] = 3 QUERY_KWARGS['GET']['facet_size']['max'] = 5 QUERY_KWARGS['POST']['q']['jsoninput'] = True # ***************************************************************************** # Elasticsearch Query Builder # ***************************************************************************** ALLOW_RANDOM_QUERY = True ALLOW_NESTED_AGGS = True USERQUERY_DIR = os.path.join(os.path.dirname(__file__), 'userquery') # ***************************************************************************** # Endpoints Specifics # ***************************************************************************** STATUS_CHECK = { 'id': '1017', 'index': 'bts_test', 'doc_type': '_all' }
[((28, 29, 28, 54), 'os.path.dirname', 'os.path.dirname', ({(28, 45, 28, 53): '__file__'}, {}), '(__file__)', False, 'import os\n')]
rocheparadox/InvenTree
InvenTree/InvenTree/management/commands/rebuild_thumbnails.py
76c1e936db78424e0d6953c4062eb32863e302c6
""" Custom management command to rebuild thumbnail images - May be required after importing a new dataset, for example """ import os import logging from PIL import UnidentifiedImageError from django.core.management.base import BaseCommand from django.conf import settings from django.db.utils import OperationalError, ProgrammingError from company.models import Company from part.models import Part logger = logging.getLogger("inventree-thumbnails") class Command(BaseCommand): """ Rebuild all thumbnail images """ def rebuild_thumbnail(self, model): """ Rebuild the thumbnail specified by the "image" field of the provided model """ if not model.image: return img = model.image url = img.thumbnail.name loc = os.path.join(settings.MEDIA_ROOT, url) if not os.path.exists(loc): logger.info(f"Generating thumbnail image for '{img}'") try: model.image.render_variations(replace=False) except FileNotFoundError: logger.error(f"ERROR: Image file '{img}' is missing") except UnidentifiedImageError: logger.error(f"ERROR: Image file '{img}' is not a valid image") def handle(self, *args, **kwargs): logger.setLevel(logging.INFO) logger.info("Rebuilding Part thumbnails") for part in Part.objects.exclude(image=None): try: self.rebuild_thumbnail(part) except (OperationalError, ProgrammingError): logger.error("ERROR: Database read error.") break logger.info("Rebuilding Company thumbnails") for company in Company.objects.exclude(image=None): try: self.rebuild_thumbnail(company) except (OperationalError, ProgrammingError): logger.error("ERROR: abase read error.") break
[((20, 9, 20, 50), 'logging.getLogger', 'logging.getLogger', ({(20, 27, 20, 49): '"""inventree-thumbnails"""'}, {}), "('inventree-thumbnails')", False, 'import logging\n'), ((38, 14, 38, 52), 'os.path.join', 'os.path.join', ({(38, 27, 38, 46): 'settings.MEDIA_ROOT', (38, 48, 38, 51): 'url'}, {}), '(settings.MEDIA_ROOT, url)', False, 'import os\n'), ((56, 20, 56, 52), 'part.models.Part.objects.exclude', 'Part.objects.exclude', (), '', False, 'from part.models import Part\n'), ((65, 23, 65, 58), 'company.models.Company.objects.exclude', 'Company.objects.exclude', (), '', False, 'from company.models import Company\n'), ((40, 15, 40, 34), 'os.path.exists', 'os.path.exists', ({(40, 30, 40, 33): 'loc'}, {}), '(loc)', False, 'import os\n')]
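# A hedged invocation sketch for the management command above. Django derives the
# command name from the file name (rebuild_thumbnails.py), so in a standard InvenTree
# checkout it can be run as `python manage.py rebuild_thumbnails`, or called
# programmatically once Django settings are configured:
from django.core.management import call_command

call_command("rebuild_thumbnails")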
Baracchino-Della-Scuola/Bot
cogs/carbon.py
65c1ef37ca9eae5d104de7d7de5cc58cc138402d
import discord from discord.ext import commands import urllib.parse from .constants import themes, controls, languages, fonts, escales import os from pathlib import Path from typing import Any # from pyppeteer import launch from io import * import requests def encode_url(text: str) -> str: first_encoding = urllib.parse.quote(text, safe="*()") return urllib.parse.quote(first_encoding, safe="*") # Carbonsh encodes text twice def hex_to_rgb(hex: str) -> tuple: """ Args: hex (str): """ return tuple(int(hex.lstrip("#")[i : i + 2], 16) for i in (0, 2, 4)) def parse_bg(background) -> str: if background == "": return "rgba(171, 184, 195, 1)" elif background[0] == "#" or "(" not in background: return f"rgba{hex_to_rgb(background) + (1,)}" return background def int_to_px(number) -> str: return f"{number}px" def int_to_percent(number) -> str: return f"{number}%" def trim_url(text: str) -> str: if len(text) < 2000: return text if "%25" not in text: return text[:2000] if text[:2003][:-3] == "%25": return text[:2000] last_percent = text[:2000].rindex("%25") return text[:last_percent] _carbon_url = "https://carbonnowsh.herokuapp.com/" def code_to_url(code: str) -> str: return f"{_carbon_url}?&code={trim_url(encode_url(code))}" class Carbon(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() async def carbonate(self, ctx, *, code): carbon_url = code_to_url(code) r = requests.get(carbon_url) b = BytesIO(r.content) await ctx.send(file=discord.File(fp=b, filename="code.png")) async def setup(bot): await bot.add_cog(Carbon(bot))
[((68, 5, 68, 23), 'discord.ext.commands.command', 'commands.command', ({}, {}), '()', False, 'from discord.ext import commands\n'), ((72, 12, 72, 36), 'requests.get', 'requests.get', ({(72, 25, 72, 35): 'carbon_url'}, {}), '(carbon_url)', False, 'import requests\n'), ((75, 28, 75, 67), 'discord.File', 'discord.File', (), '', False, 'import discord\n')]
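The cog above builds its Carbon image URL by percent-encoding the snippet twice (encode_url quotes, then quotes the result again), matching the note that the service decodes twice. A minimal standalone sketch of that double-encoding step, assuming only the standard library; the safe-character sets mirror the ones used above:

import urllib.parse

def double_quote(text: str) -> str:
    # First pass percent-encodes the raw text; the second pass re-encodes the
    # resulting '%' characters, e.g. ' ' -> '%20' -> '%2520'.
    once = urllib.parse.quote(text, safe="*()")
    return urllib.parse.quote(once, safe="*")

print(double_quote("hello world"))  # -> hello%2520world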
jimcortez/spotipy_twisted
examples/show_artist.py
49ff2a4a5a5a9b3184b22adbe068eb91a38f3102
# shows artist info for a URN or URL import spotipy_twisted import sys import pprint if len(sys.argv) > 1: urn = sys.argv[1] else: urn = 'spotify:artist:3jOstUTkEu2JkjvRdBA5Gu' sp = spotipy_twisted.Spotify() artist = sp.artist(urn) pprint.pprint(artist)
[((12, 5, 12, 30), 'spotipy_twisted.Spotify', 'spotipy_twisted.Spotify', ({}, {}), '()', False, 'import spotipy_twisted\n'), ((16, 0, 16, 21), 'pprint.pprint', 'pprint.pprint', ({(16, 14, 16, 20): 'artist'}, {}), '(artist)', False, 'import pprint\n')]
sommersoft/Adafruit_CircuitPython_MCP3xxx
examples/mcp3xxx_mcp3002_single_ended_simpletest.py
94088a7e2b30f1b34e8a5fd7076075d88aad460b
import busio import digitalio import board import adafruit_mcp3xxx.mcp3002 as MCP from adafruit_mcp3xxx.analog_in import AnalogIn # create the spi bus spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI) # create the cs (chip select) cs = digitalio.DigitalInOut(board.D5) # create the mcp object mcp = MCP.MCP3002(spi, cs) # create an analog input channel on pin 0 chan = AnalogIn(mcp, MCP.P0) print("Raw ADC Value: ", chan.value) print("ADC Voltage: " + str(chan.voltage) + "V")
[((8, 6, 8, 66), 'busio.SPI', 'busio.SPI', (), '', False, 'import busio\n'), ((11, 5, 11, 37), 'digitalio.DigitalInOut', 'digitalio.DigitalInOut', ({(11, 28, 11, 36): 'board.D5'}, {}), '(board.D5)', False, 'import digitalio\n'), ((14, 6, 14, 26), 'adafruit_mcp3xxx.mcp3002.MCP3002', 'MCP.MCP3002', ({(14, 18, 14, 21): 'spi', (14, 23, 14, 25): 'cs'}, {}), '(spi, cs)', True, 'import adafruit_mcp3xxx.mcp3002 as MCP\n'), ((17, 7, 17, 28), 'adafruit_mcp3xxx.analog_in.AnalogIn', 'AnalogIn', ({(17, 16, 17, 19): 'mcp', (17, 21, 17, 27): 'MCP.P0'}, {}), '(mcp, MCP.P0)', False, 'from adafruit_mcp3xxx.analog_in import AnalogIn\n')]
rosteen/glue
glue/core/data_factories/tables.py
ed71979f8e0e41f993a2363b3b5a8f8c3167a130
from glue.core.data_factories.helpers import has_extension from glue.config import data_factory __all__ = ['tabular_data'] @data_factory(label="ASCII Table", identifier=has_extension('csv txt tsv tbl dat ' 'csv.gz txt.gz tbl.bz ' 'dat.gz'), priority=1) def tabular_data(path, **kwargs): from glue.core.data_factories.astropy_table import astropy_tabular_data from glue.core.data_factories.pandas import pandas_read_table for fac in [astropy_tabular_data, pandas_read_table]: try: return fac(path, **kwargs) except Exception: pass else: raise IOError("Could not parse file: %s" % path)
[((9, 25, 11, 48), 'glue.core.data_factories.helpers.has_extension', 'has_extension', ({(9, 39, 11, 47): '"""csv txt tsv tbl dat csv.gz txt.gz tbl.bz dat.gz"""'}, {}), "('csv txt tsv tbl dat csv.gz txt.gz tbl.bz dat.gz')", False, 'from glue.core.data_factories.helpers import has_extension\n')]
coordt/code_doc
code_doc/views/author_views.py
c2fac64ac3ad61952a2d9f036727166741f9aff9
from django.shortcuts import render from django.http import Http404 from django.views.generic.edit import UpdateView from django.views.generic import ListView, View from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User from django.utils.decorators import method_decorator import logging from ..models.projects import Project from ..models.authors import Author from ..forms import AuthorForm from .permission_helpers import PermissionOnObjectViewMixin # logger for this file logger = logging.getLogger(__name__) class AuthorListView(ListView): """A generic view of the authors in a list""" paginate_by = 10 template_name = "code_doc/authors/author_list.html" context_object_name = "authors" model = Author def detail_author(request, author_id): try: author = Author.objects.get(pk=author_id) except Author.DoesNotExist: raise Http404 project_list = Project.objects.filter(authors=author) coauthor_list = ( Author.objects.filter(project__in=project_list).distinct().exclude(pk=author_id) ) return render( request, "code_doc/authors/author_details.html", { "project_list": project_list, "author": author, "user": request.user, "coauthor_list": coauthor_list, }, ) class AuthorUpdateView(PermissionOnObjectViewMixin, UpdateView): """View for editing information about an Author .. note:: in order to be able to edit an Author, the user should have the 'code_doc.author_edit' permission on the Author object. """ form_class = AuthorForm model = Author permissions_on_object = ("code_doc.author_edit",) permissions_object_getter = "get_author_from_request" template_name = "code_doc/authors/author_edit.html" pk_url_kwarg = "author_id" def get_author_from_request(self, request, *args, **kwargs): # TODO check if needed try: return Author.objects.get(pk=kwargs["author_id"]) except Author.DoesNotExist: logger.warning( "[AuthorUpdateView] non existent Author with id %s", kwargs["author_id"] ) return None class MaintainerProfileView(View): """Manages the views associated to the maintainers""" @method_decorator(login_required) def get(self, request, maintainer_id): try: maintainer = User.objects.get(pk=maintainer_id) except Project.DoesNotExist: raise Http404 projects = Project.objects.filter(administrators=maintainer) return render( request, "code_doc/maintainer_details.html", {"projects": projects, "maintainer": maintainer}, ) @method_decorator(login_required) def post(self, request): pass
[((19, 9, 19, 36), 'logging.getLogger', 'logging.getLogger', ({(19, 27, 19, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((42, 11, 51, 5), 'django.shortcuts.render', 'render', ({(43, 8, 43, 15): 'request', (44, 8, 44, 46): '"""code_doc/authors/author_details.html"""', (45, 8, 50, 9): "{'project_list': project_list, 'author': author, 'user': request.user,\n 'coauthor_list': coauthor_list}"}, {}), "(request, 'code_doc/authors/author_details.html', {'project_list':\n project_list, 'author': author, 'user': request.user, 'coauthor_list':\n coauthor_list})", False, 'from django.shortcuts import render\n'), ((85, 5, 85, 37), 'django.utils.decorators.method_decorator', 'method_decorator', ({(85, 22, 85, 36): 'login_required'}, {}), '(login_required)', False, 'from django.utils.decorators import method_decorator\n'), ((99, 5, 99, 37), 'django.utils.decorators.method_decorator', 'method_decorator', ({(99, 22, 99, 36): 'login_required'}, {}), '(login_required)', False, 'from django.utils.decorators import method_decorator\n'), ((93, 15, 97, 9), 'django.shortcuts.render', 'render', ({(94, 12, 94, 19): 'request', (95, 12, 95, 46): '"""code_doc/maintainer_details.html"""', (96, 12, 96, 60): "{'projects': projects, 'maintainer': maintainer}"}, {}), "(request, 'code_doc/maintainer_details.html', {'projects': projects,\n 'maintainer': maintainer})", False, 'from django.shortcuts import render\n'), ((88, 25, 88, 59), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', (), '', False, 'from django.contrib.auth.models import User\n')]
rehosting/rehosting_sok
d00dfeed/analyses/print_sloc_per_soc.py
499b625c8aa60020f311df97a6253820982f20d4
# External deps import os, sys, json from pathlib import Path from typing import Dict, List # Internal deps os.chdir(sys.path[0]) sys.path.append("..") import df_common as dfc import analyses_common as ac # Generated files directory GEN_FILE_DIR = str(Path(__file__).resolve().parent.parent) + os.sep + "generated_files" # TODO: ugly parent.parent pathing if os.path.exists(GEN_FILE_DIR): sys.path.append(GEN_FILE_DIR) if os.path.exists(os.path.join(GEN_FILE_DIR, "sloc_cnt.py")): from sloc_cnt import DRIVER_NAME_TO_SLOC else: print("Error: no SLOC file! Run \'df_analyze.py\' with \'--linux-src-dir\'") sys.exit(1) if __name__ == "__main__": json_files = ac.argparse_and_get_files("Graph SLOC/SoC data") soc_sloc_by_arch: Dict[str, List[int]] = {} print("Gathering SLOC average by arch...") from graph_dd_sloc_by_arch import get_sloc_avg_and_list_by_arch cmp_by_arch = ac.build_dict_two_lvl_cnt(json_files, dfc.JSON_ARC, dfc.JSON_CMP_STR) avg_sloc_by_arch, sloc_list_by_arch = get_sloc_avg_and_list_by_arch(cmp_by_arch, verbose = False) # Collection print("Iterating DTBs/SoCs...") for dtb_json in json_files: with open(dtb_json) as json_file: data = json.load(json_file) soc_sloc = 0 arch = data[dfc.JSON_ARC] cmp_strs = data[dfc.JSON_CMP_STR] # Total SLOC for this SoC for cmp_str in cmp_strs: driver_sloc = dfc.cmp_str_to_sloc(cmp_str) if not driver_sloc: # Closed-source driver driver_sloc = avg_sloc_by_arch[arch] soc_sloc += driver_sloc #print("{}: {}".format(cmp_str, driver_sloc)) if arch not in soc_sloc_by_arch: soc_sloc_by_arch[arch] = [] else: soc_sloc_by_arch[arch].append(soc_sloc) print("{} ({}): {}".format(dtb_json.split(os.sep)[-1], arch, soc_sloc)) # Final stats ac.print_mean_median_std_dev_for_dict_of_lists(soc_sloc_by_arch, "\nSloc Per Soc, format: [arch : (mean, median, std_dev)]\n")
[((7, 0, 7, 21), 'os.chdir', 'os.chdir', ({(7, 9, 7, 20): 'sys.path[0]'}, {}), '(sys.path[0])', False, 'import os, sys, json\n'), ((8, 0, 8, 21), 'sys.path.append', 'sys.path.append', ({(8, 16, 8, 20): '""".."""'}, {}), "('..')", False, 'import os, sys, json\n'), ((14, 3, 14, 31), 'os.path.exists', 'os.path.exists', ({(14, 18, 14, 30): 'GEN_FILE_DIR'}, {}), '(GEN_FILE_DIR)', False, 'import os, sys, json\n'), ((15, 4, 15, 33), 'sys.path.append', 'sys.path.append', ({(15, 20, 15, 32): 'GEN_FILE_DIR'}, {}), '(GEN_FILE_DIR)', False, 'import os, sys, json\n'), ((20, 4, 20, 15), 'sys.exit', 'sys.exit', ({(20, 13, 20, 14): '(1)'}, {}), '(1)', False, 'import os, sys, json\n'), ((24, 17, 24, 65), 'analyses_common.argparse_and_get_files', 'ac.argparse_and_get_files', ({(24, 43, 24, 64): '"""Graph SLOC/SoC data"""'}, {}), "('Graph SLOC/SoC data')", True, 'import analyses_common as ac\n'), ((29, 18, 29, 87), 'analyses_common.build_dict_two_lvl_cnt', 'ac.build_dict_two_lvl_cnt', ({(29, 44, 29, 54): 'json_files', (29, 56, 29, 68): 'dfc.JSON_ARC', (29, 70, 29, 86): 'dfc.JSON_CMP_STR'}, {}), '(json_files, dfc.JSON_ARC, dfc.JSON_CMP_STR)', True, 'import analyses_common as ac\n'), ((30, 42, 30, 101), 'graph_dd_sloc_by_arch.get_sloc_avg_and_list_by_arch', 'get_sloc_avg_and_list_by_arch', (), '', False, 'from graph_dd_sloc_by_arch import get_sloc_avg_and_list_by_arch\n'), ((59, 4, 60, 69), 'analyses_common.print_mean_median_std_dev_for_dict_of_lists', 'ac.print_mean_median_std_dev_for_dict_of_lists', ({(59, 51, 59, 67): 'soc_sloc_by_arch', (60, 8, 60, 68): '"""\nSloc Per Soc, format: [arch : (mean, median, std_dev)]\n"""'}, {}), '(soc_sloc_by_arch,\n """\nSloc Per Soc, format: [arch : (mean, median, std_dev)]\n""")', True, 'import analyses_common as ac\n'), ((16, 22, 16, 63), 'os.path.join', 'os.path.join', ({(16, 35, 16, 47): 'GEN_FILE_DIR', (16, 49, 16, 62): '"""sloc_cnt.py"""'}, {}), "(GEN_FILE_DIR, 'sloc_cnt.py')", False, 'import os, sys, json\n'), ((37, 19, 37, 39), 'json.load', 'json.load', ({(37, 29, 37, 38): 'json_file'}, {}), '(json_file)', False, 'import os, sys, json\n'), ((45, 26, 45, 54), 'df_common.cmp_str_to_sloc', 'dfc.cmp_str_to_sloc', ({(45, 46, 45, 53): 'cmp_str'}, {}), '(cmp_str)', True, 'import df_common as dfc\n'), ((13, 19, 13, 33), 'pathlib.Path', 'Path', ({(13, 24, 13, 32): '__file__'}, {}), '(__file__)', False, 'from pathlib import Path\n')]
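The script above delegates the final statistics to analyses_common.print_mean_median_std_dev_for_dict_of_lists, which is not shown in this record. A rough stand-in for what such a helper could do (an assumption about its behaviour, not its actual code), using only the standard library and made-up SLOC numbers:

import statistics

def print_mean_median_std_dev(data_by_arch, title):
    # Hypothetical equivalent: one (mean, median, std_dev) row per architecture.
    print(title)
    for arch, values in data_by_arch.items():
        if not values:
            continue
        print("{} : ({:.1f}, {}, {:.1f})".format(
            arch, statistics.mean(values),
            statistics.median(values), statistics.pstdev(values)))

print_mean_median_std_dev({"arm": [12000, 15500, 9800], "mips": [7400]},
                          "Sloc Per Soc, format: [arch : (mean, median, std_dev)]")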
asigalov61/minGPT
mingpt/lr_decay.py
b4f8d57aaf1bb5c64d480f8005b73d39b075ae4b
import math import pytorch_lightning as pl class LearningRateDecayCallback(pl.Callback): def __init__(self, learning_rate, warmup_tokens=375e6, final_tokens=260e9, lr_decay=True): super().__init__() self.learning_rate = learning_rate self.tokens = 0 self.final_tokens = final_tokens self.lr_decay = lr_decay self.warmup_tokens = warmup_tokens def on_train_batch_end(self, trainer, pl_module, batch, batch_idx, dataloader_idx): optimizer = trainer.optimizers[0] _, y = batch if self.lr_decay: self.tokens += (y >= 0).sum() # number of tokens processed this step (i.e. label is not -100) if self.tokens < self.warmup_tokens: # linear warmup lr_mult = float(self.tokens) / float(max(1, self.warmup_tokens)) else: # cosine learning rate decay progress = float(self.tokens - self.warmup_tokens) / float( max(1, self.final_tokens - self.warmup_tokens)) lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress))) lr = self.learning_rate * lr_mult for param_group in optimizer.param_groups: param_group['lr'] = lr
[((28, 48, 28, 76), 'math.cos', 'math.cos', ({(28, 57, 28, 75): 'math.pi * progress'}, {}), '(math.pi * progress)', False, 'import math\n')]
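The callback above drives the optimizer's learning rate from a token-count schedule: linear warmup until warmup_tokens have been processed, then cosine decay floored at 10% of the base rate. A small self-contained sketch of the same formula; the example values are illustrative, not taken from the repo:

import math

def lr_at(tokens, base_lr, warmup_tokens=375e6, final_tokens=260e9):
    if tokens < warmup_tokens:
        # linear warmup
        lr_mult = float(tokens) / float(max(1, warmup_tokens))
    else:
        # cosine decay, never dropping below 10% of the base learning rate
        progress = float(tokens - warmup_tokens) / float(
            max(1, final_tokens - warmup_tokens))
        lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))
    return base_lr * lr_mult

print(lr_at(1e6, 6e-4))    # early in warmup: a small fraction of the base rate
print(lr_at(130e9, 6e-4))  # roughly mid-decay: about half the base rate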
calvinbui/apprise
apprise/config/ConfigBase.py
a5510790baf5aa1d74afabab25ff57d6b2304d56
# -*- coding: utf-8 -*- # # Copyright (C) 2020 Chris Caron <[email protected]> # All rights reserved. # # This code is licensed under the MIT License. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files(the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and / or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions : # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import os import re import six import yaml import time from .. import plugins from ..AppriseAsset import AppriseAsset from ..URLBase import URLBase from ..common import ConfigFormat from ..common import CONFIG_FORMATS from ..common import ContentIncludeMode from ..utils import GET_SCHEMA_RE from ..utils import parse_list from ..utils import parse_bool from ..utils import parse_urls from . import SCHEMA_MAP # Test whether token is valid or not VALID_TOKEN = re.compile( r'(?P<token>[a-z0-9][a-z0-9_]+)', re.I) class ConfigBase(URLBase): """ This is the base class for all supported configuration sources """ # The Default Encoding to use if not otherwise detected encoding = 'utf-8' # The default expected configuration format unless otherwise # detected by the sub-modules default_config_format = ConfigFormat.TEXT # This is only set if the user overrides the config format on the URL # this should always initialize itself as None config_format = None # Don't read any more of this amount of data into memory as there is no # reason we should be reading in more. This is more of a safe guard then # anything else. 128KB (131072B) max_buffer_size = 131072 # By default all configuration is not includable using the 'include' # line found in configuration files. allow_cross_includes = ContentIncludeMode.NEVER # the config path manages the handling of relative include config_path = os.getcwd() def __init__(self, cache=True, recursion=0, insecure_includes=False, **kwargs): """ Initialize some general logging and common server arguments that will keep things consistent when working with the configurations that inherit this class. By default we cache our responses so that subsiquent calls does not cause the content to be retrieved again. For local file references this makes no difference at all. But for remote content, this does mean more then one call can be made to retrieve the (same) data. This method can be somewhat inefficient if disabled. Only disable caching if you understand the consequences. You can alternatively set the cache value to an int identifying the number of seconds the previously retrieved can exist for before it should be considered expired. recursion defines how deep we recursively handle entries that use the `include` keyword. 
This keyword requires us to fetch more configuration from another source and add it to our existing compilation. If the file we remotely retrieve also has an `include` reference, we will only advance through it if recursion is set to 2 deep. If set to zero it is off. There is no limit to how high you set this value. It would be recommended to keep it low if you do intend to use it. insecure_include by default are disabled. When set to True, all Apprise Config files marked to be in STRICT mode are treated as being in ALWAYS mode. Take a file:// based configuration for example, only a file:// based configuration can include another file:// based one. because it is set to STRICT mode. If an http:// based configuration file attempted to include a file:// one it woul fail. However this include would be possible if insecure_includes is set to True. There are cases where a self hosting apprise developer may wish to load configuration from memory (in a string format) that contains 'include' entries (even file:// based ones). In these circumstances if you want these 'include' entries to be honored, this value must be set to True. """ super(ConfigBase, self).__init__(**kwargs) # Tracks the time the content was last retrieved on. This place a role # for cases where we are not caching our response and are required to # re-retrieve our settings. self._cached_time = None # Tracks previously loaded content for speed self._cached_servers = None # Initialize our recursion value self.recursion = recursion # Initialize our insecure_includes flag self.insecure_includes = insecure_includes if 'encoding' in kwargs: # Store the encoding self.encoding = kwargs.get('encoding') if 'format' in kwargs \ and isinstance(kwargs['format'], six.string_types): # Store the enforced config format self.config_format = kwargs.get('format').lower() if self.config_format not in CONFIG_FORMATS: # Simple error checking err = 'An invalid config format ({}) was specified.'.format( self.config_format) self.logger.warning(err) raise TypeError(err) # Set our cache flag; it can be True or a (positive) integer try: self.cache = cache if isinstance(cache, bool) else int(cache) if self.cache < 0: err = 'A negative cache value ({}) was specified.'.format( cache) self.logger.warning(err) raise TypeError(err) except (ValueError, TypeError): err = 'An invalid cache value ({}) was specified.'.format(cache) self.logger.warning(err) raise TypeError(err) return def servers(self, asset=None, **kwargs): """ Performs reads loaded configuration and returns all of the services that could be parsed and loaded. """ if not self.expired(): # We already have cached results to return; use them return self._cached_servers # Our cached response object self._cached_servers = list() # read() causes the child class to do whatever it takes for the # config plugin to load the data source and return unparsed content # None is returned if there was an error or simply no data content = self.read(**kwargs) if not isinstance(content, six.string_types): # Set the time our content was cached at self._cached_time = time.time() # Nothing more to do; return our empty cache list return self._cached_servers # Our Configuration format uses a default if one wasn't one detected # or enfored. 
config_format = \ self.default_config_format \ if self.config_format is None else self.config_format # Dynamically load our parse_ function based on our config format fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Initialize our asset object asset = asset if isinstance(asset, AppriseAsset) else self.asset # Execute our config parse function which always returns a tuple # of our servers and our configuration servers, configs = fn(content=content, asset=asset) self._cached_servers.extend(servers) # Configuration files were detected; recursively populate them # If we have been configured to do so for url in configs: if self.recursion > 0: # Attempt to acquire the schema at the very least to allow # our configuration based urls. schema = GET_SCHEMA_RE.match(url) if schema is None: # Plan B is to assume we're dealing with a file schema = 'file' if not os.path.isabs(url): # We're dealing with a relative path; prepend # our current config path url = os.path.join(self.config_path, url) url = '{}://{}'.format(schema, URLBase.quote(url)) else: # Ensure our schema is always in lower case schema = schema.group('schema').lower() # Some basic validation if schema not in SCHEMA_MAP: ConfigBase.logger.warning( 'Unsupported include schema {}.'.format(schema)) continue # Parse our url details of the server object as dictionary # containing all of the information parsed from our URL results = SCHEMA_MAP[schema].parse_url(url) if not results: # Failed to parse the server URL self.logger.warning( 'Unparseable include URL {}'.format(url)) continue # Handle cross inclusion based on allow_cross_includes rules if (SCHEMA_MAP[schema].allow_cross_includes == ContentIncludeMode.STRICT and schema not in self.schemas() and not self.insecure_includes) or \ SCHEMA_MAP[schema].allow_cross_includes == \ ContentIncludeMode.NEVER: # Prevent the loading if insecure base protocols ConfigBase.logger.warning( 'Including {}:// based configuration is prohibited. ' 'Ignoring URL {}'.format(schema, url)) continue # Prepare our Asset Object results['asset'] = asset # No cache is required because we're just lumping this in # and associating it with the cache value we've already # declared (prior to our recursion) results['cache'] = False # Recursion can never be parsed from the URL; we decrement # it one level results['recursion'] = self.recursion - 1 # Insecure Includes flag can never be parsed from the URL results['insecure_includes'] = self.insecure_includes try: # Attempt to create an instance of our plugin using the # parsed URL information cfg_plugin = SCHEMA_MAP[results['schema']](**results) except Exception as e: # the arguments are invalid or can not be used. 
self.logger.warning( 'Could not load include URL: {}'.format(url)) self.logger.debug('Loading Exception: {}'.format(str(e))) continue # if we reach here, we can now add this servers found # in this configuration file to our list self._cached_servers.extend( cfg_plugin.servers(asset=asset)) # We no longer need our configuration object del cfg_plugin else: self.logger.debug( 'Recursion limit reached; ignoring Include URL: %s' % url) if self._cached_servers: self.logger.info('Loaded {} entries from {}'.format( len(self._cached_servers), self.url())) else: self.logger.warning( 'Failed to load Apprise configuration from {}'.format( self.url())) # Set the time our content was cached at self._cached_time = time.time() return self._cached_servers def read(self): """ This object should be implimented by the child classes """ return None def expired(self): """ Simply returns True if the configuration should be considered as expired or False if content should be retrieved. """ if isinstance(self._cached_servers, list) and self.cache: # We have enough reason to look further into our cached content # and verify it has not expired. if self.cache is True: # we have not expired, return False return False # Verify our cache time to determine whether we will get our # content again. age_in_sec = time.time() - self._cached_time if age_in_sec <= self.cache: # We have not expired; return False return False # If we reach here our configuration should be considered # missing and/or expired. return True @staticmethod def parse_url(url, verify_host=True): """Parses the URL and returns it broken apart into a dictionary. This is very specific and customized for Apprise. Args: url (str): The URL you want to fully parse. verify_host (:obj:`bool`, optional): a flag kept with the parsed URL which some child classes will later use to verify SSL keys (if SSL transactions take place). Unless under very specific circumstances, it is strongly recomended that you leave this default value set to True. Returns: A dictionary is returned containing the URL fully parsed if successful, otherwise None is returned. """ results = URLBase.parse_url(url, verify_host=verify_host) if not results: # We're done; we failed to parse our url return results # Allow overriding the default config format if 'format' in results['qsd']: results['format'] = results['qsd'].get('format') if results['format'] not in CONFIG_FORMATS: URLBase.logger.warning( 'Unsupported format specified {}'.format( results['format'])) del results['format'] # Defines the encoding of the payload if 'encoding' in results['qsd']: results['encoding'] = results['qsd'].get('encoding') # Our cache value if 'cache' in results['qsd']: # First try to get it's integer value try: results['cache'] = int(results['qsd']['cache']) except (ValueError, TypeError): # No problem, it just isn't an integer; now treat it as a bool # instead: results['cache'] = parse_bool(results['qsd']['cache']) return results @staticmethod def detect_config_format(content, **kwargs): """ Takes the specified content and attempts to detect the format type The function returns the actual format type if detected, otherwise it returns None """ # Detect Format Logic: # - A pound/hashtag (#) is alawys a comment character so we skip over # lines matched here. # - Detection begins on the first non-comment and non blank line # matched. # - If we find a string followed by a colon, we know we're dealing # with a YAML file. 
# - If we find a string that starts with a URL, or our tag # definitions (accepting commas) followed by an equal sign we know # we're dealing with a TEXT format. # Define what a valid line should look like valid_line_re = re.compile( r'^\s*(?P<line>([;#]+(?P<comment>.*))|' r'(?P<text>((?P<tag>[ \t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|' r'((?P<yaml>[a-z0-9]+):.*))?$', re.I) try: # split our content up to read line by line content = re.split(r'\r*\n', content) except TypeError: # content was not expected string type ConfigBase.logger.error( 'Invalid Apprise configuration specified.') return None # By default set our return value to None since we don't know # what the format is yet config_format = None # iterate over each line of the file to attempt to detect it # stop the moment a the type has been determined for line, entry in enumerate(content, start=1): result = valid_line_re.match(entry) if not result: # Invalid syntax ConfigBase.logger.error( 'Undetectable Apprise configuration found ' 'based on line {}.'.format(line)) # Take an early exit return None # Attempt to detect configuration if result.group('yaml'): config_format = ConfigFormat.YAML ConfigBase.logger.debug( 'Detected YAML configuration ' 'based on line {}.'.format(line)) break elif result.group('text'): config_format = ConfigFormat.TEXT ConfigBase.logger.debug( 'Detected TEXT configuration ' 'based on line {}.'.format(line)) break # If we reach here, we have a comment entry # Adjust default format to TEXT config_format = ConfigFormat.TEXT return config_format @staticmethod def config_parse(content, asset=None, config_format=None, **kwargs): """ Takes the specified config content and loads it based on the specified config_format. If a format isn't specified, then it is auto detected. """ if config_format is None: # Detect the format config_format = ConfigBase.detect_config_format(content) if not config_format: # We couldn't detect configuration ConfigBase.logger.error('Could not detect configuration') return (list(), list()) if config_format not in CONFIG_FORMATS: # Invalid configuration type specified ConfigBase.logger.error( 'An invalid configuration format ({}) was specified'.format( config_format)) return (list(), list()) # Dynamically load our parse_ function based on our config format fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Execute our config parse function which always returns a list return fn(content=content, asset=asset) @staticmethod def config_parse_text(content, asset=None): """ Parse the specified content as though it were a simple text file only containing a list of URLs. Return a tuple that looks like (servers, configs) where: - servers contains a list of loaded notification plugins - configs contains a list of additional configuration files referenced. You may also optionally associate an asset with the notification. The file syntax is: # # pound/hashtag allow for line comments # # One or more tags can be idenified using comma's (,) to separate # them. <Tag(s)>=<URL> # Or you can use this format (no tags associated) <URL> # you can also use the keyword 'include' and identify a # configuration location (like this file) which will be included # as additional configuration entries when loaded. 
include <ConfigURL> """ # A list of loaded Notification Services servers = list() # A list of additional configuration files referenced using # the include keyword configs = list() # Define what a valid line should look like valid_line_re = re.compile( r'^\s*(?P<line>([;#]+(?P<comment>.*))|' r'(\s*(?P<tags>[^=]+)=|=)?\s*' r'(?P<url>[a-z0-9]{2,9}://.*)|' r'include\s+(?P<config>.+))?\s*$', re.I) try: # split our content up to read line by line content = re.split(r'\r*\n', content) except TypeError: # content was not expected string type ConfigBase.logger.error( 'Invalid Apprise TEXT based configuration specified.') return (list(), list()) for line, entry in enumerate(content, start=1): result = valid_line_re.match(entry) if not result: # Invalid syntax ConfigBase.logger.error( 'Invalid Apprise TEXT configuration format found ' '{} on line {}.'.format(entry, line)) # Assume this is a file we shouldn't be parsing. It's owner # can read the error printed to screen and take action # otherwise. return (list(), list()) url, config = result.group('url'), result.group('config') if not (url or config): # Comment/empty line; do nothing continue if config: ConfigBase.logger.debug('Include URL: {}'.format(config)) # Store our include line configs.append(config.strip()) continue # Acquire our url tokens results = plugins.url_to_dict(url) if results is None: # Failed to parse the server URL ConfigBase.logger.warning( 'Unparseable URL {} on line {}.'.format(url, line)) continue # Build a list of tags to associate with the newly added # notifications if any were set results['tag'] = set(parse_list(result.group('tags'))) # Prepare our Asset Object results['asset'] = \ asset if isinstance(asset, AppriseAsset) else AppriseAsset() try: # Attempt to create an instance of our plugin using the # parsed URL information plugin = plugins.SCHEMA_MAP[results['schema']](**results) # Create log entry of loaded URL ConfigBase.logger.debug('Loaded URL: {}'.format(plugin.url())) except Exception as e: # the arguments are invalid or can not be used. ConfigBase.logger.warning( 'Could not load URL {} on line {}.'.format( url, line)) ConfigBase.logger.debug('Loading Exception: %s' % str(e)) continue # if we reach here, we successfully loaded our data servers.append(plugin) # Return what was loaded return (servers, configs) @staticmethod def config_parse_yaml(content, asset=None): """ Parse the specified content as though it were a yaml file specifically formatted for Apprise. Return a tuple that looks like (servers, configs) where: - servers contains a list of loaded notification plugins - configs contains a list of additional configuration files referenced. You may optionally associate an asset with the notification. 
""" # A list of loaded Notification Services servers = list() # A list of additional configuration files referenced using # the include keyword configs = list() try: # Load our data (safely) result = yaml.load(content, Loader=yaml.SafeLoader) except (AttributeError, yaml.parser.ParserError, yaml.error.MarkedYAMLError) as e: # Invalid content ConfigBase.logger.error( 'Invalid Apprise YAML data specified.') ConfigBase.logger.debug( 'YAML Exception:{}{}'.format(os.linesep, e)) return (list(), list()) if not isinstance(result, dict): # Invalid content ConfigBase.logger.error( 'Invalid Apprise YAML based configuration specified.') return (list(), list()) # YAML Version version = result.get('version', 1) if version != 1: # Invalid syntax ConfigBase.logger.error( 'Invalid Apprise YAML version specified {}.'.format(version)) return (list(), list()) # # global asset object # asset = asset if isinstance(asset, AppriseAsset) else AppriseAsset() tokens = result.get('asset', None) if tokens and isinstance(tokens, dict): for k, v in tokens.items(): if k.startswith('_') or k.endswith('_'): # Entries are considered reserved if they start or end # with an underscore ConfigBase.logger.warning( 'Ignored asset key "{}".'.format(k)) continue if not (hasattr(asset, k) and isinstance(getattr(asset, k), (bool, six.string_types))): # We can't set a function or non-string set value ConfigBase.logger.warning( 'Invalid asset key "{}".'.format(k)) continue if v is None: # Convert to an empty string v = '' if (isinstance(v, (bool, six.string_types)) and isinstance(getattr(asset, k), bool)): # If the object in the Asset is a boolean, then # we want to convert the specified string to # match that. setattr(asset, k, parse_bool(v)) elif isinstance(v, six.string_types): # Set our asset object with the new value setattr(asset, k, v.strip()) else: # we must set strings with a string ConfigBase.logger.warning( 'Invalid asset value to "{}".'.format(k)) continue # # global tag root directive # global_tags = set() tags = result.get('tag', None) if tags and isinstance(tags, (list, tuple, six.string_types)): # Store any preset tags global_tags = set(parse_list(tags)) # # include root directive # includes = result.get('include', None) if isinstance(includes, six.string_types): # Support a single inline string or multiple ones separated by a # comma and/or space includes = parse_urls(includes) elif not isinstance(includes, (list, tuple)): # Not a problem; we simply have no includes includes = list() # Iterate over each config URL for no, url in enumerate(includes): if isinstance(url, six.string_types): # Support a single inline string or multiple ones separated by # a comma and/or space configs.extend(parse_urls(url)) elif isinstance(url, dict): # Store the url and ignore arguments associated configs.extend(u for u in url.keys()) # # urls root directive # urls = result.get('urls', None) if not isinstance(urls, (list, tuple)): # Not a problem; we simply have no urls urls = list() # Iterate over each URL for no, url in enumerate(urls): # Our results object is what we use to instantiate our object if # we can. Reset it to None on each iteration results = list() if isinstance(url, six.string_types): # We're just a simple URL string... schema = GET_SCHEMA_RE.match(url) if schema is None: # Log invalid entries so that maintainer of config # config file at least has something to take action # with. 
ConfigBase.logger.warning( 'Invalid URL {}, entry #{}'.format(url, no + 1)) continue # We found a valid schema worthy of tracking; store it's # details: _results = plugins.url_to_dict(url) if _results is None: ConfigBase.logger.warning( 'Unparseable URL {}, entry #{}'.format( url, no + 1)) continue # add our results to our global set results.append(_results) elif isinstance(url, dict): # We are a url string with additional unescaped options. In # this case we want to iterate over all of our options so we # can at least tell the end user what entries were ignored # due to errors if six.PY2: it = url.iteritems() else: # six.PY3 it = iter(url.items()) # Track the URL to-load _url = None # Track last acquired schema schema = None for key, tokens in it: # Test our schema _schema = GET_SCHEMA_RE.match(key) if _schema is None: # Log invalid entries so that maintainer of config # config file at least has something to take action # with. ConfigBase.logger.warning( 'Ignored entry {} found under urls, entry #{}' .format(key, no + 1)) continue # Store our schema schema = _schema.group('schema').lower() # Store our URL and Schema Regex _url = key if _url is None: # the loop above failed to match anything ConfigBase.logger.warning( 'Unsupported URL, entry #{}'.format(no + 1)) continue _results = plugins.url_to_dict(_url) if _results is None: # Setup dictionary _results = { # Minimum requirements 'schema': schema, } if isinstance(tokens, (list, tuple, set)): # populate and/or override any results populated by # parse_url() for entries in tokens: # Copy ourselves a template of our parsed URL as a base # to work with r = _results.copy() # We are a url string with additional unescaped options if isinstance(entries, dict): if six.PY2: _url, tokens = next(url.iteritems()) else: # six.PY3 _url, tokens = next(iter(url.items())) # Tags you just can't over-ride if 'schema' in entries: del entries['schema'] # support our special tokens (if they're present) if schema in plugins.SCHEMA_MAP: entries = ConfigBase.__extract_special_tokens( schema, entries) # Extend our dictionary with our new entries r.update(entries) # add our results to our global set results.append(r) elif isinstance(tokens, dict): # support our special tokens (if they're present) if schema in plugins.SCHEMA_MAP: tokens = ConfigBase.__extract_special_tokens( schema, tokens) # Copy ourselves a template of our parsed URL as a base to # work with r = _results.copy() # add our result set r.update(tokens) # add our results to our global set results.append(r) else: # add our results to our global set results.append(_results) else: # Unsupported ConfigBase.logger.warning( 'Unsupported Apprise YAML entry #{}'.format(no + 1)) continue # Track our entries entry = 0 while len(results): # Increment our entry count entry += 1 # Grab our first item _results = results.pop(0) # tag is a special keyword that is managed by Apprise object. 
# The below ensures our tags are set correctly if 'tag' in _results: # Tidy our list up _results['tag'] = \ set(parse_list(_results['tag'])) | global_tags else: # Just use the global settings _results['tag'] = global_tags for key in list(_results.keys()): # Strip out any tokens we know that we can't accept and # warn the user match = VALID_TOKEN.match(key) if not match: ConfigBase.logger.warning( 'Ignoring invalid token ({}) found in YAML ' 'configuration entry #{}, item #{}' .format(key, no + 1, entry)) del _results[key] ConfigBase.logger.trace( 'URL #{}: {} unpacked as:{}{}' .format(no + 1, url, os.linesep, os.linesep.join( ['{}="{}"'.format(k, a) for k, a in _results.items()]))) # Prepare our Asset Object _results['asset'] = asset try: # Attempt to create an instance of our plugin using the # parsed URL information plugin = plugins.SCHEMA_MAP[_results['schema']](**_results) # Create log entry of loaded URL ConfigBase.logger.debug( 'Loaded URL: {}'.format(plugin.url())) except Exception as e: # the arguments are invalid or can not be used. ConfigBase.logger.warning( 'Could not load Apprise YAML configuration ' 'entry #{}, item #{}' .format(no + 1, entry)) ConfigBase.logger.debug('Loading Exception: %s' % str(e)) continue # if we reach here, we successfully loaded our data servers.append(plugin) return (servers, configs) def pop(self, index=-1): """ Removes an indexed Notification Service from the stack and returns it. By default, the last element of the list is removed. """ if not isinstance(self._cached_servers, list): # Generate ourselves a list of content we can pull from self.servers() # Pop the element off of the stack return self._cached_servers.pop(index) @staticmethod def __extract_special_tokens(schema, tokens): """ This function takes a list of tokens and updates them to no longer include any special tokens such as +,-, and : - schema must be a valid schema of a supported plugin type - tokens must be a dictionary containing the yaml entries parsed. The idea here is we can post process a set of tokens provided in a YAML file where the user provided some of the special keywords. 
We effectivley look up what these keywords map to their appropriate value they're expected """ # Create a copy of our dictionary tokens = tokens.copy() for kw, meta in plugins.SCHEMA_MAP[schema]\ .template_kwargs.items(): # Determine our prefix: prefix = meta.get('prefix', '+') # Detect any matches matches = \ {k[1:]: str(v) for k, v in tokens.items() if k.startswith(prefix)} if not matches: # we're done with this entry continue if not isinstance(tokens.get(kw, None), dict): # Invalid; correct it tokens[kw] = dict() # strip out processed tokens tokens = {k: v for k, v in tokens.items() if not k.startswith(prefix)} # Update our entries tokens[kw].update(matches) # Return our tokens return tokens def __getitem__(self, index): """ Returns the indexed server entry associated with the loaded notification servers """ if not isinstance(self._cached_servers, list): # Generate ourselves a list of content we can pull from self.servers() return self._cached_servers[index] def __iter__(self): """ Returns an iterator to our server list """ if not isinstance(self._cached_servers, list): # Generate ourselves a list of content we can pull from self.servers() return iter(self._cached_servers) def __len__(self): """ Returns the total number of servers loaded """ if not isinstance(self._cached_servers, list): # Generate ourselves a list of content we can pull from self.servers() return len(self._cached_servers) def __bool__(self): """ Allows the Apprise object to be wrapped in an Python 3.x based 'if statement'. True is returned if our content was downloaded correctly. """ if not isinstance(self._cached_servers, list): # Generate ourselves a list of content we can pull from self.servers() return True if self._cached_servers else False def __nonzero__(self): """ Allows the Apprise object to be wrapped in an Python 2.x based 'if statement'. True is returned if our content was downloaded correctly. """ if not isinstance(self._cached_servers, list): # Generate ourselves a list of content we can pull from self.servers() return True if self._cached_servers else False
[((45, 14, 46, 43), 're.compile', 're.compile', ({(46, 4, 46, 36): '"""(?P<token>[a-z0-9][a-z0-9_]+)"""', (46, 38, 46, 42): 're.I'}, {}), "('(?P<token>[a-z0-9][a-z0-9_]+)', re.I)", False, 'import re\n'), ((75, 18, 75, 29), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((307, 28, 307, 39), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((413, 24, 416, 49), 're.compile', 're.compile', ({(414, 12, 416, 42): '"""^\\\\s*(?P<line>([;#]+(?P<comment>.*))|(?P<text>((?P<tag>[ \\\\t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|((?P<yaml>[a-z0-9]+):.*))?$"""', (416, 44, 416, 48): 're.I'}, {}), "(\n '^\\\\s*(?P<line>([;#]+(?P<comment>.*))|(?P<text>((?P<tag>[ \\\\t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|((?P<yaml>[a-z0-9]+):.*))?$'\n , re.I)", False, 'import re\n'), ((535, 24, 539, 52), 're.compile', 're.compile', ({(536, 12, 539, 45): '"""^\\\\s*(?P<line>([;#]+(?P<comment>.*))|(\\\\s*(?P<tags>[^=]+)=|=)?\\\\s*(?P<url>[a-z0-9]{2,9}://.*)|include\\\\s+(?P<config>.+))?\\\\s*$"""', (539, 47, 539, 51): 're.I'}, {}), "(\n '^\\\\s*(?P<line>([;#]+(?P<comment>.*))|(\\\\s*(?P<tags>[^=]+)=|=)?\\\\s*(?P<url>[a-z0-9]{2,9}://.*)|include\\\\s+(?P<config>.+))?\\\\s*$'\n , re.I)", False, 'import re\n'), ((187, 32, 187, 43), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((420, 22, 420, 49), 're.split', 're.split', ({(420, 31, 420, 39): '"""\\\\r*\\\\n"""', (420, 41, 420, 48): 'content'}, {}), "('\\\\r*\\\\n', content)", False, 'import re\n'), ((543, 22, 543, 49), 're.split', 're.split', ({(543, 31, 543, 39): '"""\\\\r*\\\\n"""', (543, 41, 543, 48): 'content'}, {}), "('\\\\r*\\\\n', content)", False, 'import re\n'), ((638, 21, 638, 63), 'yaml.load', 'yaml.load', (), '', False, 'import yaml\n'), ((332, 25, 332, 36), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((220, 27, 220, 45), 'os.path.isabs', 'os.path.isabs', ({(220, 41, 220, 44): 'url'}, {}), '(url)', False, 'import os\n'), ((223, 30, 223, 65), 'os.path.join', 'os.path.join', ({(223, 43, 223, 59): 'self.config_path', (223, 61, 223, 64): 'url'}, {}), '(self.config_path, url)', False, 'import os\n')]
manuel-fischer/ScrollRec
ffmpeg_util.py
ec5662d3f61630f939613481290a166133d23a20
import sys import subprocess from subprocess import Popen, PIPE AV_LOG_QUIET = "quiet" AV_LOG_PANIC = "panic" AV_LOG_FATAL = "fatal" AV_LOG_ERROR = "error" AV_LOG_WARNING = "warning" AV_LOG_INFO = "info" AV_LOG_VERBOSE = "verbose" AV_LOG_DEBUG = "debug" ffmpeg_loglevel = AV_LOG_ERROR IS_WIN32 = 'win32' in str(sys.platform).lower() SUBPROCESS_ARGS = {} if IS_WIN32: startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags = subprocess.CREATE_NEW_CONSOLE | subprocess.STARTF_USESHOWWINDOW startupinfo.wShowWindow = subprocess.SW_HIDE SUBPROCESS_ARGS['startupinfo'] = startupinfo def popen_ffmpeg(inner_args): cmd = [ 'ffmpeg', *inner_args, # logging '-loglevel', ffmpeg_loglevel, '-hide_banner', ] process = Popen(cmd, stdout=PIPE, stderr=PIPE, **SUBPROCESS_ARGS) stdout, stderr = process.communicate() print(stderr.decode(), end='', file=sys.stderr) return stdout, stderr
[((19, 18, 19, 42), 'subprocess.STARTUPINFO', 'subprocess.STARTUPINFO', ({}, {}), '()', False, 'import subprocess\n'), ((37, 14, 37, 69), 'subprocess.Popen', 'Popen', (), '', False, 'from subprocess import Popen, PIPE\n')]
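One possible way to call the helper above, assuming the module is importable as ffmpeg_util, ffmpeg is on PATH, and the file names (input.mp4, first_frame.png) are placeholders:

import ffmpeg_util

# ffmpeg writes the frame straight to the output file, so stdout is empty here;
# popen_ffmpeg already echoes any ffmpeg diagnostics to stderr.
stdout, stderr = ffmpeg_util.popen_ffmpeg([
    "-y",                 # overwrite the output file if it already exists
    "-i", "input.mp4",    # placeholder input path
    "-frames:v", "1",     # keep only the first video frame
    "first_frame.png",
])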
rizar/CLOSURE
setup.py
57f80d4e89fa281830bb9c8b6a7a2498747e727a
from setuptools import setup setup( name="nmn-iwp", version="0.1", keywords="", packages=["vr", "vr.models"] )
[((3, 0, 8, 1), 'setuptools.setup', 'setup', (), '', False, 'from setuptools import setup\n')]
lefevre-fraser/openmeta-mms
analysis_tools/PYTHON_RICARDO/output_ingress_egress/scripts/uniform_grid.py
08f3115e76498df1f8d70641d71f5c52cab4ce5f
""" Represent a triangulated surface using a 3D boolean grid""" import logging import numpy as np from rpl.tools.ray_tracing.bsp_tree_poly import BSP_Element from rpl.tools.geometry import geom_utils import data_io class BSP_Grid(object): def __init__(self, node_array, tris, allocate_step=100000): """ Store the triangles with an enumeration so that even when they are subdivided their identity is not lost. """ tri_nums = np.arange(len(tris), dtype=np.int32).reshape((len(tris), 1)) minus_ones = -np.ones((len(tris), 6), dtype=np.int32) self.tris = np.hstack((tris, minus_ones, tri_nums)) self.allocate_step = allocate_step self.node_array = node_array # Reference to the full list of nodes self._resize() self.next_free = len(node_array) self.split_cache = np.zeros(len(self.tris), dtype=np.int32) def _resize(self): """ Increase node array size by the allocate_step amount. """ self.array_size = len(self.node_array) + self.allocate_step self.node_array = np.concatenate((self.node_array, np.zeros((self.allocate_step, 3)))) def add_node(self, node): """ Adds a new node to the end of the node array (expanding if required). Returns the index of the newly added node. """ if self.next_free == self.array_size: self._resize() self.node_array[self.next_free] = node self.next_free += 1 return self.next_free - 1 def prepare_add(self, num_add_nodes): """ Make sure that ``num_add_nodes`` can be added later without needing a resize. Useful if adding nodes from within cython where resizing is tricky. """ if self.next_free + num_add_nodes >= self.array_size: self._resize() return self.next_free def make_grid(veh_surfs, settings): """ Make coordinates of voxelated grid based on overall list of vehicle surfaces """ ## Find overall bounding box x_min, x_max = 1e30, -1e30 y_min, y_max = 1e30, -1e30 z_min, z_max = 1e30, -1e30 for key, veh_surf in veh_surfs.items(): x_min, x_max = min(x_min, np.min(veh_surf["x"])), max(x_max, np.max(veh_surf["x"])) y_min, y_max = min(y_min, np.min(veh_surf["y"])), max(y_max, np.max(veh_surf["y"])) z_min, z_max = min(z_min, np.min(veh_surf["z"])), max(z_max, np.max(veh_surf["z"])) x_min, x_max = x_min - settings["voxel_size"], x_max + settings["voxel_size"] y_min, y_max = y_min - settings["voxel_size"], y_max + settings["voxel_size"] z_min, z_max = z_min - settings["voxel_size"], z_max + settings["voxel_size"] ########################################### # Create the uniformly spaced grid points x_grid = np.arange(x_min, x_max + settings["voxel_size"], settings["voxel_size"]) y_grid = np.arange(y_min, y_max + settings["voxel_size"], settings["voxel_size"]) z_grid = np.arange(z_min, z_max + settings["voxel_size"], settings["voxel_size"]) return x_grid, y_grid, z_grid def convert_geom(veh_surf, tr_mat): """ Rotate nodes using provided transformation matrix; convert xyz node dict to nodes array """ veh_surf["nodes"] = np.vstack((veh_surf["x"], veh_surf["y"], veh_surf["z"])).T veh_surf['nodes'] = np.dot(veh_surf['nodes'], tr_mat[:3, :3]) veh_surf["x"] = veh_surf['nodes'][:, 0] veh_surf["y"] = veh_surf['nodes'][:, 1] veh_surf["z"] = veh_surf['nodes'][:, 2] return veh_surf def find_occupied_voxels(surf, surf_mask, voxel_data): """ Voxels with any triangle from ``surf`` are considered occupied and or'ed with ``group_mask``. If the supplied ``occupied_voxels`` is None a voxel array is created and returned. 
""" nodes = surf["nodes"] tris = surf["tris"] x_pts, y_pts, z_pts = [voxel_data[k] for k in ("x_grid", "y_grid", "z_grid")] vox_size = voxel_data["vox_size"] ## Find the local extents of this part min_x, max_x = np.min(surf["x"]) - vox_size, np.max(surf["x"]) + vox_size min_y, max_y = np.min(surf["y"]) - vox_size, np.max(surf["y"]) + vox_size min_z, max_z = np.min(surf["z"]) - vox_size, np.max(surf["z"]) + vox_size b_tree = BSP_Grid(nodes, tris) # Create BSP tree elements- we're not using a tree, but we are using some of the functions b_x_root = BSP_Element(b_tree.tris, b_tree) size_i, size_j, size_k = len(x_pts), len(y_pts), len(z_pts) ## Create the occupied voxels if none were supplied if voxel_data["value"] is None: voxel_data["value"] = np.zeros((size_i - 1, size_j - 1, size_k - 1), dtype=np.uint32) occupied_voxels = voxel_data["value"] ## The [1:] is because to make n voxels in a given direction we need n-1 splits for i, x_pos in enumerate(x_pts[1:]): if x_pos < min_x: continue if x_pos > max_x: break b_above_x, b_below_x = b_x_root.split_at(0, x_pos) b_y_root = b_below_x for j, y_pos in enumerate(y_pts[1:]): if b_y_root is None: break if y_pos < min_y: continue if y_pos > max_y: break b_above_y, b_below_y = b_y_root.split_at(1, y_pos) b_z_root = b_below_y for k, z_pos in enumerate(z_pts[1:]): if b_z_root is None: break if z_pos < min_z: continue if z_pos > max_z: break b_above_z, b_below_z = b_z_root.split_at(2, z_pos) if not (b_below_z and (len(b_below_z.tris) == 0)): ## There is at least part of triangle here so mark as occupied occupied_voxels[i, j, k] |= surf_mask b_z_root = b_above_z b_y_root = b_above_y b_x_root = b_above_x return voxel_data ############# # Main code def main(vehicle_comp_coords, tr_mat, voxel_masks, settings): """ Perform voxelization for all vehicle geometries in a list of parts. Combine on a uniform grid. 
""" for key, veh_surf in vehicle_comp_coords.items(): # Convert coordinates and find overall best bounding box veh_surf = convert_geom(veh_surf, tr_mat) x_grid, y_grid, z_grid = make_grid(vehicle_comp_coords, settings) voxel_data = {"x_grid": x_grid, "y_grid": y_grid, "z_grid": z_grid, "vox_size": settings["voxel_size"], "csys_trans": tr_mat, "value": None} for key, veh_surf in vehicle_comp_coords.items(): # Build up the voxel_data logging.debug("Sampling component: {}".format(key)) ## Default mask is 1 for anything not in an identified set surf_mask = 1 for mask, geo_set in voxel_masks.items(): if veh_surf['part_class'] in geo_set: surf_mask |= mask voxel_data = find_occupied_voxels(veh_surf, surf_mask, voxel_data) return voxel_data if __name__ == "__main__": from rpl.tools.api import test_bench_api as tb_api SETTINGS = tb_api.load_settings("settings.js") DOORS = {'Hatch_Assembly_Rear_Ramp', 'Hatch_Assembly_Personnel_Door'} HATCHES = {'Hatch_Assembly_Driver_Commander', 'Hatch_Assembly_Cargo'} HULLS = {"Hull_Assembly_Parametric", 'Hull_Assembly_Example_With_Connector'} MANIKINS = {"Manikin"} # Special labels applied to specific types of voxels VOXEL_LABELS = {2: HULLS, 4: DOORS, 8: HATCHES, 16: MANIKINS} vehicle_surfs = tb_api.load_geometry(tb_api.get_all_geom_set() - MANIKINS, single_file=False) # Modify node coords so object aligns with cartesian axes of occ voxel grid, +z=up # Vector to rotate around is cross product of current z axis and sfc normal veh_up = np.array([0., 1., 0.]) rot_around = np.cross(veh_up, np.array([0, 0, 1])) rot_ang = -np.arccos(veh_up[2]) tr_mat = geom_utils.rotation_about_vector(rot_around, rot_ang) # voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS) vox_veh_folder = r"voxelated_models/vehicles/{}/{}".format(SETTINGS["run_id"], SETTINGS["voxel_size"]) vox_veh_file = "voxels_{}_vox{}_hacked".format(SETTINGS["run_id"], SETTINGS["voxel_size"]) try: voxel_data = data_io.load_array(vox_veh_folder, vox_veh_file, True) except: voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS) from mayavi import mlab xo, yo, zo = np.where(voxel_data["value"] == 1) plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo], voxel_data["y_grid"][yo], voxel_data["z_grid"][zo], color=(0.9, 0.9, 0.9), scale_mode="none", scale_factor=voxel_data["vox_size"], mode='cube', opacity=1) xo, yo, zo = np.where(voxel_data["value"] & 2) plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo], voxel_data["y_grid"][yo], voxel_data["z_grid"][zo], color=(1, 1, 1), scale_mode="none", scale_factor=voxel_data["vox_size"], mode='cube', opacity=0.05) xo, yo, zo = np.where(voxel_data["value"] & 4) plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo], voxel_data["y_grid"][yo], voxel_data["z_grid"][zo], color=(1.0, 0.5, 0.5), scale_mode="none", scale_factor=voxel_data["vox_size"], mode='cube', opacity=1) xo, yo, zo = np.where(voxel_data["value"] & 8) plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo], voxel_data["y_grid"][yo], voxel_data["z_grid"][zo], color=(0.6, 0.6, 1.0), scale_mode="none", scale_factor=voxel_data["vox_size"], mode='cube', opacity=1) # No manikins included, no need to plot them # xo, yo, zo = np.where(voxel_data["value"] & 16) # plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo], # voxel_data["y_grid"][yo], # voxel_data["z_grid"][zo], # color=(0.5, 1.0, 0.8), # scale_mode="none", scale_factor=voxel_data["vox_size"], # mode='cube', opacity=1.0) mlab.show() # Save the voxelated model of the vehicle (sans door and other excluded parts) 
data_io.save_multi_array(vox_veh_folder, vox_veh_file, voxel_data)
[((77, 13, 77, 85), 'numpy.arange', 'np.arange', ({(77, 23, 77, 28): 'x_min', (77, 30, 77, 60): "x_max + settings['voxel_size']", (77, 62, 77, 84): "settings['voxel_size']"}, {}), "(x_min, x_max + settings['voxel_size'], settings['voxel_size'])", True, 'import numpy as np\n'), ((78, 13, 78, 85), 'numpy.arange', 'np.arange', ({(78, 23, 78, 28): 'y_min', (78, 30, 78, 60): "y_max + settings['voxel_size']", (78, 62, 78, 84): "settings['voxel_size']"}, {}), "(y_min, y_max + settings['voxel_size'], settings['voxel_size'])", True, 'import numpy as np\n'), ((79, 13, 79, 85), 'numpy.arange', 'np.arange', ({(79, 23, 79, 28): 'z_min', (79, 30, 79, 60): "z_max + settings['voxel_size']", (79, 62, 79, 84): "settings['voxel_size']"}, {}), "(z_min, z_max + settings['voxel_size'], settings['voxel_size'])", True, 'import numpy as np\n'), ((89, 24, 89, 65), 'numpy.dot', 'np.dot', ({(89, 31, 89, 48): "veh_surf['nodes']", (89, 50, 89, 64): 'tr_mat[:3, :3]'}, {}), "(veh_surf['nodes'], tr_mat[:3, :3])", True, 'import numpy as np\n'), ((117, 15, 117, 47), 'rpl.tools.ray_tracing.bsp_tree_poly.BSP_Element', 'BSP_Element', ({(117, 27, 117, 38): 'b_tree.tris', (117, 40, 117, 46): 'b_tree'}, {}), '(b_tree.tris, b_tree)', False, 'from rpl.tools.ray_tracing.bsp_tree_poly import BSP_Element\n'), ((192, 15, 192, 50), 'rpl.tools.api.test_bench_api.load_settings', 'tb_api.load_settings', ({(192, 36, 192, 49): '"""settings.js"""'}, {}), "('settings.js')", True, 'from rpl.tools.api import test_bench_api as tb_api\n'), ((208, 13, 208, 35), 'numpy.array', 'np.array', ({(208, 22, 208, 34): '[0.0, 1.0, 0.0]'}, {}), '([0.0, 1.0, 0.0])', True, 'import numpy as np\n'), ((211, 13, 211, 66), 'rpl.tools.geometry.geom_utils.rotation_about_vector', 'geom_utils.rotation_about_vector', ({(211, 46, 211, 56): 'rot_around', (211, 58, 211, 65): 'rot_ang'}, {}), '(rot_around, rot_ang)', False, 'from rpl.tools.geometry import geom_utils\n'), ((226, 17, 226, 51), 'numpy.where', 'np.where', ({(226, 26, 226, 50): "voxel_data['value'] == 1"}, {}), "(voxel_data['value'] == 1)", True, 'import numpy as np\n'), ((227, 19, 232, 56), 'mayavi.mlab.points3d', 'mlab.points3d', (), '', False, 'from mayavi import mlab\n'), ((234, 17, 234, 50), 'numpy.where', 'np.where', ({(234, 26, 234, 49): "voxel_data['value'] & 2"}, {}), "(voxel_data['value'] & 2)", True, 'import numpy as np\n'), ((235, 19, 240, 59), 'mayavi.mlab.points3d', 'mlab.points3d', (), '', False, 'from mayavi import mlab\n'), ((242, 17, 242, 50), 'numpy.where', 'np.where', ({(242, 26, 242, 49): "voxel_data['value'] & 4"}, {}), "(voxel_data['value'] & 4)", True, 'import numpy as np\n'), ((243, 19, 248, 56), 'mayavi.mlab.points3d', 'mlab.points3d', (), '', False, 'from mayavi import mlab\n'), ((250, 17, 250, 50), 'numpy.where', 'np.where', ({(250, 26, 250, 49): "voxel_data['value'] & 8"}, {}), "(voxel_data['value'] & 8)", True, 'import numpy as np\n'), ((251, 19, 256, 56), 'mayavi.mlab.points3d', 'mlab.points3d', (), '', False, 'from mayavi import mlab\n'), ((266, 4, 266, 15), 'mayavi.mlab.show', 'mlab.show', ({}, {}), '()', False, 'from mayavi import mlab\n'), ((269, 4, 269, 70), 'data_io.save_multi_array', 'data_io.save_multi_array', ({(269, 29, 269, 43): 'vox_veh_folder', (269, 45, 269, 57): 'vox_veh_file', (269, 59, 269, 69): 'voxel_data'}, {}), '(vox_veh_folder, vox_veh_file, voxel_data)', False, 'import data_io\n'), ((19, 20, 19, 59), 'numpy.hstack', 'np.hstack', ({(19, 30, 19, 58): '(tris, minus_ones, tri_nums)'}, {}), '((tris, minus_ones, tri_nums))', True, 'import numpy as np\n'), ((88, 24, 
88, 80), 'numpy.vstack', 'np.vstack', ({(88, 34, 88, 79): "(veh_surf['x'], veh_surf['y'], veh_surf['z'])"}, {}), "((veh_surf['x'], veh_surf['y'], veh_surf['z']))", True, 'import numpy as np\n'), ((123, 30, 123, 93), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((209, 34, 209, 53), 'numpy.array', 'np.array', ({(209, 43, 209, 52): '[0, 0, 1]'}, {}), '([0, 0, 1])', True, 'import numpy as np\n'), ((210, 15, 210, 35), 'numpy.arccos', 'np.arccos', ({(210, 25, 210, 34): 'veh_up[2]'}, {}), '(veh_up[2])', True, 'import numpy as np\n'), ((221, 21, 221, 75), 'data_io.load_array', 'data_io.load_array', ({(221, 40, 221, 54): 'vox_veh_folder', (221, 56, 221, 68): 'vox_veh_file', (221, 70, 221, 74): 'True'}, {}), '(vox_veh_folder, vox_veh_file, True)', False, 'import data_io\n'), ((110, 19, 110, 36), 'numpy.min', 'np.min', ({(110, 26, 110, 35): "surf['x']"}, {}), "(surf['x'])", True, 'import numpy as np\n'), ((110, 49, 110, 66), 'numpy.max', 'np.max', ({(110, 56, 110, 65): "surf['x']"}, {}), "(surf['x'])", True, 'import numpy as np\n'), ((111, 19, 111, 36), 'numpy.min', 'np.min', ({(111, 26, 111, 35): "surf['y']"}, {}), "(surf['y'])", True, 'import numpy as np\n'), ((111, 49, 111, 66), 'numpy.max', 'np.max', ({(111, 56, 111, 65): "surf['y']"}, {}), "(surf['y'])", True, 'import numpy as np\n'), ((112, 19, 112, 36), 'numpy.min', 'np.min', ({(112, 26, 112, 35): "surf['z']"}, {}), "(surf['z'])", True, 'import numpy as np\n'), ((112, 49, 112, 66), 'numpy.max', 'np.max', ({(112, 56, 112, 65): "surf['z']"}, {}), "(surf['z'])", True, 'import numpy as np\n'), ((204, 41, 204, 66), 'rpl.tools.api.test_bench_api.get_all_geom_set', 'tb_api.get_all_geom_set', ({}, {}), '()', True, 'from rpl.tools.api import test_bench_api as tb_api\n'), ((33, 59, 33, 92), 'numpy.zeros', 'np.zeros', ({(33, 68, 33, 91): '(self.allocate_step, 3)'}, {}), '((self.allocate_step, 3))', True, 'import numpy as np\n'), ((67, 34, 67, 55), 'numpy.min', 'np.min', ({(67, 41, 67, 54): "veh_surf['x']"}, {}), "(veh_surf['x'])", True, 'import numpy as np\n'), ((67, 69, 67, 90), 'numpy.max', 'np.max', ({(67, 76, 67, 89): "veh_surf['x']"}, {}), "(veh_surf['x'])", True, 'import numpy as np\n'), ((68, 34, 68, 55), 'numpy.min', 'np.min', ({(68, 41, 68, 54): "veh_surf['y']"}, {}), "(veh_surf['y'])", True, 'import numpy as np\n'), ((68, 69, 68, 90), 'numpy.max', 'np.max', ({(68, 76, 68, 89): "veh_surf['y']"}, {}), "(veh_surf['y'])", True, 'import numpy as np\n'), ((69, 34, 69, 55), 'numpy.min', 'np.min', ({(69, 41, 69, 54): "veh_surf['z']"}, {}), "(veh_surf['z'])", True, 'import numpy as np\n'), ((69, 69, 69, 90), 'numpy.max', 'np.max', ({(69, 76, 69, 89): "veh_surf['z']"}, {}), "(veh_surf['z'])", True, 'import numpy as np\n')]
Hojung-Jeong/Silver-Bullet-Encryption-Tool
silver_bullet/crypto.py
5ea29b3cd78cf7488e0cbdcf4ea60d7c9151c2a7
''' >List of functions 1. encrypt(user_input,passphrase) - Encrypt the given string with the given passphrase. Returns cipher text and locked pad. 2. decrypt(cipher_text,locked_pad,passphrase) - Decrypt the cipher text encrypted with SBET. It requires cipher text, locked pad, and passphrase. ''' # CODE ======================================================================== import zlib import random from hashlib import sha1 from silver_bullet.TRNG import trlist from silver_bullet.contain_value import contain ascii_value=256 def ciphering(target_list,pad,decrypt=False): result=[] for counter in range(len(pad)): if decrypt==False: operated=contain(target_list[counter]+pad[counter],ascii_value) else: operated=contain(int(target_list[counter])-pad[counter],ascii_value) result.append(operated) return result def locker(pad,passphrase): cutter=round(len(passphrase)/2) splited=[passphrase[:cutter],passphrase[cutter:]] locker=[0 for counter in range(len(pad))] for element in splited: bloated_seed=sha1(element.encode()).hexdigest() random.seed(bloated_seed) locker=[contain(random.randrange(ascii_value)+element,ascii_value) for element in locker] holder=[] for counter in range(len(pad)): operated=int(pad[counter])^locker[counter] holder.append(operated) return holder def encrypt(user_input,passphrase): compressed=zlib.compress(user_input.encode()) ui_listed=list(compressed) pad=trlist(len(ui_listed),ascii_value) ct=ciphering(ui_listed,pad) lp=locker(pad,passphrase) cipher_text=' '.join(map(str,ct)) locked_pad=' '.join(map(str,lp)) return cipher_text, locked_pad def decrypt(cipher_text,locked_pad,passphrase): ct=cipher_text.split(' ') lp=locked_pad.split(' ') pad=locker(lp,passphrase) pt=ciphering(ct,pad,True) byted=bytes(pt) decompressed=zlib.decompress(byted).decode() return decompressed
[((41, 2, 41, 27), 'random.seed', 'random.seed', ({(41, 14, 41, 26): 'bloated_seed'}, {}), '(bloated_seed)', False, 'import random\n'), ((25, 12, 25, 66), 'silver_bullet.contain_value.contain', 'contain', ({(25, 20, 25, 53): 'target_list[counter] + pad[counter]', (25, 54, 25, 65): 'ascii_value'}, {}), '(target_list[counter] + pad[counter], ascii_value)', False, 'from silver_bullet.contain_value import contain\n'), ((74, 14, 74, 36), 'zlib.decompress', 'zlib.decompress', ({(74, 30, 74, 35): 'byted'}, {}), '(byted)', False, 'import zlib\n'), ((42, 18, 42, 47), 'random.randrange', 'random.randrange', ({(42, 35, 42, 46): 'ascii_value'}, {}), '(ascii_value)', False, 'import random\n')]
RavidLevi98/pyfire
pyfire/errors.py
404ae2082fd5be3ef652b3e15a66ad0d79b7a1b5
# -*- coding: utf-8 -*-
"""
    pyfire.errors
    ~~~~~~~~~~~~~~~~~~~~~~

    Holds the global used base errors

    :copyright: 2011 by the pyfire Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""

import xml.etree.ElementTree as ET


class XMPPProtocolError(Exception):
    """Base class for all errors that can be
       sent via XMPP Protocol to peer
    """

    def __init__(self, error_element, error_namespace, error_name=None):
        self.error_name = error_name
        self.element = ET.Element(error_element)
        self.element.set("xmlns", error_namespace)
        # per default all errors are recoverable
        self.unrecoverable = False

    def __unicode__(self):
        if self.error_name is not None:
            self.element.append(ET.Element(self.error_name))
        return unicode(ET.tostring(self.element))
[((22, 23, 22, 48), 'xml.etree.ElementTree.Element', 'ET.Element', ({(22, 34, 22, 47): 'error_element'}, {}), '(error_element)', True, 'import xml.etree.ElementTree as ET\n'), ((30, 23, 30, 48), 'xml.etree.ElementTree.tostring', 'ET.tostring', ({(30, 35, 30, 47): 'self.element'}, {}), '(self.element)', True, 'import xml.etree.ElementTree as ET\n'), ((29, 32, 29, 59), 'xml.etree.ElementTree.Element', 'ET.Element', ({(29, 43, 29, 58): 'self.error_name'}, {}), '(self.error_name)', True, 'import xml.etree.ElementTree as ET\n')]
thekitbag/starter-snake-python
app/nextMoveLogic.py
48d12d2fa61ecfc976cd5750316b1db49a641f7f
import random class Status(object): def getHeadPosition(gamedata): me = gamedata['you'] my_position = me['body'] head = my_position[0] return head def getMyLength(gamedata): me = gamedata['you'] my_position = me['body'] if my_position[0] == my_position[1] == my_position[2]: return 1 elif my_position[1] == my_position[2]: return 2 else: return len(my_position) def getMyDirection(gamedata): me = gamedata['you'] my_position = me['body'] if Status.getMyLength(gamedata) == 1: return 'none' elif my_position[0]['x'] > my_position[1]['x']: return 'right' elif my_position[0]['x'] < my_position[1]['x']: return 'left' elif my_position[0]['x'] == my_position[1]['x'] and my_position[0]['y'] < my_position[1]['y']: return 'up' else: return 'down' def getHealth(gamedata): pass def getBoardSize(gamedata): board_height = gamedata['board']['height'] board_width = gamedata['board']['width'] dimensions = {'height': board_height, 'width': board_width} return dimensions def getFoodPositions(gamedata): pass def getSnakesPositions(gamedata): pass class Assess(object): def wallProximity(gamedata): """returns proximity to a wall either parallel to, head-on or corner""" head = Status.getHeadPosition(gamedata) board_size = Status.getBoardSize(gamedata) direction = Status.getMyDirection(gamedata) height = board_size['height'] - 1 width = board_size['width'] - 1 #corners if head['x'] == 0 and head['y'] == 0: return {'type': 'corner', 'identifier': 'top left', 'direction': direction} elif head['x'] == 0 and head['y'] == height: return {'type': 'corner', 'identifier': 'bottom left', 'direction': direction} elif head['x'] == width and head['y'] == 0: return {'type': 'corner', 'identifier': 'top right', 'direction': direction} elif head['x'] == width and head['y'] == height: return {'type': 'corner', 'identifier': 'bottom right', 'direction': direction} #headons elif head['x'] == 0 and direction == 'left': return {'type': 'head-on', 'identifier': 'left', 'direction': direction} elif head['y'] == 0 and direction == 'up': return {'type': 'head-on', 'identifier': 'top', 'direction': direction} elif head['x'] == width and direction == 'right': return {'type': 'head-on', 'identifier': 'right', 'direction': direction} elif head['y'] == height and direction == 'down': return {'type': 'head-on', 'identifier': 'bottom', 'direction': direction} #parrallels elif head['x'] == 0 and direction == 'up' or head['x'] == 0 and direction == 'down': return {'type': 'parallel', 'identifier': 'left', 'direction': direction} elif head['y'] == 0 and direction == 'right' or head['y'] == 0 and direction =='left': return {'type': 'parallel', 'identifier': 'top', 'direction': direction} elif head['x'] == width and direction =='down' or head['x'] == width and direction == 'up': return {'type': 'parallel', 'identifier': 'right', 'direction': direction} elif head['y'] == height and direction == 'left' or head['y'] == height and direction == 'right': return {'type': 'parallel', 'identifier': 'bottom', 'direction': direction} else: return False def ownBodyProximity(gamedata): pass def killPossible(gamedata): pass def smallerSnakeNearby(gamedata): pass def biggerSnakeNearby(gamedata): pass def foodNearby(gamedata): pass class Action(object): def avoidDeath(): pass def chaseFood(): pass def fleeSnake(): pass def chaseSnake(): pass class Decision(object): def chooseBestOption(gamedata): options = ['up', 'down', 'right', 'left'] current_direction = Status.getMyDirection(gamedata) #first go if current_direction == 'none': choice = random.choice(options) #remove 
opposite direction if current_direction == 'up': options.remove('down') if current_direction == 'down': options.remove('up') if current_direction == 'right': options.remove('left') if current_direction == 'left': options.remove('right') #no danger keep going if Assess.wallProximity(gamedata) == False: choice = current_direction #in a corner elif Assess.wallProximity(gamedata)['type'] == 'corner': options.remove(current_direction) if Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4] == 'l': if 'up' in options: choice = 'down' else: choice = 'right' elif Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4] == 'r': if 'up' in options: choice = 'down' else: choice = 'left' #headon elif Assess.wallProximity(gamedata)['type'] == 'head-on': options.remove(current_direction) choice = random.choice(options) #parallel elif Assess.wallProximity(gamedata)['type'] == 'parallel': choice = current_direction else: print("shit") print(options) return choice
[((128, 12, 128, 34), 'random.choice', 'random.choice', ({(128, 26, 128, 33): 'options'}, {}), '(options)', False, 'import random\n'), ((156, 12, 156, 34), 'random.choice', 'random.choice', ({(156, 26, 156, 33): 'options'}, {}), '(options)', False, 'import random\n')]
gbtami/lichess-puzzler
generator/util.py
e7338b35f592481141acefe39c7aaa444b26aa9e
from dataclasses import dataclass import math import chess import chess.engine from model import EngineMove, NextMovePair from chess import Color, Board from chess.pgn import GameNode from chess.engine import SimpleEngine, Score nps = [] def material_count(board: Board, side: Color) -> int: values = { chess.PAWN: 1, chess.KNIGHT: 3, chess.BISHOP: 3, chess.ROOK: 5, chess.QUEEN: 9 } return sum(len(board.pieces(piece_type, side)) * value for piece_type, value in values.items()) def material_diff(board: Board, side: Color) -> int: return material_count(board, side) - material_count(board, not side) def is_up_in_material(board: Board, side: Color) -> bool: return material_diff(board, side) > 0 def get_next_move_pair(engine: SimpleEngine, node: GameNode, winner: Color, limit: chess.engine.Limit) -> NextMovePair: info = engine.analyse(node.board(), multipv = 2, limit = limit) global nps nps.append(info[0]["nps"]) nps = nps[-20:] # print(info) best = EngineMove(info[0]["pv"][0], info[0]["score"].pov(winner)) second = EngineMove(info[1]["pv"][0], info[1]["score"].pov(winner)) if len(info) > 1 else None return NextMovePair(node, winner, best, second) def avg_knps(): global nps return round(sum(nps) / len(nps) / 1000) if nps else 0 def win_chances(score: Score) -> float: """ winning chances from -1 to 1 https://graphsketch.com/?eqn1_color=1&eqn1_eqn=100+*+%282+%2F+%281+%2B+exp%28-0.004+*+x%29%29+-+1%29&eqn2_color=2&eqn2_eqn=&eqn3_color=3&eqn3_eqn=&eqn4_color=4&eqn4_eqn=&eqn5_color=5&eqn5_eqn=&eqn6_color=6&eqn6_eqn=&x_min=-1000&x_max=1000&y_min=-100&y_max=100&x_tick=100&y_tick=10&x_label_freq=2&y_label_freq=2&do_grid=0&do_grid=1&bold_labeled_lines=0&bold_labeled_lines=1&line_width=4&image_w=850&image_h=525 """ mate = score.mate() if mate is not None: return 1 if mate > 0 else -1 cp = score.score() return 2 / (1 + math.exp(-0.004 * cp)) - 1 if cp is not None else 0 CORRESP_TIME = 999999 def reject_by_time_control(line: str, has_master: bool, master_only: bool, bullet: bool, mates: bool) -> bool: if not line.startswith("[TimeControl "): return False if master_only and not has_master: return True try: seconds, increment = line[1:][:-2].split()[1].replace("\"", "").split("+") total = int(seconds) + int(increment) * 40 if master_only or mates: if bullet: return total < 30 or total >= 160 else: return total < 160 or total >= 480 else: return total < (160 if has_master else 480) except: return True def exclude_rating(line: str, mates: bool) -> bool: if not line.startswith("[WhiteElo ") and not line.startswith("[BlackElo "): return False try: return int(line[11:15]) < (1501 if mates else 1600) except: return True
[((31, 11, 31, 51), 'model.NextMovePair', 'NextMovePair', ({(31, 24, 31, 28): 'node', (31, 30, 31, 36): 'winner', (31, 38, 31, 42): 'best', (31, 44, 31, 50): 'second'}, {}), '(node, winner, best, second)', False, 'from model import EngineMove, NextMovePair\n'), ((46, 20, 46, 41), 'math.exp', 'math.exp', ({(46, 29, 46, 40): '(-0.004 * cp)'}, {}), '(-0.004 * cp)', False, 'import math\n')]
SkylerHoward/O
sleep.py
989246a5cdc297ab9f76cb6b26daebd799a03741
import time, morning
from datetime import datetime

def main():
	while True:
		a = time.mktime(datetime.now().timetuple())
		n = datetime.now()
		if n.hour == 6 and (n.minute-(n.minute%5)) == 15:
			return morning.main()
		time.sleep(300 - (time.mktime(datetime.now().timetuple())-a))
[((7, 6, 7, 20), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((9, 10, 9, 24), 'morning.main', 'morning.main', ({}, {}), '()', False, 'import time, morning\n'), ((6, 18, 6, 32), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((10, 32, 10, 46), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n')]
ourobouros/aws-sam-cli
tests/unit/commands/local/start_lambda/test_cli.py
3fba861f5106d604fde6d023923a9b83377a35d9
from unittest import TestCase from mock import patch, Mock from samcli.commands.local.start_lambda.cli import do_cli as start_lambda_cli from samcli.commands.local.cli_common.user_exceptions import UserException from samcli.commands.validate.lib.exceptions import InvalidSamDocumentException from samcli.commands.local.lib.exceptions import OverridesNotWellDefinedError class TestCli(TestCase): def setUp(self): self.template = "template" self.env_vars = "env-vars" self.debug_port = 123 self.debug_args = "args" self.debugger_path = "/test/path" self.docker_volume_basedir = "basedir" self.docker_network = "network" self.log_file = "logfile" self.skip_pull_image = True self.profile = "profile" self.region = "region" self.parameter_overrides = {} self.host = "host" self.port = 123 @patch("samcli.commands.local.start_lambda.cli.InvokeContext") @patch("samcli.commands.local.start_lambda.cli.LocalLambdaService") def test_cli_must_setup_context_and_start_service(self, local_lambda_service_mock, invoke_context_mock): # Mock the __enter__ method to return a object inside a context manager context_mock = Mock() invoke_context_mock.return_value.__enter__.return_value = context_mock service_mock = Mock() local_lambda_service_mock.return_value = service_mock self.call_cli() invoke_context_mock.assert_called_with(template_file=self.template, function_identifier=None, env_vars_file=self.env_vars, docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network, log_file=self.log_file, skip_pull_image=self.skip_pull_image, aws_profile=self.profile, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, aws_region=self.region, parameter_overrides=self.parameter_overrides) local_lambda_service_mock.assert_called_with(lambda_invoke_context=context_mock, port=self.port, host=self.host) service_mock.start.assert_called_with() @patch("samcli.commands.local.start_lambda.cli.InvokeContext") def test_must_raise_user_exception_on_invalid_sam_template(self, invoke_context_mock): invoke_context_mock.side_effect = InvalidSamDocumentException("bad template") with self.assertRaises(UserException) as context: self.call_cli() msg = str(context.exception) expected = "bad template" self.assertEquals(msg, expected) @patch("samcli.commands.local.start_lambda.cli.InvokeContext") def test_must_raise_user_exception_on_invalid_env_vars(self, invoke_context_mock): invoke_context_mock.side_effect = OverridesNotWellDefinedError("bad env vars") with self.assertRaises(UserException) as context: self.call_cli() msg = str(context.exception) expected = "bad env vars" self.assertEquals(msg, expected) def call_cli(self): start_lambda_cli(ctx=None, host=self.host, port=self.port, template=self.template, env_vars=self.env_vars, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network, log_file=self.log_file, skip_pull_image=self.skip_pull_image, profile=self.profile, region=self.region, parameter_overrides=self.parameter_overrides)
[((29, 5, 29, 66), 'mock.patch', 'patch', ({(29, 11, 29, 65): '"""samcli.commands.local.start_lambda.cli.InvokeContext"""'}, {}), "('samcli.commands.local.start_lambda.cli.InvokeContext')", False, 'from mock import patch, Mock\n'), ((30, 5, 30, 71), 'mock.patch', 'patch', ({(30, 11, 30, 70): '"""samcli.commands.local.start_lambda.cli.LocalLambdaService"""'}, {}), "('samcli.commands.local.start_lambda.cli.LocalLambdaService')", False, 'from mock import patch, Mock\n'), ((62, 5, 62, 66), 'mock.patch', 'patch', ({(62, 11, 62, 65): '"""samcli.commands.local.start_lambda.cli.InvokeContext"""'}, {}), "('samcli.commands.local.start_lambda.cli.InvokeContext')", False, 'from mock import patch, Mock\n'), ((73, 5, 73, 66), 'mock.patch', 'patch', ({(73, 11, 73, 65): '"""samcli.commands.local.start_lambda.cli.InvokeContext"""'}, {}), "('samcli.commands.local.start_lambda.cli.InvokeContext')", False, 'from mock import patch, Mock\n'), ((34, 23, 34, 29), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import patch, Mock\n'), ((37, 23, 37, 29), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import patch, Mock\n'), ((64, 42, 64, 85), 'samcli.commands.validate.lib.exceptions.InvalidSamDocumentException', 'InvalidSamDocumentException', ({(64, 70, 64, 84): '"""bad template"""'}, {}), "('bad template')", False, 'from samcli.commands.validate.lib.exceptions import InvalidSamDocumentException\n'), ((75, 42, 75, 86), 'samcli.commands.local.lib.exceptions.OverridesNotWellDefinedError', 'OverridesNotWellDefinedError', ({(75, 71, 75, 85): '"""bad env vars"""'}, {}), "('bad env vars')", False, 'from samcli.commands.local.lib.exceptions import OverridesNotWellDefinedError\n'), ((85, 8, 99, 70), 'samcli.commands.local.start_lambda.cli.do_cli', 'start_lambda_cli', (), '', True, 'from samcli.commands.local.start_lambda.cli import do_cli as start_lambda_cli\n')]
Varun487/CapstoneProject_TradingSystem
restapi/services/Utils/test_getters.py
b21e3f2c6c5e75596927666bf65294a2014babcf
from django.test import TestCase import pandas as pd from .getters import Getter from .converter import Converter from strategies.models import Company from strategies.models import IndicatorType class GetDataTestCase(TestCase): def setUp(self) -> None: # Dummy company data Company.objects.create(name='abc', ticker='ABC', description='desc') Company.objects.create(name='abcd', ticker='ABCD', description='desc') Company.objects.create(name='abce', ticker='ABCE', description='desc') # Dummy indicator data IndicatorType.objects.create(name='abc', description='desc') IndicatorType.objects.create(name='abcd', description='desc') IndicatorType.objects.create(name='abce', description='desc') self.param_list_company = {"name": "abc", "ticker": 'ABC', "description": 'desc'} self.param_list_indicator_type = {"name": "abc", "description": 'desc'} def test_input_none(self): """No inputs are given""" self.assertEquals(Getter().table_name, None) self.assertEquals(Getter().param_list, None) self.assertEquals(Getter().df_flag, False) def test_input_all(self): """All inputs provided""" self.assertEquals(Getter(table_name=Company, df_flag=True, param_list=self.param_list_company).table_name, Company) self.assertEquals(Getter(table_name=Company, df_flag=True, param_list=self.param_list_company).param_list, self.param_list_company) self.assertEquals(Getter(table_name=Company, df_flag=True, param_list=self.param_list_company).df_flag, True) def test_input_df_flag(self): """Only df_flag input is provided""" self.assertEquals(Getter(df_flag=True).df_flag, True) self.assertEquals(Getter(df_flag=False).df_flag, False) def test_get_data_correct_obj_list(self): """Whether it returns correct obj list when input is correct""" # Returns correct object list for company self.assertEquals(Getter(table_name=Company, df_flag=False, param_list=self.param_list_company).get_data(), list(Company.objects.filter(**self.param_list_company))) self.assertEquals(Getter(table_name=Company, param_list={"description": 'desc'}).get_data(), list(Company.objects.filter(**{"description": 'desc'}))) self.assertEquals(Getter(table_name=Company, param_list={"name": "abcd"}).get_data(), list(Company.objects.filter(**{"name": "abcd"}))) # Returns correct object list for Indicator self.assertEquals( Getter(table_name=IndicatorType, df_flag=False, param_list=self.param_list_indicator_type).get_data(), list(IndicatorType.objects.filter(**self.param_list_indicator_type))) self.assertEquals(Getter(table_name=IndicatorType, param_list={"description": 'desc'}).get_data(), list(IndicatorType.objects.filter(**{"description": 'desc'}))) self.assertEquals(Getter(table_name=IndicatorType, param_list={"name": "abcd"}).get_data(), list(IndicatorType.objects.filter(**{"name": "abcd"}))) def test_get_data_correct_df(self): """Whether it returns correct df when input is correct""" # Returns correct df for company self.assertEquals(Getter(table_name=Company, df_flag=True, param_list=self.param_list_company).get_data() .equals( Converter(obj_list=list(Company.objects.filter(**self.param_list_company))).to_df() ), True) self.assertEquals(Getter(table_name=Company, df_flag=True, param_list={"description": 'desc'}).get_data() .equals( Converter(obj_list=list(Company.objects.filter(**{"description": 'desc'}))).to_df() ), True) self.assertEquals(Getter(table_name=Company, df_flag=True, param_list={"name": "abcd"}).get_data() .equals( Converter(obj_list=list(Company.objects.filter(**{"name": "abcd"}))).to_df() ), True) # Returns correct df for indicator type 
self.assertEquals( Getter(table_name=IndicatorType, df_flag=True, param_list=self.param_list_indicator_type).get_data() .equals( Converter(obj_list=list(IndicatorType.objects.filter(**self.param_list_indicator_type))).to_df() ), True) self.assertEquals(Getter(table_name=IndicatorType, df_flag=True, param_list={"description": 'desc'}).get_data() .equals( Converter(obj_list=list(IndicatorType.objects.filter(**{"description": 'desc'}))).to_df() ), True) self.assertEquals(Getter(table_name=IndicatorType, df_flag=True, param_list={"name": "abcd"}).get_data() .equals( Converter(obj_list=list(IndicatorType.objects.filter(**{"name": "abcd"}))).to_df() ), True) def test_get_data_invalid_inputs(self): self.assertRaises(TypeError, Getter(table_name="IndicatorTyp", df_flag=True, param_list={"name": "abcd"}).get_data) self.assertRaises(TypeError, Getter(table_name=IndicatorType, param_list={"nam": "abcd"}).get_data) self.assertRaises(TypeError, Getter(table_name=Company, param_list={"name": "abcd", "res": "abcd"}))
[((16, 8, 16, 76), 'strategies.models.Company.objects.create', 'Company.objects.create', (), '', False, 'from strategies.models import Company\n'), ((17, 8, 17, 78), 'strategies.models.Company.objects.create', 'Company.objects.create', (), '', False, 'from strategies.models import Company\n'), ((18, 8, 18, 78), 'strategies.models.Company.objects.create', 'Company.objects.create', (), '', False, 'from strategies.models import Company\n'), ((21, 8, 21, 68), 'strategies.models.IndicatorType.objects.create', 'IndicatorType.objects.create', (), '', False, 'from strategies.models import IndicatorType\n'), ((22, 8, 22, 69), 'strategies.models.IndicatorType.objects.create', 'IndicatorType.objects.create', (), '', False, 'from strategies.models import IndicatorType\n'), ((23, 8, 23, 69), 'strategies.models.IndicatorType.objects.create', 'IndicatorType.objects.create', (), '', False, 'from strategies.models import IndicatorType\n'), ((51, 31, 51, 80), 'strategies.models.Company.objects.filter', 'Company.objects.filter', ({}, {}), '(**self.param_list_company)', False, 'from strategies.models import Company\n'), ((53, 31, 53, 80), 'strategies.models.Company.objects.filter', 'Company.objects.filter', ({}, {}), "(**{'description': 'desc'})", False, 'from strategies.models import Company\n'), ((55, 31, 55, 73), 'strategies.models.Company.objects.filter', 'Company.objects.filter', ({}, {}), "(**{'name': 'abcd'})", False, 'from strategies.models import Company\n'), ((60, 17, 60, 79), 'strategies.models.IndicatorType.objects.filter', 'IndicatorType.objects.filter', ({}, {}), '(**self.param_list_indicator_type)', False, 'from strategies.models import IndicatorType\n'), ((62, 31, 62, 86), 'strategies.models.IndicatorType.objects.filter', 'IndicatorType.objects.filter', ({}, {}), "(**{'description': 'desc'})", False, 'from strategies.models import IndicatorType\n'), ((64, 31, 64, 79), 'strategies.models.IndicatorType.objects.filter', 'IndicatorType.objects.filter', ({}, {}), "(**{'name': 'abcd'})", False, 'from strategies.models import IndicatorType\n'), ((71, 36, 71, 85), 'strategies.models.Company.objects.filter', 'Company.objects.filter', ({}, {}), '(**self.param_list_company)', False, 'from strategies.models import Company\n'), ((75, 36, 75, 85), 'strategies.models.Company.objects.filter', 'Company.objects.filter', ({}, {}), "(**{'description': 'desc'})", False, 'from strategies.models import Company\n'), ((79, 36, 79, 78), 'strategies.models.Company.objects.filter', 'Company.objects.filter', ({}, {}), "(**{'name': 'abcd'})", False, 'from strategies.models import Company\n'), ((86, 40, 86, 102), 'strategies.models.IndicatorType.objects.filter', 'IndicatorType.objects.filter', ({}, {}), '(**self.param_list_indicator_type)', False, 'from strategies.models import IndicatorType\n'), ((90, 36, 90, 91), 'strategies.models.IndicatorType.objects.filter', 'IndicatorType.objects.filter', ({}, {}), "(**{'description': 'desc'})", False, 'from strategies.models import IndicatorType\n'), ((94, 36, 94, 84), 'strategies.models.IndicatorType.objects.filter', 'IndicatorType.objects.filter', ({}, {}), "(**{'name': 'abcd'})", False, 'from strategies.models import IndicatorType\n')]
abhatikar/training_extensions
pytorch_toolkit/ote/ote/modules/trainers/mmdetection.py
1c96e0f5f39688f8b79735e8dfa90646afc3d5e6
""" Copyright (c) 2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import json import logging import subprocess import tempfile from ote import MMDETECTION_TOOLS from .base import BaseTrainer from ..registry import TRAINERS @TRAINERS.register_module() class MMDetectionTrainer(BaseTrainer): def __init__(self): super(MMDetectionTrainer, self).__init__() def _get_tools_dir(self): return MMDETECTION_TOOLS def _add_extra_args(self, cfg, config_path, update_config): if self.__is_clustering_needed(cfg): update_config = self.__cluster(cfg, config_path, update_config) return update_config @staticmethod def __is_clustering_needed(cfg): if cfg.total_epochs > 0: return False if not hasattr(cfg.model, 'bbox_head') or not cfg.model.bbox_head.type == 'SSDHead': return False if not cfg.model.bbox_head.anchor_generator.type == 'SSDAnchorGeneratorClustered': return False return True @staticmethod def __cluster(cfg, config_path, update_config): logging.info('Clustering started...') widths = cfg.model.bbox_head.anchor_generator.widths n_clust = 0 for w in widths: n_clust += len(w) if isinstance(w, (list, tuple)) else 1 n_clust = ' --n_clust ' + str(n_clust) group_as = '' if isinstance(widths[0], (list, tuple)): group_as = ' --group_as ' + ' '.join([str(len(w)) for w in widths]) config = ' --config ' + config_path tmp_file = tempfile.NamedTemporaryFile(delete=False) out = f' --out {tmp_file.name}' if 'pipeline' in cfg.data.train: img_shape = [t for t in cfg.data.train.pipeline if t['type'] == 'Resize'][0][ 'img_scale'] else: img_shape = [t for t in cfg.data.train.dataset.pipeline if t['type'] == 'Resize'][0][ 'img_scale'] img_shape = f' --image_size_wh {img_shape[0]} {img_shape[1]}' subprocess.run(f'python {MMDETECTION_TOOLS}/cluster_boxes.py' f'{config}' f'{n_clust}' f'{group_as}' f'{update_config}' f'{img_shape}' f'{out}'.split(' '), check=True) with open(tmp_file.name) as src_file: content = json.load(src_file) widths, heights = content['widths'], content['heights'] if not update_config: update_config = ' --update_config' update_config += f' model.bbox_head.anchor_generator.widths={str(widths).replace(" ", "")}' update_config += f' model.bbox_head.anchor_generator.heights={str(heights).replace(" ", "")}' logging.info('... clustering completed.') return update_config
[((54, 8, 54, 45), 'logging.info', 'logging.info', ({(54, 21, 54, 44): '"""Clustering started..."""'}, {}), "('Clustering started...')", False, 'import logging\n'), ((67, 19, 67, 60), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (), '', False, 'import tempfile\n'), ((95, 8, 95, 49), 'logging.info', 'logging.info', ({(95, 21, 95, 48): '"""... clustering completed."""'}, {}), "('... clustering completed.')", False, 'import logging\n'), ((88, 22, 88, 41), 'json.load', 'json.load', ({(88, 32, 88, 40): 'src_file'}, {}), '(src_file)', False, 'import json\n')]
BT-OpenSource/bt-betalab
svn-go-stats/transform.py
af5a1b0d778c1746312149f62da0c4159f387293
import sys import json import subprocess import re import statistics def get_complexity(): # Load the cyclomatic complexity info cyclostats = subprocess.check_output(['./gocyclo', 'repo']).decode("utf-8") results = re.findall('([0-9]+)\s([^\s]+)\s([^\s]+)\s([^:]+):([0-9]+):([0-9]+)', cyclostats) # Setup a dictionary in which to keep track of the complixities # for each file files = {} # Build an array of complexities for each file for result in results: if result[3] in files: files[result[3]].append(int(result[0])) else: files[result[3]] = [int(result[0])] # Pick out the median value (picking the highest of the two # middle entries if needed) for each file for name, values in files.items(): files[name] = statistics.median_high(values) return files def get_duplicate_const_strings(): # Load the const string duplication info cyclostats = subprocess.check_output(['./goconst', './repo/...']).decode("utf-8") results = re.findall('([^:]+).+ other occurrence\(s\) of \"(.+)\" found in: ([^:]+).+\n?', cyclostats) files = {} # Build an array containing the number of potentially duplicated # constants by file for result in results: if result[0] in files: files[result[0]] = files[result[0]]+1 else: files[result[0]] = 1 return files # Main service body if __name__ == "__main__": complexity = get_complexity() duplicate_const_strings = get_duplicate_const_strings() files = set() files.update(complexity.keys()) files.update(duplicate_const_strings.keys()) result = [] for f in files: result.append({ 'filename': f, 'cyclomaticComplexity': complexity[f] if f in complexity else 0, 'duplicateConstStrings': duplicate_const_strings[f] if f in duplicate_const_strings else 0 }) print(json.dumps(result))
[((11, 14, 11, 95), 're.findall', 're.findall', ({(11, 25, 11, 82): '"""([0-9]+)\\\\s([^\\\\s]+)\\\\s([^\\\\s]+)\\\\s([^:]+):([0-9]+):([0-9]+)"""', (11, 84, 11, 94): 'cyclostats'}, {}), "('([0-9]+)\\\\s([^\\\\s]+)\\\\s([^\\\\s]+)\\\\s([^:]+):([0-9]+):([0-9]+)',\n cyclostats)", False, 'import re\n'), ((35, 14, 35, 106), 're.findall', 're.findall', ({(35, 25, 35, 93): '"""([^:]+).+ other occurrence\\\\(s\\\\) of "(.+)" found in: ([^:]+).+\n?"""', (35, 95, 35, 105): 'cyclostats'}, {}), '(\n """([^:]+).+ other occurrence\\\\(s\\\\) of "(.+)" found in: ([^:]+).+\n?""",\n cyclostats)', False, 'import re\n'), ((27, 22, 27, 52), 'statistics.median_high', 'statistics.median_high', ({(27, 45, 27, 51): 'values'}, {}), '(values)', False, 'import statistics\n'), ((68, 10, 68, 28), 'json.dumps', 'json.dumps', ({(68, 21, 68, 27): 'result'}, {}), '(result)', False, 'import json\n'), ((10, 17, 10, 63), 'subprocess.check_output', 'subprocess.check_output', ({(10, 41, 10, 62): "['./gocyclo', 'repo']"}, {}), "(['./gocyclo', 'repo'])", False, 'import subprocess\n'), ((34, 17, 34, 69), 'subprocess.check_output', 'subprocess.check_output', ({(34, 41, 34, 68): "['./goconst', './repo/...']"}, {}), "(['./goconst', './repo/...'])", False, 'import subprocess\n')]
yangyangxcf/parso
test/test_python_errors.py
e496b07b6342f6182225a60aad6031d7ad08f24d
""" Testing if parso finds syntax errors and indentation errors. """ import sys import warnings import pytest import parso from parso._compatibility import is_pypy from .failing_examples import FAILING_EXAMPLES, indent, build_nested if is_pypy: # The errors in PyPy might be different. Just skip the module for now. pytestmark = pytest.mark.skip() def _get_error_list(code, version=None): grammar = parso.load_grammar(version=version) tree = grammar.parse(code) return list(grammar.iter_errors(tree)) def assert_comparison(code, error_code, positions): errors = [(error.start_pos, error.code) for error in _get_error_list(code)] assert [(pos, error_code) for pos in positions] == errors @pytest.mark.parametrize('code', FAILING_EXAMPLES) def test_python_exception_matches(code): wanted, line_nr = _get_actual_exception(code) errors = _get_error_list(code) actual = None if errors: error, = errors actual = error.message assert actual in wanted # Somehow in Python3.3 the SyntaxError().lineno is sometimes None assert line_nr is None or line_nr == error.start_pos[0] def test_non_async_in_async(): """ This example doesn't work with FAILING_EXAMPLES, because the line numbers are not always the same / incorrect in Python 3.8. """ if sys.version_info[:2] < (3, 5): pytest.skip() # Raises multiple errors in previous versions. code = 'async def foo():\n def nofoo():[x async for x in []]' wanted, line_nr = _get_actual_exception(code) errors = _get_error_list(code) if errors: error, = errors actual = error.message assert actual in wanted if sys.version_info[:2] < (3, 8): assert line_nr == error.start_pos[0] else: assert line_nr == 0 # For whatever reason this is zero in Python 3.8+ @pytest.mark.parametrize( ('code', 'positions'), [ ('1 +', [(1, 3)]), ('1 +\n', [(1, 3)]), ('1 +\n2 +', [(1, 3), (2, 3)]), ('x + 2', []), ('[\n', [(2, 0)]), ('[\ndef x(): pass', [(2, 0)]), ('[\nif 1: pass', [(2, 0)]), ('1+?', [(1, 2)]), ('?', [(1, 0)]), ('??', [(1, 0)]), ('? ?', [(1, 0)]), ('?\n?', [(1, 0), (2, 0)]), ('? * ?', [(1, 0)]), ('1 + * * 2', [(1, 4)]), ('?\n1\n?', [(1, 0), (3, 0)]), ] ) def test_syntax_errors(code, positions): assert_comparison(code, 901, positions) @pytest.mark.parametrize( ('code', 'positions'), [ (' 1', [(1, 0)]), ('def x():\n 1\n 2', [(3, 0)]), ('def x():\n 1\n 2', [(3, 0)]), ('def x():\n1', [(2, 0)]), ] ) def test_indentation_errors(code, positions): assert_comparison(code, 903, positions) def _get_actual_exception(code): with warnings.catch_warnings(): # We don't care about warnings where locals/globals misbehave here. # It's as simple as either an error or not. warnings.filterwarnings('ignore', category=SyntaxWarning) try: compile(code, '<unknown>', 'exec') except (SyntaxError, IndentationError) as e: wanted = e.__class__.__name__ + ': ' + e.msg line_nr = e.lineno except ValueError as e: # The ValueError comes from byte literals in Python 2 like '\x' # that are oddly enough not SyntaxErrors. wanted = 'SyntaxError: (value error) ' + str(e) line_nr = None else: assert False, "The piece of code should raise an exception." # SyntaxError # Python 2.6 has a bit different error messages here, so skip it. if sys.version_info[:2] == (2, 6) and wanted == 'SyntaxError: unexpected EOF while parsing': wanted = 'SyntaxError: invalid syntax' if wanted == 'SyntaxError: non-keyword arg after keyword arg': # The python 3.5+ way, a bit nicer. 
wanted = 'SyntaxError: positional argument follows keyword argument' elif wanted == 'SyntaxError: assignment to keyword': return [wanted, "SyntaxError: can't assign to keyword", 'SyntaxError: cannot assign to __debug__'], line_nr elif wanted == 'SyntaxError: assignment to None': # Python 2.6 does has a slightly different error. wanted = 'SyntaxError: cannot assign to None' elif wanted == 'SyntaxError: can not assign to __debug__': # Python 2.6 does has a slightly different error. wanted = 'SyntaxError: cannot assign to __debug__' elif wanted == 'SyntaxError: can use starred expression only as assignment target': # Python 3.4/3.4 have a bit of a different warning than 3.5/3.6 in # certain places. But in others this error makes sense. return [wanted, "SyntaxError: can't use starred expression here"], line_nr elif wanted == 'SyntaxError: f-string: unterminated string': wanted = 'SyntaxError: EOL while scanning string literal' elif wanted == 'SyntaxError: f-string expression part cannot include a backslash': return [ wanted, "SyntaxError: EOL while scanning string literal", "SyntaxError: unexpected character after line continuation character", ], line_nr elif wanted == "SyntaxError: f-string: expecting '}'": wanted = 'SyntaxError: EOL while scanning string literal' elif wanted == 'SyntaxError: f-string: empty expression not allowed': wanted = 'SyntaxError: invalid syntax' elif wanted == "SyntaxError: f-string expression part cannot include '#'": wanted = 'SyntaxError: invalid syntax' elif wanted == "SyntaxError: f-string: single '}' is not allowed": wanted = 'SyntaxError: invalid syntax' return [wanted], line_nr def test_default_except_error_postition(): # For this error the position seemed to be one line off, but that doesn't # really matter. code = 'try: pass\nexcept: pass\nexcept X: pass' wanted, line_nr = _get_actual_exception(code) error, = _get_error_list(code) assert error.message in wanted assert line_nr != error.start_pos[0] # I think this is the better position. 
assert error.start_pos[0] == 2 def test_statically_nested_blocks(): def build(code, depth): if depth == 0: return code new_code = 'if 1:\n' + indent(code) return build(new_code, depth - 1) def get_error(depth, add_func=False): code = build('foo', depth) if add_func: code = 'def bar():\n' + indent(code) errors = _get_error_list(code) if errors: assert errors[0].message == 'SyntaxError: too many statically nested blocks' return errors[0] return None assert get_error(19) is None assert get_error(19, add_func=True) is None assert get_error(20) assert get_error(20, add_func=True) def test_future_import_first(): def is_issue(code, *args): code = code % args return bool(_get_error_list(code)) i1 = 'from __future__ import division' i2 = 'from __future__ import absolute_import' assert not is_issue(i1) assert not is_issue(i1 + ';' + i2) assert not is_issue(i1 + '\n' + i2) assert not is_issue('"";' + i1) assert not is_issue('"";' + i1) assert not is_issue('""\n' + i1) assert not is_issue('""\n%s\n%s', i1, i2) assert not is_issue('""\n%s;%s', i1, i2) assert not is_issue('"";%s;%s ', i1, i2) assert not is_issue('"";%s\n%s ', i1, i2) assert is_issue('1;' + i1) assert is_issue('1\n' + i1) assert is_issue('"";1\n' + i1) assert is_issue('""\n%s\nfrom x import a\n%s', i1, i2) assert is_issue('%s\n""\n%s', i1, i2) def test_named_argument_issues(works_not_in_py): message = works_not_in_py.get_error_message('def foo(*, **dict): pass') message = works_not_in_py.get_error_message('def foo(*): pass') if works_not_in_py.version.startswith('2'): assert message == 'SyntaxError: invalid syntax' else: assert message == 'SyntaxError: named arguments must follow bare *' works_not_in_py.assert_no_error_in_passing('def foo(*, name): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1, **dct): pass') def test_escape_decode_literals(each_version): """ We are using internal functions to assure that unicode/bytes escaping is without syntax errors. Here we make a bit of quality assurance that this works through versions, because the internal function might change over time. """ def get_msg(end, to=1): base = "SyntaxError: (unicode error) 'unicodeescape' " \ "codec can't decode bytes in position 0-%s: " % to return base + end def get_msgs(escape): return (get_msg('end of string in escape sequence'), get_msg(r"truncated %s escape" % escape)) error, = _get_error_list(r'u"\x"', version=each_version) assert error.message in get_msgs(r'\xXX') error, = _get_error_list(r'u"\u"', version=each_version) assert error.message in get_msgs(r'\uXXXX') error, = _get_error_list(r'u"\U"', version=each_version) assert error.message in get_msgs(r'\UXXXXXXXX') error, = _get_error_list(r'u"\N{}"', version=each_version) assert error.message == get_msg(r'malformed \N character escape', to=2) error, = _get_error_list(r'u"\N{foo}"', version=each_version) assert error.message == get_msg(r'unknown Unicode character name', to=6) # Finally bytes. error, = _get_error_list(r'b"\x"', version=each_version) wanted = r'SyntaxError: (value error) invalid \x escape' if sys.version_info >= (3, 0): # The positioning information is only available in Python 3. 
wanted += ' at position 0' assert error.message == wanted def test_too_many_levels_of_indentation(): assert not _get_error_list(build_nested('pass', 99)) assert _get_error_list(build_nested('pass', 100)) base = 'def x():\n if x:\n' assert not _get_error_list(build_nested('pass', 49, base=base)) assert _get_error_list(build_nested('pass', 50, base=base)) @pytest.mark.parametrize( 'code', [ "f'{*args,}'", r'f"\""', r'f"\\\""', r'fr"\""', r'fr"\\\""', r"print(f'Some {x:.2f} and some {y}')", ] ) def test_valid_fstrings(code): assert not _get_error_list(code, version='3.6') @pytest.mark.parametrize( ('code', 'message'), [ ("f'{1+}'", ('invalid syntax')), (r'f"\"', ('invalid syntax')), (r'fr"\"', ('invalid syntax')), ] ) def test_invalid_fstrings(code, message): """ Some fstring errors are handled differntly in 3.6 and other versions. Therefore check specifically for these errors here. """ error, = _get_error_list(code, version='3.6') assert message in error.message @pytest.mark.parametrize( 'code', [ "from foo import (\nbar,\n rab,\n)", "from foo import (bar, rab, )", ] ) def test_trailing_comma(code): errors = _get_error_list(code) assert not errors
[((30, 1, 30, 50), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(30, 25, 30, 31): '"""code"""', (30, 33, 30, 49): 'FAILING_EXAMPLES'}, {}), "('code', FAILING_EXAMPLES)", False, 'import pytest\n'), ((67, 1, 85, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(68, 4, 68, 25): "('code', 'positions')", (68, 27, 84, 5): "[('1 +', [(1, 3)]), ('1 +\\n', [(1, 3)]), ('1 +\\n2 +', [(1, 3), (2, 3)]), (\n 'x + 2', []), ('[\\n', [(2, 0)]), ('[\\ndef x(): pass', [(2, 0)]), (\n '[\\nif 1: pass', [(2, 0)]), ('1+?', [(1, 2)]), ('?', [(1, 0)]), ('??',\n [(1, 0)]), ('? ?', [(1, 0)]), ('?\\n?', [(1, 0), (2, 0)]), ('? * ?', [(1,\n 0)]), ('1 + * * 2', [(1, 4)]), ('?\\n1\\n?', [(1, 0), (3, 0)])]"}, {}), "(('code', 'positions'), [('1 +', [(1, 3)]), ('1 +\\n',\n [(1, 3)]), ('1 +\\n2 +', [(1, 3), (2, 3)]), ('x + 2', []), ('[\\n', [(2, \n 0)]), ('[\\ndef x(): pass', [(2, 0)]), ('[\\nif 1: pass', [(2, 0)]), (\n '1+?', [(1, 2)]), ('?', [(1, 0)]), ('??', [(1, 0)]), ('? ?', [(1, 0)]),\n ('?\\n?', [(1, 0), (2, 0)]), ('? * ?', [(1, 0)]), ('1 + * * 2', [(1, 4)]\n ), ('?\\n1\\n?', [(1, 0), (3, 0)])])", False, 'import pytest\n'), ((90, 1, 97, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(91, 4, 91, 25): "('code', 'positions')", (91, 27, 96, 5): '[(\' 1\', [(1, 0)]), ("""def x():\n 1\n 2""", [(3, 0)]), (\n \'def x():\\n 1\\n 2\', [(3, 0)]), (\'def x():\\n1\', [(2, 0)])]'}, {}), '((\'code\', \'positions\'), [(\' 1\', [(1, 0)]), (\n """def x():\n 1\n 2""", [(3, 0)]), (\'def x():\\n 1\\n 2\', [(3, 0)]), (\n \'def x():\\n1\', [(2, 0)])])', False, 'import pytest\n'), ((282, 1, 291, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(283, 4, 283, 10): '"""code"""', (283, 12, 290, 5): '["f\'{*args,}\'", \'f"\\\\""\', \'f"\\\\\\\\\\\\""\', \'fr"\\\\""\', \'fr"\\\\\\\\\\\\""\',\n "print(f\'Some {x:.2f} and some {y}\')"]'}, {}), '(\'code\', ["f\'{*args,}\'", \'f"\\\\""\', \'f"\\\\\\\\\\\\""\',\n \'fr"\\\\""\', \'fr"\\\\\\\\\\\\""\', "print(f\'Some {x:.2f} and some {y}\')"])', False, 'import pytest\n'), ((296, 1, 302, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(297, 4, 297, 23): "('code', 'message')", (297, 25, 301, 5): '[("f\'{1+}\'", \'invalid syntax\'), (\'f"\\\\"\', \'invalid syntax\'), (\'fr"\\\\"\',\n \'invalid syntax\')]'}, {}), '((\'code\', \'message\'), [("f\'{1+}\'", \'invalid syntax\'),\n (\'f"\\\\"\', \'invalid syntax\'), (\'fr"\\\\"\', \'invalid syntax\')])', False, 'import pytest\n'), ((312, 1, 317, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(313, 4, 313, 10): '"""code"""', (313, 12, 316, 5): '["""from foo import (\nbar,\n rab,\n)""", \'from foo import (bar, rab, )\']'}, {}), '(\'code\', ["""from foo import (\nbar,\n rab,\n)""",\n \'from foo import (bar, rab, )\'])', False, 'import pytest\n'), ((16, 17, 16, 35), 'pytest.mark.skip', 'pytest.mark.skip', ({}, {}), '()', False, 'import pytest\n'), ((20, 14, 20, 49), 'parso.load_grammar', 'parso.load_grammar', (), '', False, 'import parso\n'), ((50, 8, 50, 21), 'pytest.skip', 'pytest.skip', ({}, {}), '()', False, 'import pytest\n'), ((103, 9, 103, 34), 'warnings.catch_warnings', 'warnings.catch_warnings', ({}, {}), '()', False, 'import warnings\n'), ((106, 8, 106, 65), 'warnings.filterwarnings', 'warnings.filterwarnings', (), '', False, 'import warnings\n')]
koji-hirono/pytk-shogi-replayer
shogitk/usikif.py
a10819a797faecbee5c7b0654beb3694eb522840
# -*- coding: utf-8 -*- from __future__ import unicode_literals from shogitk.shogi import Coords, Move, BLACK, WHITE, DROP, PROMOTE RANKNUM = { 'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8, 'i': 9 } def decoder(f): color = [BLACK, WHITE] step = 0 for line in f: line = line.strip() if line[0] == '[': pass elif line[0].isdigit(): src = Coords(int(line[0]), RANKNUM[line[1]]) dst = Coords(int(line[2]), RANKNUM[line[3]]) if line[-1] == '+': modifier = PROMOTE else: modifier = None yield Move(color[step & 1], dst, src, None, modifier=modifier) step += 1 elif line[0].isupper(): dst = Coords(int(line[2]), RANKNUM[line[3]]) yield Move(color[step & 1], dst, None, line[0], modifier=DROP) step += 1
[((32, 18, 32, 74), 'shogitk.shogi.Move', 'Move', (), '', False, 'from shogitk.shogi import Coords, Move, BLACK, WHITE, DROP, PROMOTE\n'), ((36, 18, 36, 74), 'shogitk.shogi.Move', 'Move', (), '', False, 'from shogitk.shogi import Coords, Move, BLACK, WHITE, DROP, PROMOTE\n')]
ideal-money/etherbank-cli
etherbank_cli/oracles.py
d957daa13aa951331cadc35c246c1ce8459ca8df
import click from . import utils @click.group() def main(): "Simple CLI for oracles to work with Ether dollar" pass @main.command() @click.option('--ether-price', type=float, help="The ether price in ether dollar") @click.option('--collateral-ratio', type=float, help="The collateral ratio") @click.option( '--liquidation-duration', type=int, help="The liquidation duration in minutes") @click.option( '--private-key', callback=utils.check_account, help='The privat key to sign the transaction') def vote(ether_price, collateral_ratio, liquidation_duration, private_key): "Vote on the variable for setting up Ether Bank" assert [ether_price, collateral_ratio, liquidation_duration ].count(None) == 2, "You should set one variable per vote" if ether_price: var_code = 0 value = int(ether_price * 100) elif collateral_ratio: var_code = 1 value = int(collateral_ratio * 1000) elif liquidation_duration: var_code = 2 value = liquidation_duration * 60 func = utils.contracts['oracles'].functions.vote(var_code, value) tx_hash = utils.send_transaction(func, 0, private_key) return tx_hash @main.command() @click.option('--oracle', required=True, help="The oracle's address") @click.option('--score', type=int, required=True, help="The oracle's score") @click.option( '--private-key', callback=utils.check_account, help='The privat key to sign the transaction') def set_score(oracle, score, private_key): "Edit oracle's score" oracle = utils.w3.toChecksumAddress(oracle) func = utils.contracts['oracles'].functions.setScore(oracle, score) tx_hash = utils.send_transaction(func, 0, private_key) return tx_hash @main.command() @click.option( '--private-key', callback=utils.check_account, help='The privat key to sign the transaction') def finish_recruiting(private_key): "Set recruiting as finished" func = utils.contracts['oracles'].functions.finishRecruiting() tx_hash = utils.send_transaction(func, 0, private_key) return tx_hash if __name__ == '__main__': main()
[((5, 1, 5, 14), 'click.group', 'click.group', ({}, {}), '()', False, 'import click\n'), ((12, 1, 12, 82), 'click.option', 'click.option', (), '', False, 'import click\n'), ((13, 1, 13, 76), 'click.option', 'click.option', (), '', False, 'import click\n'), ((14, 1, 17, 47), 'click.option', 'click.option', (), '', False, 'import click\n'), ((18, 1, 21, 50), 'click.option', 'click.option', (), '', False, 'import click\n'), ((42, 1, 42, 69), 'click.option', 'click.option', (), '', False, 'import click\n'), ((43, 1, 43, 76), 'click.option', 'click.option', (), '', False, 'import click\n'), ((44, 1, 47, 50), 'click.option', 'click.option', (), '', False, 'import click\n'), ((58, 1, 61, 50), 'click.option', 'click.option', (), '', False, 'import click\n')]
MrKaStep/csc230-grader
src/grader/machine.py
559846f4d921c5c4be6b6e9ba8629fb24b448e41
import getpass from plumbum import local from plumbum.machines.paramiko_machine import ParamikoMachine from plumbum.path.utils import copy def _once(f): res = None def wrapped(*args, **kwargs): nonlocal res if res is None: res = f(*args, **kwargs) return res return wrapped @_once def get_remote_machine_with_password(host, user): password = getpass.getpass(prompt=f"Password for {user}@{host}: ", stream=None) rem = ParamikoMachine(host, user=user, password=password) return rem @_once def get_remote_machine(host, user, keyfile): rem = ParamikoMachine(host, user=user, keyfile=keyfile) return rem def get_local_machine(): return local def with_machine_rule(cls): old_init = cls.__init__ def new_init(self, config): if "machine" not in config: machine_type = "local" else: machine_type = config["machine"]["type"] if machine_type == "local": self.machine = get_local_machine() self.files_to_copy = None elif machine_type == "remote": if "keyfile" in config["machine"]: self.machine = get_remote_machine(config["machine"]["host"], config["machine"]["user"], config["machine"]["keyfile"]) else: self.machine = get_remote_machine_with_password(config["machine"]["host"], config["machine"]["user"]) self.files_to_copy = config["machine"].get("files_to_copy") else: raise ValueError(f"Invalid machine type: {config['machine']['type']}") self.machine_type = machine_type old_init(self, config) cls.__init__ = new_init old_apply = cls.apply def new_apply(self, project): with self.machine.tempdir() as tempdir: project_path = tempdir / "project" project_path.mkdir() existing_files = set([f.name for f in project.root.list()]) if self.files_to_copy: for fname in self.files_to_copy: if fname in existing_files: copy(project.root / fname, project_path / fname) else: for f in project.files(): if f.name in existing_files: copy(f.path, project_path / f.name) with self.machine.cwd(project_path): self.session = self.machine.session() self.session.run(f"cd {project_path}") return old_apply(self, project) cls.apply = new_apply return cls
[((17, 15, 17, 83), 'getpass.getpass', 'getpass.getpass', (), '', False, 'import getpass\n'), ((19, 10, 19, 61), 'plumbum.machines.paramiko_machine.ParamikoMachine', 'ParamikoMachine', (), '', False, 'from plumbum.machines.paramiko_machine import ParamikoMachine\n'), ((25, 10, 25, 59), 'plumbum.machines.paramiko_machine.ParamikoMachine', 'ParamikoMachine', (), '', False, 'from plumbum.machines.paramiko_machine import ParamikoMachine\n'), ((66, 24, 66, 72), 'plumbum.path.utils.copy', 'copy', ({(66, 29, 66, 49): '(project.root / fname)', (66, 51, 66, 71): '(project_path / fname)'}, {}), '(project.root / fname, project_path / fname)', False, 'from plumbum.path.utils import copy\n'), ((70, 24, 70, 59), 'plumbum.path.utils.copy', 'copy', ({(70, 29, 70, 35): 'f.path', (70, 37, 70, 58): '(project_path / f.name)'}, {}), '(f.path, project_path / f.name)', False, 'from plumbum.path.utils import copy\n')]
legna7/Python
Mundo 1/Ex33.py
52e0b642d1b7acc592ec82dd360c5697fb0765db
salario = float(input('digite o seu salario: '))
aumento = (salario + (salario * 15)/100 if salario <= 1250 else salario + (salario * 10)/100)
print(aumento)
[]
andreax79/airflow-code-editor
tests/test_tree.py
031170387496bbc6d540179c6c2f1765e1e70694
#!/usr/bin/env python import os import os.path import airflow import airflow.plugins_manager from airflow import configuration from flask import Flask from unittest import TestCase, main from airflow_code_editor.commons import PLUGIN_NAME from airflow_code_editor.tree import ( get_tree, ) assert airflow.plugins_manager app = Flask(__name__) class TestTree(TestCase): def setUp(self): self.root_dir = os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) def test_tree(self): with app.app_context(): t = get_tree() self.assertTrue(len(t) > 0) self.assertTrue('git' in (x['id'] for x in t)) def test_tags(self): with app.app_context(): t = get_tree("tags") self.assertIsNotNone(t) def test_local_branches(self): with app.app_context(): t = get_tree("local-branches") self.assertIsNotNone(t) def test_remote_branches(self): with app.app_context(): t = get_tree("remote-branches") self.assertIsNotNone(t) def test_files(self): with app.app_context(): t = get_tree("files") self.assertTrue( len([x.get('id') for x in t if x.get('id') == 'test_utils.py']) == 1 ) t = get_tree("files/folder") self.assertTrue(len([x.get('id') for x in t if x.get('id') == '1']) == 1) def test_git(self): with app.app_context(): t = get_tree("git/HEAD") self.assertTrue(t is not None) class TestTreeGitDisabled(TestCase): def setUp(self): self.root_dir = os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) configuration.conf.set(PLUGIN_NAME, 'git_enabled', 'False') def test_tree(self): with app.app_context(): t = get_tree() self.assertTrue(len(t) > 0) self.assertTrue('git' not in (x['id'] for x in t)) t = get_tree("tags") self.assertEqual(t, []) t = get_tree("local-branches") self.assertEqual(t, []) t = get_tree("remote-branches") self.assertEqual(t, []) t = get_tree("files") self.assertTrue( len([x.get('id') for x in t if x.get('id') == 'test_utils.py']) == 1 ) t = get_tree("files/folder") self.assertTrue(len([x.get('id') for x in t if x.get('id') == '1']) == 1) if __name__ == '__main__': main()
[((16, 6, 16, 21), 'flask.Flask', 'Flask', ({(16, 12, 16, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask\n'), ((88, 4, 88, 10), 'unittest.main', 'main', ({}, {}), '()', False, 'from unittest import TestCase, main\n'), ((22, 8, 22, 69), 'airflow.configuration.conf.set', 'configuration.conf.set', ({(22, 31, 22, 42): 'PLUGIN_NAME', (22, 44, 22, 59): '"""git_init_repo"""', (22, 61, 22, 68): '"""False"""'}, {}), "(PLUGIN_NAME, 'git_init_repo', 'False')", False, 'from airflow import configuration\n'), ((23, 8, 23, 76), 'airflow.configuration.conf.set', 'configuration.conf.set', ({(23, 31, 23, 42): 'PLUGIN_NAME', (23, 44, 23, 60): '"""root_directory"""', (23, 62, 23, 75): 'self.root_dir'}, {}), "(PLUGIN_NAME, 'root_directory', self.root_dir)", False, 'from airflow import configuration\n'), ((64, 8, 64, 69), 'airflow.configuration.conf.set', 'configuration.conf.set', ({(64, 31, 64, 42): 'PLUGIN_NAME', (64, 44, 64, 59): '"""git_init_repo"""', (64, 61, 64, 68): '"""False"""'}, {}), "(PLUGIN_NAME, 'git_init_repo', 'False')", False, 'from airflow import configuration\n'), ((65, 8, 65, 76), 'airflow.configuration.conf.set', 'configuration.conf.set', ({(65, 31, 65, 42): 'PLUGIN_NAME', (65, 44, 65, 60): '"""root_directory"""', (65, 62, 65, 75): 'self.root_dir'}, {}), "(PLUGIN_NAME, 'root_directory', self.root_dir)", False, 'from airflow import configuration\n'), ((66, 8, 66, 67), 'airflow.configuration.conf.set', 'configuration.conf.set', ({(66, 31, 66, 42): 'PLUGIN_NAME', (66, 44, 66, 57): '"""git_enabled"""', (66, 59, 66, 66): '"""False"""'}, {}), "(PLUGIN_NAME, 'git_enabled', 'False')", False, 'from airflow import configuration\n'), ((21, 40, 21, 66), 'os.path.realpath', 'os.path.realpath', ({(21, 57, 21, 65): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((27, 16, 27, 26), 'airflow_code_editor.tree.get_tree', 'get_tree', ({}, {}), '()', False, 'from airflow_code_editor.tree import get_tree\n'), ((33, 16, 33, 32), 'airflow_code_editor.tree.get_tree', 'get_tree', ({(33, 25, 33, 31): '"""tags"""'}, {}), "('tags')", False, 'from airflow_code_editor.tree import get_tree\n'), ((38, 16, 38, 42), 'airflow_code_editor.tree.get_tree', 'get_tree', ({(38, 25, 38, 41): '"""local-branches"""'}, {}), "('local-branches')", False, 'from airflow_code_editor.tree import get_tree\n'), ((43, 16, 43, 43), 'airflow_code_editor.tree.get_tree', 'get_tree', ({(43, 25, 43, 42): '"""remote-branches"""'}, {}), "('remote-branches')", False, 'from airflow_code_editor.tree import get_tree\n'), ((48, 16, 48, 33), 'airflow_code_editor.tree.get_tree', 'get_tree', ({(48, 25, 48, 32): '"""files"""'}, {}), "('files')", False, 'from airflow_code_editor.tree import get_tree\n'), ((52, 16, 52, 40), 'airflow_code_editor.tree.get_tree', 'get_tree', ({(52, 25, 52, 39): '"""files/folder"""'}, {}), "('files/folder')", False, 'from airflow_code_editor.tree import get_tree\n'), ((57, 16, 57, 36), 'airflow_code_editor.tree.get_tree', 'get_tree', ({(57, 25, 57, 35): '"""git/HEAD"""'}, {}), "('git/HEAD')", False, 'from airflow_code_editor.tree import get_tree\n'), ((63, 40, 63, 66), 'os.path.realpath', 'os.path.realpath', ({(63, 57, 63, 65): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((70, 16, 70, 26), 'airflow_code_editor.tree.get_tree', 'get_tree', ({}, {}), '()', False, 'from airflow_code_editor.tree import get_tree\n'), ((73, 16, 73, 32), 'airflow_code_editor.tree.get_tree', 'get_tree', ({(73, 25, 73, 31): '"""tags"""'}, {}), "('tags')", False, 'from airflow_code_editor.tree import get_tree\n'), ((75, 
16, 75, 42), 'airflow_code_editor.tree.get_tree', 'get_tree', ({(75, 25, 75, 41): '"""local-branches"""'}, {}), "('local-branches')", False, 'from airflow_code_editor.tree import get_tree\n'), ((77, 16, 77, 43), 'airflow_code_editor.tree.get_tree', 'get_tree', ({(77, 25, 77, 42): '"""remote-branches"""'}, {}), "('remote-branches')", False, 'from airflow_code_editor.tree import get_tree\n'), ((79, 16, 79, 33), 'airflow_code_editor.tree.get_tree', 'get_tree', ({(79, 25, 79, 32): '"""files"""'}, {}), "('files')", False, 'from airflow_code_editor.tree import get_tree\n'), ((83, 16, 83, 40), 'airflow_code_editor.tree.get_tree', 'get_tree', ({(83, 25, 83, 39): '"""files/folder"""'}, {}), "('files/folder')", False, 'from airflow_code_editor.tree import get_tree\n')]
greenape/flask-jwt-extended
examples/token_freshness.py
11ac3bf0937ee199aea7d6dc47c748bef9bf1d2f
from quart import Quart, jsonify, request from quart_jwt_extended import ( JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required, ) app = Quart(__name__) app.config["JWT_SECRET_KEY"] = "super-secret" # Change this! jwt = JWTManager(app) # Standard login endpoint. Will return a fresh access token and # a refresh token @app.route("/login", methods=["POST"]) async def login(): username = (await request.get_json()).get("username", None) password = (await request.get_json()).get("password", None) if username != "test" or password != "test": return {"msg": "Bad username or password"}, 401 # create_access_token supports an optional 'fresh' argument, # which marks the token as fresh or non-fresh accordingly. # As we just verified their username and password, we are # going to mark the token as fresh here. ret = { "access_token": create_access_token(identity=username, fresh=True), "refresh_token": create_refresh_token(identity=username), } return ret, 200 # Refresh token endpoint. This will generate a new access token from # the refresh token, but will mark that access token as non-fresh, # as we do not actually verify a password in this endpoint. @app.route("/refresh", methods=["POST"]) @jwt_refresh_token_required async def refresh(): current_user = get_jwt_identity() new_token = create_access_token(identity=current_user, fresh=False) ret = {"access_token": new_token} return ret, 200 # Fresh login endpoint. This is designed to be used if we need to # make a fresh token for a user (by verifying they have the # correct username and password). Unlike the standard login endpoint, # this will only return a new access token, so that we don't keep # generating new refresh tokens, which entirely defeats their point. @app.route("/fresh-login", methods=["POST"]) async def fresh_login(): username = (await request.get_json()).get("username", None) password = (await request.get_json()).get("password", None) if username != "test" or password != "test": return {"msg": "Bad username or password"}, 401 new_token = create_access_token(identity=username, fresh=True) ret = {"access_token": new_token} return ret, 200 # Any valid JWT can access this endpoint @app.route("/protected", methods=["GET"]) @jwt_required async def protected(): username = get_jwt_identity() return dict(logged_in_as=username), 200 # Only fresh JWTs can access this endpoint @app.route("/protected-fresh", methods=["GET"]) @fresh_jwt_required async def protected_fresh(): username = get_jwt_identity() return dict(fresh_logged_in_as=username), 200 if __name__ == "__main__": app.run()
[((12, 6, 12, 21), 'quart.Quart', 'Quart', ({(12, 12, 12, 20): '__name__'}, {}), '(__name__)', False, 'from quart import Quart, jsonify, request\n'), ((15, 6, 15, 21), 'quart_jwt_extended.JWTManager', 'JWTManager', ({(15, 17, 15, 20): 'app'}, {}), '(app)', False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((44, 19, 44, 37), 'quart_jwt_extended.get_jwt_identity', 'get_jwt_identity', ({}, {}), '()', False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((45, 16, 45, 71), 'quart_jwt_extended.create_access_token', 'create_access_token', (), '', False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((62, 16, 62, 66), 'quart_jwt_extended.create_access_token', 'create_access_token', (), '', False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((71, 15, 71, 33), 'quart_jwt_extended.get_jwt_identity', 'get_jwt_identity', ({}, {}), '()', False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((79, 15, 79, 33), 'quart_jwt_extended.get_jwt_identity', 'get_jwt_identity', ({}, {}), '()', False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((32, 24, 32, 74), 'quart_jwt_extended.create_access_token', 'create_access_token', (), '', False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((33, 25, 33, 64), 'quart_jwt_extended.create_refresh_token', 'create_refresh_token', (), '', False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((22, 22, 22, 40), 'quart.request.get_json', 'request.get_json', ({}, {}), '()', False, 'from quart import Quart, jsonify, request\n'), ((23, 22, 23, 40), 'quart.request.get_json', 'request.get_json', ({}, {}), '()', False, 'from quart import Quart, jsonify, request\n'), ((57, 22, 57, 40), 'quart.request.get_json', 'request.get_json', ({}, {}), '()', False, 'from quart import Quart, jsonify, request\n'), ((58, 22, 58, 40), 'quart.request.get_json', 'request.get_json', ({}, {}), '()', False, 'from quart import Quart, jsonify, request\n')]
umchemurziev/Practics
env/lib/python3.7/site-packages/tinvest/typedefs.py
82b49f9d58e67f1ecff9e6303e7d914bc1905730
from datetime import datetime
from typing import Any, Dict, Union

__all__ = ('AnyDict',)  # __all__ must be a sequence of names, not a bare string

AnyDict = Dict[str, Any]  # pragma: no mutate
datetime_or_str = Union[datetime, str]  # pragma: no mutate
[]
PipelineAI/models
keras/linear/model/pipeline_train.py
d8df07877aa8b10ce9b84983bb440af75e84dca7
import os os.environ['KERAS_BACKEND'] = 'theano' os.environ['THEANO_FLAGS'] = 'floatX=float32,device=cpu' import cloudpickle as pickle import pipeline_invoke import pandas as pd import numpy as np import keras from keras.layers import Input, Dense from keras.models import Model from keras.models import save_model, load_model from sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer if __name__ == '__main__': df = pd.read_csv("../input/training/training.csv") df["People per Television"] = pd.to_numeric(df["People per Television"],errors='coerce') df = df.dropna() x = df["People per Television"].values.reshape(-1,1).astype(np.float64) y = df["People per Physician"].values.reshape(-1,1).astype(np.float64) # min-max -1,1 sc = MinMaxScaler(feature_range=(-1,1)) x_ = sc.fit_transform(x) y_ = sc.fit_transform(y) inputs = Input(shape=(1,)) preds = Dense(1,activation='linear')(inputs) model = Model(inputs=inputs,outputs=preds) sgd = keras.optimizers.SGD() model.compile(optimizer=sgd ,loss='mse') model.fit(x_,y_, batch_size=1, verbose=1, epochs=10, shuffle=False) save_model(model, 'state/keras_theano_linear_model_state.h5') # model_pkl_path = 'model.pkl' # with open(model_pkl_path, 'wb') as fh: # pickle.dump(pipeline_invoke, fh)
[((17, 9, 17, 54), 'pandas.read_csv', 'pd.read_csv', ({(17, 21, 17, 53): '"""../input/training/training.csv"""'}, {}), "('../input/training/training.csv')", True, 'import pandas as pd\n'), ((18, 34, 18, 92), 'pandas.to_numeric', 'pd.to_numeric', (), '', True, 'import pandas as pd\n'), ((25, 9, 25, 43), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', (), '', False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer\n'), ((30, 13, 30, 30), 'keras.layers.Input', 'Input', (), '', False, 'from keras.layers import Input, Dense\n'), ((33, 12, 33, 46), 'keras.models.Model', 'Model', (), '', False, 'from keras.models import Model\n'), ((34, 10, 34, 32), 'keras.optimizers.SGD', 'keras.optimizers.SGD', ({}, {}), '()', False, 'import keras\n'), ((38, 4, 38, 65), 'keras.models.save_model', 'save_model', ({(38, 15, 38, 20): 'model', (38, 22, 38, 64): '"""state/keras_theano_linear_model_state.h5"""'}, {}), "(model, 'state/keras_theano_linear_model_state.h5')", False, 'from keras.models import save_model, load_model\n'), ((31, 12, 31, 40), 'keras.layers.Dense', 'Dense', (), '', False, 'from keras.layers import Input, Dense\n')]
RatJuggler/led-shim-effects
tests/effects/test_cheerlights.py
3c63f5f2ce3f35f52e784489deb9212757c18cd2
from unittest import TestCase from unittest.mock import Mock, patch import sys sys.modules['smbus'] = Mock() # Mock the hardware layer to avoid errors. from ledshimdemo.canvas import Canvas from ledshimdemo.effects.cheerlights import CheerLightsEffect class TestCheerLights(TestCase): TEST_CANVAS_SIZE = 3 # type: int def test_cheerlight_call(self): canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) self.assertIsNone(effect.get_colour_from_channel("http://ejiferfneciudwedwojcmeiocnw.com")) @patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel', return_value=None) def test_effect_failed_cheerlights(self, patch_function): canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) effect.compose() patch_function.assert_called_once() for i in range(canvas.get_size()): self.assertEqual(canvas.get_pixel(i), canvas.BLANK_PIXEL) def test_effect_working_cheerlights(self): canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) # Must check before and after in case it changes during the test. before = effect.get_colour_from_channel(effect.URL) effect.compose() after = effect.get_colour_from_channel(effect.URL) self.assertRegex(repr(effect), "^CheerLights\\(Colour:({0}|{1})\\)$".format(before, after))
[((5, 23, 5, 29), 'unittest.mock.Mock', 'Mock', ({}, {}), '()', False, 'from unittest.mock import Mock, patch\n'), ((20, 5, 20, 106), 'unittest.mock.patch', 'patch', (), '', False, 'from unittest.mock import Mock, patch\n'), ((16, 17, 16, 46), 'ledshimdemo.canvas.Canvas', 'Canvas', ({(16, 24, 16, 45): 'self.TEST_CANVAS_SIZE'}, {}), '(self.TEST_CANVAS_SIZE)', False, 'from ledshimdemo.canvas import Canvas\n'), ((17, 17, 17, 42), 'ledshimdemo.effects.cheerlights.CheerLightsEffect', 'CheerLightsEffect', ({(17, 35, 17, 41): 'canvas'}, {}), '(canvas)', False, 'from ledshimdemo.effects.cheerlights import CheerLightsEffect\n'), ((22, 17, 22, 46), 'ledshimdemo.canvas.Canvas', 'Canvas', ({(22, 24, 22, 45): 'self.TEST_CANVAS_SIZE'}, {}), '(self.TEST_CANVAS_SIZE)', False, 'from ledshimdemo.canvas import Canvas\n'), ((23, 17, 23, 42), 'ledshimdemo.effects.cheerlights.CheerLightsEffect', 'CheerLightsEffect', ({(23, 35, 23, 41): 'canvas'}, {}), '(canvas)', False, 'from ledshimdemo.effects.cheerlights import CheerLightsEffect\n'), ((30, 17, 30, 46), 'ledshimdemo.canvas.Canvas', 'Canvas', ({(30, 24, 30, 45): 'self.TEST_CANVAS_SIZE'}, {}), '(self.TEST_CANVAS_SIZE)', False, 'from ledshimdemo.canvas import Canvas\n'), ((31, 17, 31, 42), 'ledshimdemo.effects.cheerlights.CheerLightsEffect', 'CheerLightsEffect', ({(31, 35, 31, 41): 'canvas'}, {}), '(canvas)', False, 'from ledshimdemo.effects.cheerlights import CheerLightsEffect\n')]
Jhsmit/ColiCoords-Paper
figures/Figure_7/02_generate_images.py
7b92e67600930f64859d14867113b6de3edf1379
from colicoords.synthetic_data import add_readout_noise, draw_poisson from colicoords import load import numpy as np import mahotas as mh from tqdm import tqdm import os import tifffile def chunk_list(l, sizes): prev = 0 for s in sizes: result = l[prev:prev+s] prev += s yield result def generate_images(cell_list, num_images, cell_per_img, cell_per_img_std, shape): nums = np.round(np.random.normal(cell_per_img, cell_per_img_std, num_images)).astype(int) nums = nums[nums > 0] assert sum(nums) < len(cell_list), 'Not enough cells' chunked = [chunk for chunk in tqdm(chunk_list(cell_list, nums))] dicts = [generate_image(cells, shape) for cells in tqdm(chunked)] out_dict = {} for i, d in enumerate(dicts): for k, v in d.items(): if 'storm' in k: v['frame'] = i + 1 if k in out_dict: out_dict[k] = np.append(out_dict[k], v) else: out_dict[k] = v else: if k in out_dict: out_dict[k][i] = v else: out_dict[k] = np.zeros((num_images, *shape)) out_dict[k][i] = v return out_dict def generate_image(cells, shape, max_dist=5): thetas = 360 * np.random.rand(len(cells)) data_list = [cell.data.rotate(theta) for cell, theta in zip(cells, thetas)] assert all([data.names == data_list[0].names for data in data_list]), 'All cells must have the same data elements' out_dict = {name: np.zeros(shape) for name, dclass in zip(data_list[0].names, data_list[0].dclasses) if dclass != 'storm'} for i, data in enumerate(data_list): valid_position = False while not valid_position: pos_x = int(np.round(shape[1] * np.random.rand())) pos_y = int(np.round(shape[0] * np.random.rand())) min1 = pos_y - int(np.floor(data.shape[0])) max1 = min1 + data.shape[0] min2 = pos_x - int(np.floor(data.shape[1])) max2 = min2 + data.shape[1] # Crop the data for when the cell is on the border of the image d_min1 = np.max([0 - min1, 0]) d_max1 = np.min([data.shape[0] + (shape[0] - pos_y), data.shape[0]]) d_min2 = np.max([0 - min2, 0]) d_max2 = np.min([data.shape[1] + (shape[1] - pos_x), data.shape[1]]) data_cropped = data[d_min1:d_max1, d_min2:d_max2] # Limit image position to the edges of the image min1 = np.max([min1, 0]) max1 = np.min([max1, shape[0]]) min2 = np.max([min2, 0]) max2 = np.min([max2, shape[1]]) temp_binary = np.zeros(shape) temp_binary[min1:max1, min2:max2] = data_cropped.binary_img out_binary = (out_dict['binary'] > 0).astype(int) distance_map = mh.distance(1 - out_binary, metric='euclidean') if np.any(distance_map[temp_binary.astype(bool)] < max_dist): continue valid_position = True for name in data.names: data_elem = data_cropped.data_dict[name] if data_elem.dclass == 'storm': data_elem['x'] += min2 data_elem['y'] += min1 xmax, ymax = shape[1], shape[0] bools = (data_elem['x'] < 0) + (data_elem['x'] > xmax) + (data_elem['y'] < 0) + (data_elem['y'] > ymax) data_out = data_elem[~bools].copy() if name in out_dict: out_dict[name] = np.append(out_dict[name], data_out) else: out_dict[name] = data_out continue elif data_elem.dclass == 'binary': out_dict[name][min1:max1, min2:max2] += ((i+1)*data_elem) else: out_dict[name][min1:max1, min2:max2] += data_elem return out_dict def gen_image_from_storm(storm_table, shape, sigma=1.54, sigma_std=0.3): xmax = shape[1] ymax = shape[0] step = 1 xi = np.arange(step / 2, xmax, step) yi = np.arange(step / 2, ymax, step) x_coords = np.repeat(xi, len(yi)).reshape(len(xi), len(yi)).T y_coords = np.repeat(yi, len(xi)).reshape(len(yi), len(xi)) x, y = storm_table['x'], storm_table['y'] img = np.zeros_like(x_coords) intensities = storm_table['intensity'] sigma = sigma * np.ones_like(x) if not sigma_std else 
np.random.normal(sigma, sigma_std, size=len(x)) for _sigma, _int, _x, _y in zip(sigma, intensities, x, y): img += _int * np.exp(-(((_x - x_coords) / _sigma) ** 2 + ((_y - y_coords) / _sigma) ** 2) / 2) return img def gen_im(data_dir): """Generate microscopy images from a list of cell objects by placing them randomly oriented in the image.""" cell_list = load(os.path.join(data_dir, 'cell_obj', 'cells_final_selected.hdf5')) out_dict = generate_images(cell_list, 1000, 10, 3, (512, 512)) if not os.path.exists(os.path.join(data_dir, 'images')): os.mkdir(os.path.join(data_dir, 'images')) np.save(os.path.join(data_dir, 'images', 'binary.npy'), out_dict['binary']) np.save(os.path.join(data_dir, 'images', 'brightfield.npy'), out_dict['brightfield']) np.save(os.path.join(data_dir, 'images', 'foci_inner.npy'), out_dict['foci_inner']) np.save(os.path.join(data_dir, 'images', 'foci_outer.npy'), out_dict['foci_outer']) np.save(os.path.join(data_dir, 'images', 'storm_inner.npy'), out_dict['storm_inner']) np.save(os.path.join(data_dir, 'images', 'storm_outer.npy'), out_dict['storm_outer']) tifffile.imsave(os.path.join(data_dir, 'images', 'binary.tif'), out_dict['binary']) tifffile.imsave(os.path.join(data_dir, 'images', 'brightfield.tif'), out_dict['brightfield']) tifffile.imsave(os.path.join(data_dir, 'images', 'foci_inner.tif'), out_dict['foci_inner']) tifffile.imsave(os.path.join(data_dir, 'images', 'foci_outer.tif'), out_dict['foci_outer']) np.savetxt(os.path.join(data_dir, 'images', 'storm_inner.txt'), out_dict['storm_inner']) np.savetxt(os.path.join(data_dir, 'images', 'storm_outer.txt'), out_dict['storm_inner']) def noise_bf(data_dir): """add poissonian and readout noise to brightfield images""" noise = 20 img_stack = np.load(os.path.join(data_dir, 'images', 'brightfield.npy')) for photons in [10000, 1000, 500]: ratio = 1.0453 # ratio between 'background' (no cells) and cell wall img = (photons*(ratio-1))*img_stack + photons img = draw_poisson(img) img = add_readout_noise(img, noise) tifffile.imsave(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.tif'.format(photons)), img) np.save(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.npy'.format(photons)), img) if __name__ == '__main__': np.random.seed(42) data_dir = r'.' if not os.path.exists(os.path.join(data_dir, 'images')): os.mkdir(os.path.join(data_dir, 'images')) gen_im(data_dir) noise_bf(data_dir)
[((113, 9, 113, 40), 'numpy.arange', 'np.arange', ({(113, 19, 113, 27): 'step / 2', (113, 29, 113, 33): 'xmax', (113, 35, 113, 39): 'step'}, {}), '(step / 2, xmax, step)', True, 'import numpy as np\n'), ((114, 9, 114, 40), 'numpy.arange', 'np.arange', ({(114, 19, 114, 27): 'step / 2', (114, 29, 114, 33): 'ymax', (114, 35, 114, 39): 'step'}, {}), '(step / 2, ymax, step)', True, 'import numpy as np\n'), ((120, 10, 120, 33), 'numpy.zeros_like', 'np.zeros_like', ({(120, 24, 120, 32): 'x_coords'}, {}), '(x_coords)', True, 'import numpy as np\n'), ((167, 4, 167, 22), 'numpy.random.seed', 'np.random.seed', ({(167, 19, 167, 21): '(42)'}, {}), '(42)', True, 'import numpy as np\n'), ((48, 22, 48, 37), 'numpy.zeros', 'np.zeros', ({(48, 31, 48, 36): 'shape'}, {}), '(shape)', True, 'import numpy as np\n'), ((131, 21, 131, 84), 'os.path.join', 'os.path.join', ({(131, 34, 131, 42): 'data_dir', (131, 44, 131, 54): '"""cell_obj"""', (131, 56, 131, 83): '"""cells_final_selected.hdf5"""'}, {}), "(data_dir, 'cell_obj', 'cells_final_selected.hdf5')", False, 'import os\n'), ((138, 12, 138, 58), 'os.path.join', 'os.path.join', ({(138, 25, 138, 33): 'data_dir', (138, 35, 138, 43): '"""images"""', (138, 45, 138, 57): '"""binary.npy"""'}, {}), "(data_dir, 'images', 'binary.npy')", False, 'import os\n'), ((139, 12, 139, 63), 'os.path.join', 'os.path.join', ({(139, 25, 139, 33): 'data_dir', (139, 35, 139, 43): '"""images"""', (139, 45, 139, 62): '"""brightfield.npy"""'}, {}), "(data_dir, 'images', 'brightfield.npy')", False, 'import os\n'), ((140, 12, 140, 62), 'os.path.join', 'os.path.join', ({(140, 25, 140, 33): 'data_dir', (140, 35, 140, 43): '"""images"""', (140, 45, 140, 61): '"""foci_inner.npy"""'}, {}), "(data_dir, 'images', 'foci_inner.npy')", False, 'import os\n'), ((141, 12, 141, 62), 'os.path.join', 'os.path.join', ({(141, 25, 141, 33): 'data_dir', (141, 35, 141, 43): '"""images"""', (141, 45, 141, 61): '"""foci_outer.npy"""'}, {}), "(data_dir, 'images', 'foci_outer.npy')", False, 'import os\n'), ((142, 12, 142, 63), 'os.path.join', 'os.path.join', ({(142, 25, 142, 33): 'data_dir', (142, 35, 142, 43): '"""images"""', (142, 45, 142, 62): '"""storm_inner.npy"""'}, {}), "(data_dir, 'images', 'storm_inner.npy')", False, 'import os\n'), ((143, 12, 143, 63), 'os.path.join', 'os.path.join', ({(143, 25, 143, 33): 'data_dir', (143, 35, 143, 43): '"""images"""', (143, 45, 143, 62): '"""storm_outer.npy"""'}, {}), "(data_dir, 'images', 'storm_outer.npy')", False, 'import os\n'), ((145, 20, 145, 66), 'os.path.join', 'os.path.join', ({(145, 33, 145, 41): 'data_dir', (145, 43, 145, 51): '"""images"""', (145, 53, 145, 65): '"""binary.tif"""'}, {}), "(data_dir, 'images', 'binary.tif')", False, 'import os\n'), ((146, 20, 146, 71), 'os.path.join', 'os.path.join', ({(146, 33, 146, 41): 'data_dir', (146, 43, 146, 51): '"""images"""', (146, 53, 146, 70): '"""brightfield.tif"""'}, {}), "(data_dir, 'images', 'brightfield.tif')", False, 'import os\n'), ((147, 20, 147, 70), 'os.path.join', 'os.path.join', ({(147, 33, 147, 41): 'data_dir', (147, 43, 147, 51): '"""images"""', (147, 53, 147, 69): '"""foci_inner.tif"""'}, {}), "(data_dir, 'images', 'foci_inner.tif')", False, 'import os\n'), ((148, 20, 148, 70), 'os.path.join', 'os.path.join', ({(148, 33, 148, 41): 'data_dir', (148, 43, 148, 51): '"""images"""', (148, 53, 148, 69): '"""foci_outer.tif"""'}, {}), "(data_dir, 'images', 'foci_outer.tif')", False, 'import os\n'), ((149, 15, 149, 66), 'os.path.join', 'os.path.join', ({(149, 28, 149, 36): 'data_dir', (149, 38, 149, 46): 
'"""images"""', (149, 48, 149, 65): '"""storm_inner.txt"""'}, {}), "(data_dir, 'images', 'storm_inner.txt')", False, 'import os\n'), ((150, 15, 150, 66), 'os.path.join', 'os.path.join', ({(150, 28, 150, 36): 'data_dir', (150, 38, 150, 46): '"""images"""', (150, 48, 150, 65): '"""storm_outer.txt"""'}, {}), "(data_dir, 'images', 'storm_outer.txt')", False, 'import os\n'), ((156, 24, 156, 75), 'os.path.join', 'os.path.join', ({(156, 37, 156, 45): 'data_dir', (156, 47, 156, 55): '"""images"""', (156, 57, 156, 74): '"""brightfield.npy"""'}, {}), "(data_dir, 'images', 'brightfield.npy')", False, 'import os\n'), ((160, 14, 160, 31), 'colicoords.synthetic_data.draw_poisson', 'draw_poisson', ({(160, 27, 160, 30): 'img'}, {}), '(img)', False, 'from colicoords.synthetic_data import add_readout_noise, draw_poisson\n'), ((161, 14, 161, 43), 'colicoords.synthetic_data.add_readout_noise', 'add_readout_noise', ({(161, 32, 161, 35): 'img', (161, 37, 161, 42): 'noise'}, {}), '(img, noise)', False, 'from colicoords.synthetic_data import add_readout_noise, draw_poisson\n'), ((24, 55, 24, 68), 'tqdm.tqdm', 'tqdm', ({(24, 60, 24, 67): 'chunked'}, {}), '(chunked)', False, 'from tqdm import tqdm\n'), ((62, 21, 62, 42), 'numpy.max', 'np.max', ({(62, 28, 62, 41): '[0 - min1, 0]'}, {}), '([0 - min1, 0])', True, 'import numpy as np\n'), ((63, 21, 63, 80), 'numpy.min', 'np.min', ({(63, 28, 63, 79): '[data.shape[0] + (shape[0] - pos_y), data.shape[0]]'}, {}), '([data.shape[0] + (shape[0] - pos_y), data.shape[0]])', True, 'import numpy as np\n'), ((65, 21, 65, 42), 'numpy.max', 'np.max', ({(65, 28, 65, 41): '[0 - min2, 0]'}, {}), '([0 - min2, 0])', True, 'import numpy as np\n'), ((66, 21, 66, 80), 'numpy.min', 'np.min', ({(66, 28, 66, 79): '[data.shape[1] + (shape[1] - pos_x), data.shape[1]]'}, {}), '([data.shape[1] + (shape[1] - pos_x), data.shape[1]])', True, 'import numpy as np\n'), ((71, 19, 71, 36), 'numpy.max', 'np.max', ({(71, 26, 71, 35): '[min1, 0]'}, {}), '([min1, 0])', True, 'import numpy as np\n'), ((72, 19, 72, 43), 'numpy.min', 'np.min', ({(72, 26, 72, 42): '[max1, shape[0]]'}, {}), '([max1, shape[0]])', True, 'import numpy as np\n'), ((73, 19, 73, 36), 'numpy.max', 'np.max', ({(73, 26, 73, 35): '[min2, 0]'}, {}), '([min2, 0])', True, 'import numpy as np\n'), ((74, 19, 74, 43), 'numpy.min', 'np.min', ({(74, 26, 74, 42): '[max2, shape[1]]'}, {}), '([max2, shape[1]])', True, 'import numpy as np\n'), ((76, 26, 76, 41), 'numpy.zeros', 'np.zeros', ({(76, 35, 76, 40): 'shape'}, {}), '(shape)', True, 'import numpy as np\n'), ((79, 27, 79, 74), 'mahotas.distance', 'mh.distance', (), '', True, 'import mahotas as mh\n'), ((122, 20, 122, 35), 'numpy.ones_like', 'np.ones_like', ({(122, 33, 122, 34): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((124, 22, 124, 102), 'numpy.exp', 'np.exp', ({(124, 29, 124, 101): '(-(((_x - x_coords) / _sigma) ** 2 + ((_y - y_coords) / _sigma) ** 2) / 2)'}, {}), '(-(((_x - x_coords) / _sigma) ** 2 + ((_y - y_coords) / _sigma) ** 2) / 2\n )', True, 'import numpy as np\n'), ((135, 26, 135, 58), 'os.path.join', 'os.path.join', ({(135, 39, 135, 47): 'data_dir', (135, 49, 135, 57): '"""images"""'}, {}), "(data_dir, 'images')", False, 'import os\n'), ((136, 17, 136, 49), 'os.path.join', 'os.path.join', ({(136, 30, 136, 38): 'data_dir', (136, 40, 136, 48): '"""images"""'}, {}), "(data_dir, 'images')", False, 'import os\n'), ((169, 26, 169, 58), 'os.path.join', 'os.path.join', ({(169, 39, 169, 47): 'data_dir', (169, 49, 169, 57): '"""images"""'}, {}), "(data_dir, 'images')", False, 'import 
os\n'), ((170, 17, 170, 49), 'os.path.join', 'os.path.join', ({(170, 30, 170, 38): 'data_dir', (170, 40, 170, 48): '"""images"""'}, {}), "(data_dir, 'images')", False, 'import os\n'), ((19, 20, 19, 80), 'numpy.random.normal', 'np.random.normal', ({(19, 37, 19, 49): 'cell_per_img', (19, 51, 19, 67): 'cell_per_img_std', (19, 69, 19, 79): 'num_images'}, {}), '(cell_per_img, cell_per_img_std, num_images)', True, 'import numpy as np\n'), ((31, 34, 31, 59), 'numpy.append', 'np.append', ({(31, 44, 31, 55): 'out_dict[k]', (31, 57, 31, 58): 'v'}, {}), '(out_dict[k], v)', True, 'import numpy as np\n'), ((38, 34, 38, 64), 'numpy.zeros', 'np.zeros', ({(38, 43, 38, 63): '(num_images, *shape)'}, {}), '((num_images, *shape))', True, 'import numpy as np\n'), ((55, 31, 55, 54), 'numpy.floor', 'np.floor', ({(55, 40, 55, 53): 'data.shape[0]'}, {}), '(data.shape[0])', True, 'import numpy as np\n'), ((58, 31, 58, 54), 'numpy.floor', 'np.floor', ({(58, 40, 58, 53): 'data.shape[1]'}, {}), '(data.shape[1])', True, 'import numpy as np\n'), ((96, 37, 96, 72), 'numpy.append', 'np.append', ({(96, 47, 96, 61): 'out_dict[name]', (96, 63, 96, 71): 'data_out'}, {}), '(out_dict[name], data_out)', True, 'import numpy as np\n'), ((52, 44, 52, 60), 'numpy.random.rand', 'np.random.rand', ({}, {}), '()', True, 'import numpy as np\n'), ((53, 44, 53, 60), 'numpy.random.rand', 'np.random.rand', ({}, {}), '()', True, 'import numpy as np\n')]
epiphan-video/epiphancloud_api
epiphancloud/models/settings.py
8799591ea4d3a7285976e0038716b23c9ffe7061
class DeviceSettings: def __init__(self, settings): self._id = settings["id"] self._title = settings["title"] self._type = settings["type"]["name"] self._value = settings["value"] @property def id(self): return self._id @property def value(self): return self._value
[]
SchmitzAndrew/OSS-101-example
dictionary.py
1efecd4c5bfef4495904568d11e3f8d0a5ed9bd0
word = input("Enter a word: ") if word == "a": print("one; any") elif word == "apple": print("familiar, round fleshy fruit") elif word == "rhinoceros": print("large thick-skinned animal with one or two horns on its nose") else: print("That word must not exist. This dictionary is very comprehensive.")
[]
ilmntr/white_study
solved_bronze/num11720.py
51d69d122b07e9a0922dddb134bff4ec79077eb9
# Read the digit count, then read the digits and sum them.
cnt = int(input())
num = list(map(int, input()))
total = 0
for i in range(len(num)):
    total = total + num[i]
print(total)
[]
sdnhub/kube-navi
setup.py
d16a9289ba7261011e6c8d19c48cdc9bd533e629
from distutils.core import setup setup( name = 'kube_navi', packages = ['kube_navi'], # this must be the same as the name above version = '0.1', description = 'Kubernetes resource discovery toolkit', author = 'Srini Seetharaman', author_email = '[email protected]', url = 'https://github.com/sdnhub/kube-navi', # use the URL to the github repo download_url = 'https://github.com/sdnhub/kube-navi/archive/0.1.tar.gz', # I'll explain this in a second keywords = ['testing', 'logging', 'example'], # arbitrary keywords classifiers = [], )
[((2, 0, 13, 1), 'distutils.core.setup', 'setup', (), '', False, 'from distutils.core import setup\n')]
MarvinMiao/flink-ai-extended
flink-ai-flow/ai_flow/metric/utils.py
e45eecf2deea6976ba3d7ba821ffb8d9ce0a17f4
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import ast from typing import Text, Optional, Union, List from ai_flow.rest_endpoint.protobuf.metric_service_pb2 import MetricMetaResponse, ListMetricMetaResponse, \ MetricSummaryResponse, ListMetricSummaryResponse from ai_flow.rest_endpoint.service import int64Value, stringValue from ai_flow.common.properties import Properties from ai_flow.meta.metric_meta import MetricMeta, MetricType, MetricSummary from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, \ SUCCESS, RESOURCE_DOES_NOT_EXIST from ai_flow.store.db.db_model import SqlMetricMeta, SqlMetricSummary from ai_flow.store.db.db_model import MongoMetricSummary, MongoMetricMeta def table_to_metric_meta(metric_meta_result) -> MetricMeta: properties = metric_meta_result.properties if properties is not None: properties = ast.literal_eval(properties) return MetricMeta(uuid=metric_meta_result.uuid, name=metric_meta_result.name, dataset_id=metric_meta_result.dataset_id, model_name=metric_meta_result.model_name, model_version=metric_meta_result.model_version, job_id=metric_meta_result.job_id, start_time=metric_meta_result.start_time, end_time=metric_meta_result.end_time, metric_type=MetricType.value_of(metric_meta_result.metric_type), uri=metric_meta_result.uri, tags=metric_meta_result.tags, metric_description=metric_meta_result.metric_description, properties=properties) def table_to_metric_summary(metric_summary_result) -> MetricSummary: return MetricSummary(uuid=metric_summary_result.uuid, metric_id=metric_summary_result.metric_id, metric_key=metric_summary_result.metric_key, metric_value=metric_summary_result.metric_value) def metric_meta_to_table(name: Text, dataset_id: int, model_name: Optional[Text], model_version: Optional[Text], job_id: int, start_time: int, end_time: int, metric_type: MetricType, uri: Text, tags: Text, metric_description: Text, properties: Properties, store_type: Text = 'SqlAlchemyStore'): if properties is not None: properties = str(properties) if store_type == 'MongoStore': _class = MongoMetricMeta else: _class = SqlMetricMeta return _class(name=name, dataset_id=dataset_id, model_name=model_name, model_version=model_version, job_id=job_id, start_time=start_time, end_time=end_time, metric_type=metric_type.value, uri=uri, tags=tags, metric_description=metric_description, properties=properties) def metric_summary_to_table(metric_id: int, metric_key: Text, metric_value: Text, store_type: Text = 'SqlAlchemyStore'): if store_type == 'MongoStore': _class = MongoMetricSummary else: _class = SqlMetricSummary return _class(metric_id=metric_id, metric_key=metric_key, metric_value=metric_value) def metric_meta_to_proto(metric_meta: MetricMeta) -> MetricMetaProto: if metric_meta.metric_type == 
MetricType.DATASET: metric_type = MetricTypeProto.DATASET else: metric_type = MetricTypeProto.MODEL return MetricMetaProto(uuid=metric_meta.uuid, name=stringValue(metric_meta.name), dataset_id=int64Value(metric_meta.dataset_id), model_name=stringValue(metric_meta.model_name), model_version=stringValue(metric_meta.model_version), job_id=int64Value(metric_meta.job_id), start_time=int64Value(metric_meta.start_time), end_time=int64Value(metric_meta.end_time), metric_type=metric_type, uri=stringValue(metric_meta.uri), tags=stringValue(metric_meta.tags), metric_description=stringValue(metric_meta.metric_description), properties=metric_meta.properties) def metric_summary_to_proto(metric_summary: MetricSummary) -> MetricSummaryProto: return MetricSummaryProto(uuid=metric_summary.uuid, metric_id=int64Value(metric_summary.metric_id), metric_key=stringValue(metric_summary.metric_key), metric_value=stringValue(metric_summary.metric_value)) def proto_to_metric_meta(metric_meta_proto: MetricMetaProto) -> MetricMeta: if MetricTypeProto.DATASET == metric_meta_proto.metric_type: metric_type = MetricType.DATASET else: metric_type = MetricType.MODEL return MetricMeta(uuid=metric_meta_proto.uuid, name=metric_meta_proto.name.value, dataset_id=metric_meta_proto.dataset_id.value, model_name=metric_meta_proto.model_name.value, model_version=metric_meta_proto.model_version.value, job_id=metric_meta_proto.job_id.value, start_time=metric_meta_proto.start_time.value, end_time=metric_meta_proto.end_time.value, metric_type=metric_type, uri=metric_meta_proto.uri.value if metric_meta_proto.HasField('uri') else None, tags=metric_meta_proto.tags.value if metric_meta_proto.HasField('tags') else None, metric_description=metric_meta_proto.metric_description.value if metric_meta_proto.HasField('metric_description') else None, properties=metric_meta_proto.properties ) def proto_to_metric_summary(metric_summary_proto: MetricSummaryProto) -> MetricSummary: return MetricSummary(uuid=metric_summary_proto.uuid, metric_id=metric_summary_proto.metric_id.value, metric_key=metric_summary_proto.metric_key.value if metric_summary_proto.HasField('metric_key') else None, metric_value=metric_summary_proto.metric_value.value if metric_summary_proto.HasField('metric_value') else None ) def _warp_metric_meta_response(metric_meta: Optional[MetricMeta]) -> MetricMetaResponse: if metric_meta is not None: return MetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=metric_meta_to_proto(metric_meta)) else: return MetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def _warp_list_metric_meta_response(metric_meta: Union[None, MetricMeta, List[MetricMeta]]) -> MetricMetaResponse: if metric_meta is not None: if isinstance(metric_meta, MetricMeta): return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=[metric_meta_to_proto(metric_meta)]) else: res = [] for meta in metric_meta: res.append(metric_meta_to_proto(meta)) return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=res) else: return ListMetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def _warp_metric_summary_response(metric_summary: Optional[MetricSummary]) -> MetricSummaryResponse: if metric_summary is not None: return MetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), 
metric_summary=metric_summary_to_proto(metric_summary)) else: return MetricSummaryResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_summary=None) def _warp_list_metric_summary_response(metric_summary: Optional[List[MetricSummary]]) -> ListMetricSummaryResponse: if metric_summary is not None: res = [] for summary in metric_summary: res.append(metric_summary_to_proto(summary)) return ListMetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_summary=res) else: return ListMetricSummaryResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_summary=None)
[((52, 11, 55, 73), 'ai_flow.meta.metric_meta.MetricSummary', 'MetricSummary', (), '', False, 'from ai_flow.meta.metric_meta import MetricMeta, MetricType, MetricSummary\n'), ((35, 21, 35, 49), 'ast.literal_eval', 'ast.literal_eval', ({(35, 38, 35, 48): 'properties'}, {}), '(properties)', False, 'import ast\n'), ((44, 34, 44, 85), 'ai_flow.meta.metric_meta.MetricType.value_of', 'MetricType.value_of', ({(44, 54, 44, 84): 'metric_meta_result.metric_type'}, {}), '(metric_meta_result.metric_type)', False, 'from ai_flow.meta.metric_meta import MetricMeta, MetricType, MetricSummary\n'), ((111, 32, 111, 61), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', ({(111, 44, 111, 60): 'metric_meta.name'}, {}), '(metric_meta.name)', False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((112, 38, 112, 72), 'ai_flow.rest_endpoint.service.int64Value', 'int64Value', ({(112, 49, 112, 71): 'metric_meta.dataset_id'}, {}), '(metric_meta.dataset_id)', False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((113, 38, 113, 73), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', ({(113, 50, 113, 72): 'metric_meta.model_name'}, {}), '(metric_meta.model_name)', False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((114, 41, 114, 79), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', ({(114, 53, 114, 78): 'metric_meta.model_version'}, {}), '(metric_meta.model_version)', False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((115, 34, 115, 64), 'ai_flow.rest_endpoint.service.int64Value', 'int64Value', ({(115, 45, 115, 63): 'metric_meta.job_id'}, {}), '(metric_meta.job_id)', False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((116, 38, 116, 72), 'ai_flow.rest_endpoint.service.int64Value', 'int64Value', ({(116, 49, 116, 71): 'metric_meta.start_time'}, {}), '(metric_meta.start_time)', False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((117, 36, 117, 68), 'ai_flow.rest_endpoint.service.int64Value', 'int64Value', ({(117, 47, 117, 67): 'metric_meta.end_time'}, {}), '(metric_meta.end_time)', False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((119, 31, 119, 59), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', ({(119, 43, 119, 58): 'metric_meta.uri'}, {}), '(metric_meta.uri)', False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((120, 32, 120, 61), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', ({(120, 44, 120, 60): 'metric_meta.tags'}, {}), '(metric_meta.tags)', False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((121, 46, 121, 89), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', ({(121, 58, 121, 88): 'metric_meta.metric_description'}, {}), '(metric_meta.metric_description)', False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((127, 40, 127, 76), 'ai_flow.rest_endpoint.service.int64Value', 'int64Value', ({(127, 51, 127, 75): 'metric_summary.metric_id'}, {}), '(metric_summary.metric_id)', False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((128, 41, 128, 79), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', ({(128, 53, 128, 78): 'metric_summary.metric_key'}, {}), '(metric_summary.metric_key)', False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((129, 43, 129, 83), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', ({(129, 
55, 129, 82): 'metric_summary.metric_value'}, {}), '(metric_summary.metric_value)', False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((167, 60, 167, 84), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', ({(167, 76, 167, 83): 'SUCCESS'}, {}), '(SUCCESS)', False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((171, 45, 171, 85), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', ({(171, 61, 171, 84): 'RESOURCE_DOES_NOT_EXIST'}, {}), '(RESOURCE_DOES_NOT_EXIST)', False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((188, 49, 188, 89), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', ({(188, 65, 188, 88): 'RESOURCE_DOES_NOT_EXIST'}, {}), '(RESOURCE_DOES_NOT_EXIST)', False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((194, 63, 194, 87), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', ({(194, 79, 194, 86): 'SUCCESS'}, {}), '(SUCCESS)', False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((198, 48, 198, 88), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', ({(198, 64, 198, 87): 'RESOURCE_DOES_NOT_EXIST'}, {}), '(RESOURCE_DOES_NOT_EXIST)', False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((207, 67, 207, 91), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', ({(207, 83, 207, 90): 'SUCCESS'}, {}), '(SUCCESS)', False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((211, 52, 211, 92), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', ({(211, 68, 211, 91): 'RESOURCE_DOES_NOT_EXIST'}, {}), '(RESOURCE_DOES_NOT_EXIST)', False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((178, 68, 178, 92), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', ({(178, 84, 178, 91): 'SUCCESS'}, {}), '(SUCCESS)', False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((184, 68, 184, 92), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', ({(184, 84, 184, 91): 'SUCCESS'}, {}), '(SUCCESS)', False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n')]
HaujetZhao/Caps_Writer
src/moduels/gui/Tab_Help.py
f2b2038a2c0984a1d356f024cbac421fe594601a
# -*- coding: UTF-8 -*-

from PySide2.QtWidgets import QWidget, QPushButton, QVBoxLayout
from PySide2.QtCore import Signal

from moduels.component.NormalValue import 常量
from moduels.component.SponsorDialog import SponsorDialog
import os, webbrowser


class Tab_Help(QWidget):
    状态栏消息 = Signal(str, int)

    def __init__(self, parent=None):
        super().__init__()
        self.initElement()    # initialize the widgets first
        self.initSlots()    # then connect each widget to its signal slots
        self.initLayout()    # then lay out the widgets
        self.initValue()    # finally set the widgets' values


    def initElement(self):
        self.打开帮助按钮 = QPushButton(self.tr('打开帮助文档'))
        self.ffmpegMannualNoteButton = QPushButton(self.tr('查看作者的 FFmpeg 笔记'))
        self.openVideoHelpButtone = QPushButton(self.tr('查看视频教程'))
        self.openGiteePage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Gitee 检查新版本'))
        self.openGithubPage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Github 检查新版本'))
        self.linkToDiscussPage = QPushButton(self.tr('加入 QQ 群'))
        self.tipButton = QPushButton(self.tr('打赏作者'))
        self.masterLayout = QVBoxLayout()

    def initSlots(self):
        self.打开帮助按钮.clicked.connect(self.openHelpDocument)
        self.ffmpegMannualNoteButton.clicked.connect(lambda: webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489')))
        self.openVideoHelpButtone.clicked.connect(lambda: webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/')))
        self.openGiteePage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases')))
        self.openGithubPage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases')))
        self.linkToDiscussPage.clicked.connect(lambda: webbrowser.open(
            self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi')))
        self.tipButton.clicked.connect(lambda: SponsorDialog(self))

    def initLayout(self):
        self.setLayout(self.masterLayout)
        # self.masterLayout.addWidget(self.打开帮助按钮)
        # self.masterLayout.addWidget(self.ffmpegMannualNoteButton)
        self.masterLayout.addWidget(self.openVideoHelpButtone)
        self.masterLayout.addWidget(self.openGiteePage)
        self.masterLayout.addWidget(self.openGithubPage)
        self.masterLayout.addWidget(self.linkToDiscussPage)
        self.masterLayout.addWidget(self.tipButton)

    def initValue(self):
        self.打开帮助按钮.setMaximumHeight(100)
        self.ffmpegMannualNoteButton.setMaximumHeight(100)
        self.openVideoHelpButtone.setMaximumHeight(100)
        self.openGiteePage.setMaximumHeight(100)
        self.openGithubPage.setMaximumHeight(100)
        self.linkToDiscussPage.setMaximumHeight(100)
        self.tipButton.setMaximumHeight(100)

    def openHelpDocument(self):
        try:
            if 常量.系统平台 == 'Darwin':
                import shlex
                os.system("open " + shlex.quote(self.tr("./misc/Docs/README_zh.html")))
            elif 常量.系统平台 == 'Windows':
                os.startfile(os.path.realpath(self.tr('./misc/Docs/README_zh.html')))
        except:
            print('未能打开帮助文档')
[((12, 22, 12, 38), 'PySide2.QtCore.Signal', 'Signal', ({(12, 29, 12, 32): 'str', (12, 34, 12, 37): 'int'}, {}), '(str, int)', False, 'from PySide2.QtCore import Signal\n'), ((30, 28, 30, 41), 'PySide2.QtWidgets.QVBoxLayout', 'QVBoxLayout', ({}, {}), '()', False, 'from PySide2.QtWidgets import QWidget, QPushButton, QVBoxLayout\n'), ((40, 47, 40, 66), 'moduels.component.SponsorDialog.SponsorDialog', 'SponsorDialog', ({(40, 61, 40, 65): 'self'}, {}), '(self)', False, 'from moduels.component.SponsorDialog import SponsorDialog\n')]
AuFeld/COAG
app/routes/register.py
3874a9c1c6ceb908a6bbabfb49e2c701d8e54f20
from typing import Callable, Optional, Type, cast from fastapi import APIRouter, HTTPException, Request, status from app.models import users from app.common.user import ErrorCode, run_handler from app.users.user import ( CreateUserProtocol, InvalidPasswordException, UserAlreadyExists, ValidatePasswordProtocol, ) def get_register_router( create_user: CreateUserProtocol, user_model: Type[users.BaseUser], user_create_model: Type[users.BaseUserCreate], after_register: Optional[Callable[[users.UD, Request], None]] = None, validate_password: Optional[ValidatePasswordProtocol] = None, ) -> APIRouter: """Generate a router with the register route.""" router = APIRouter() @router.post( "/register", response_model=user_model, status_code=status.HTTP_201_CREATED ) async def register(request: Request, user: user_create_model): # type: ignore user = cast(users.BaseUserCreate, user) # Prevent mypy complain if validate_password: try: await validate_password(user.password, user) except InvalidPasswordException as e: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ "code": ErrorCode.REGISTER_INVALID_PASSWORD, "reason": e.reason, }, ) try: created_user = await create_user(user, safe=True) except UserAlreadyExists: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=ErrorCode.REGISTER_USER_ALREADY_EXISTS, ) if after_register: await run_handler(after_register, created_user, request) return created_user return router
[((23, 13, 23, 24), 'fastapi.APIRouter', 'APIRouter', ({}, {}), '()', False, 'from fastapi import APIRouter, HTTPException, Request, status\n'), ((29, 15, 29, 47), 'typing.cast', 'cast', ({(29, 20, 29, 40): 'users.BaseUserCreate', (29, 42, 29, 46): 'user'}, {}), '(users.BaseUserCreate, user)', False, 'from typing import Callable, Optional, Type, cast\n'), ((45, 18, 48, 13), 'fastapi.HTTPException', 'HTTPException', (), '', False, 'from fastapi import APIRouter, HTTPException, Request, status\n'), ((51, 18, 51, 68), 'app.common.user.run_handler', 'run_handler', ({(51, 30, 51, 44): 'after_register', (51, 46, 51, 58): 'created_user', (51, 60, 51, 67): 'request'}, {}), '(after_register, created_user, request)', False, 'from app.common.user import ErrorCode, run_handler\n'), ((34, 22, 40, 17), 'fastapi.HTTPException', 'HTTPException', (), '', False, 'from fastapi import APIRouter, HTTPException, Request, status\n')]
xizaoqu/Panoptic-PolarNet
utils/visual.py
8ce05f437f54e030eac7de150f43caab2810cfbb
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import cv2 import numpy as np def flow_to_img(flow, normalize=True): """Convert flow to viewable image, using color hue to encode flow vector orientation, and color saturation to encode vector length. This is similar to the OpenCV tutorial on dense optical flow, except that they map vector length to the value plane of the HSV color model, instead of the saturation plane, as we do here. Args: flow: optical flow normalize: Normalize flow to 0..255 Returns: img: viewable representation of the dense optical flow in RGB format Ref: https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py """ hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8) flow_magnitude, flow_angle = cv2.cartToPolar(flow[..., 0].astype(np.float32), flow[..., 1].astype(np.float32)) # A couple times, we've gotten NaNs out of the above... nans = np.isnan(flow_magnitude) if np.any(nans): nans = np.where(nans) flow_magnitude[nans] = 0. # Normalize hsv[..., 0] = flow_angle * 180 / np.pi / 2 if normalize is True: hsv[..., 1] = cv2.normalize(flow_magnitude, None, 0, 255, cv2.NORM_MINMAX) else: hsv[..., 1] = flow_magnitude hsv[..., 2] = 255 img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB) return img
[((18, 10, 18, 69), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((22, 11, 22, 35), 'numpy.isnan', 'np.isnan', ({(22, 20, 22, 34): 'flow_magnitude'}, {}), '(flow_magnitude)', True, 'import numpy as np\n'), ((23, 7, 23, 19), 'numpy.any', 'np.any', ({(23, 14, 23, 18): 'nans'}, {}), '(nans)', True, 'import numpy as np\n'), ((34, 10, 34, 46), 'cv2.cvtColor', 'cv2.cvtColor', ({(34, 23, 34, 26): 'hsv', (34, 28, 34, 45): 'cv2.COLOR_HSV2RGB'}, {}), '(hsv, cv2.COLOR_HSV2RGB)', False, 'import cv2\n'), ((24, 15, 24, 29), 'numpy.where', 'np.where', ({(24, 24, 24, 28): 'nans'}, {}), '(nans)', True, 'import numpy as np\n'), ((30, 22, 30, 82), 'cv2.normalize', 'cv2.normalize', ({(30, 36, 30, 50): 'flow_magnitude', (30, 52, 30, 56): 'None', (30, 58, 30, 59): '0', (30, 61, 30, 64): '255', (30, 66, 30, 81): 'cv2.NORM_MINMAX'}, {}), '(flow_magnitude, None, 0, 255, cv2.NORM_MINMAX)', False, 'import cv2\n')]
zhkmxx9302013/SoftwarePilot
DistributedRL/Gateway/build/Code/sim/Parser/LAI/GreenIndex.py
826098465b800085774946c20a7a283f369f1d21
import argparse from PIL import Image, ImageStat import math parser = argparse.ArgumentParser() parser.add_argument('fname') parser.add_argument('pref', default="", nargs="?") args = parser.parse_args() im = Image.open(args.fname) RGB = im.convert('RGB') imWidth, imHeight = im.size ratg = 1.2 ratgb = 1.66 ming = 10 ratr = 2 speed = 8 leafcount = 0 total = 0 for i in range(0, int(imWidth/speed)): for j in range(0, int(imHeight/speed)): R,G,B = RGB.getpixel((i*speed,j*speed)) if R*ratg < G and B*ratgb < G and B*ratr < R: leafcount = leafcount + 1 total = total+1 print("LAI="+str(float(leafcount)/total))
[((5, 9, 5, 34), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((10, 5, 10, 27), 'PIL.Image.open', 'Image.open', ({(10, 16, 10, 26): 'args.fname'}, {}), '(args.fname)', False, 'from PIL import Image, ImageStat\n')]
ped998/scripts
reports/heliosV1/python/heliosStorageStats/heliosStorageStats.py
0dcaaf47f9676210e1c972a5d59d8d0de82a1d93
#!/usr/bin/env python """cluster storage stats for python""" # import pyhesity wrapper module from pyhesity import * from datetime import datetime import codecs # command line arguments import argparse parser = argparse.ArgumentParser() parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com') # cluster to connect to parser.add_argument('-u', '--username', type=str, required=True) # username parser.add_argument('-d', '--domain', type=str, default='local') # (optional) domain - defaults to local parser.add_argument('-pwd', '--password', type=str, default=None) # optional password parser.add_argument('-n', '--unit', type=str, choices=['GiB', 'TiB', 'gib', 'tib'], default='TiB') args = parser.parse_args() vip = args.vip username = args.username domain = args.domain password = args.password unit = args.unit if unit.lower() == 'tib': multiplier = 1024 * 1024 * 1024 * 1024 unit = 'TiB' else: multiplier = 1024 * 1024 * 1024 unit = 'GiB' def toUnits(value): return round(float(value) / multiplier, 1) # authenticate apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=True, noretry=True) # outfile now = datetime.now() # cluster = api('get', 'cluster') dateString = now.strftime("%Y-%m-%d") outfile = 'heliosStorageStats-%s.csv' % dateString f = codecs.open(outfile, 'w') # headings f.write('Date,Capacity (%s),Consumed (%s),Free (%s),Used %%,Data In (%s),Data Written (%s),Storage Reduction,Data Reduction\n' % (unit, unit, unit, unit, unit)) stats = {} def parseStats(clusterName, dataPoint, statName): if clusterName not in stats.keys(): stats[clusterName] = {} stats[clusterName][statName] = dataPoint['data']['int64Value'] endMsecs = dateToUsecs(now.strftime("%Y-%m-%d %H:%M:%S")) / 1000 startMsecs = (timeAgo(2, 'days')) / 1000 print('\nGathering cluster stats:\n') for cluster in heliosClusters(): heliosCluster(cluster) print(' %s' % cluster['name']) capacityStats = api('get', 'statistics/timeSeriesStats?endTimeMsecs=%s&entityId=%s&metricName=kCapacityBytes&metricUnitType=0&range=day&rollupFunction=average&rollupIntervalSecs=86400&schemaName=kBridgeClusterStats&startTimeMsecs=%s' % (endMsecs, cluster['clusterId'], startMsecs)) consumedStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterTierPhysicalStats&metricName=kMorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s:Local&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs)) dataInStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=BrickBytesLogical&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs)) dataWrittenStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=ChunkBytesMorphed&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs)) logicalSizeStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterLogicalStats&metricName=kUnmorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs)) parseStats(cluster['name'], capacityStats['dataPointVec'][0], 'capacity') parseStats(cluster['name'], consumedStats['dataPointVec'][0], 'consumed') parseStats(cluster['name'], dataInStats['dataPointVec'][0], 'dataIn') 
parseStats(cluster['name'], dataWrittenStats['dataPointVec'][0], 'dataWritten') parseStats(cluster['name'], logicalSizeStats['dataPointVec'][0], 'logicalSize') for clusterName in sorted(stats.keys()): capacity = stats[clusterName]['capacity'] consumed = stats[clusterName]['consumed'] dataIn = stats[clusterName]['dataIn'] dataWritten = stats[clusterName]['dataWritten'] logicalSize = stats[clusterName]['logicalSize'] free = capacity - consumed pctUsed = round(100 * consumed / capacity, 0) storageReduction = round(float(logicalSize) / consumed, 1) dataReduction = round(float(dataIn) / dataWritten, 1) f.write('"%s","%s","%s","%s","%s","%s","%s","%s","%s"\n' % (clusterName, toUnits(capacity), toUnits(consumed), toUnits(free), pctUsed, toUnits(dataIn), toUnits(dataWritten), storageReduction, dataReduction)) f.close() print('\nOutput saved to %s\n' % outfile)
[((11, 9, 11, 34), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((41, 6, 41, 20), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((45, 4, 45, 29), 'codecs.open', 'codecs.open', ({(45, 16, 45, 23): 'outfile', (45, 25, 45, 28): '"""w"""'}, {}), "(outfile, 'w')", False, 'import codecs\n')]
zengrx/S.M.A.R.T
src/advanceoperate/malimgthread.py
47a9abe89008e9b34f9b9d057656dbf3fb286456
#coding=utf-8 from PyQt4 import QtCore import os, glob, numpy, sys from PIL import Image from sklearn.cross_validation import StratifiedKFold from sklearn.metrics import confusion_matrix from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import BallTree from sklearn import cross_validation from sklearn.utils import shuffle import sklearn import leargist import cPickle import random import sys reload(sys) sys.setdefaultencoding( "utf-8" ) class ValidationResult(QtCore.QThread): finishSignal = QtCore.pyqtSignal(list) def __init__(self, parent=None): super(ValidationResult, self).__init__(parent) def getClassifyLabel(self): X = numpy.load("./datafiles/img_features.npy") # features y = numpy.load("./datafiles/img_labels.npy") # labels n = cPickle.load(open("./datafiles/img.p","rb")) # numbering l = cPickle.load(open("./datafiles/imglabel.p", "rb")) # [family id, index within family, file name, overall index] return X, y, n ,l ''' Prepare the data for drawing the matrix @X: feature matrix @y: labels @n: family names of all samples @l: corresponding family counts ''' def prepareData2Matrix(self, X, y, n, l): n_samples, useless = X.shape p = range(n_samples) random.seed(random.random()) random.shuffle(p) X, y = X[p], y[p] # shuffle the arrays kfold = 10 # 10-fold skf = StratifiedKFold(y,kfold) skfind = [None] * len(skf) cnt = 0 for train_index in skf: skfind[cnt] = train_index cnt += 1 list_fams = n cache = [] no_imgs = [] for l_list in l: if 0 == l_list[1]: # print l[l_list[3] - 1] # print l_list cache.append(l[l_list[3] - 1][1] + 1) no_imgs = cache[1:len(cache)] no_imgs.append(cache[0]) # print no_imgs # output the number of files in each family conf_mat = numpy.zeros((len(no_imgs), len(no_imgs))) # initialize the matrix n_neighbors = 5 # 10-fold Cross Validation for i in range(kfold): train_indices = skfind[i][0] test_indices = skfind[i][1] clf = [] clf = KNeighborsClassifier(n_neighbors, weights='distance') X_train = X[train_indices] y_train = y[train_indices] X_test = X[test_indices] y_test = y[test_indices] # Training import time tic = time.time() clf.fit(X_train,y_train) toc = time.time() print "training time= ", toc-tic # roughly 2.5 secs # Testing y_predict = [] tic = time.time() y_predict = clf.predict(X_test) # output is labels and not indices toc = time.time() print "testing time = ", toc-tic # roughly 0.3 secs # Compute confusion matrix cm = [] cm = confusion_matrix(y_test,y_predict) conf_mat = conf_mat + cm return conf_mat, no_imgs, list_fams def run(self): print "start draw" X, y, n, l = self.getClassifyLabel() cm, nimg, listf = self.prepareData2Matrix(X, y, n, l) msg = [cm, nimg, listf] self.finishSignal.emit(msg) class MalwareImageClass(QtCore.QThread): malwarSignal = QtCore.pyqtSignal(int, list) concluSignal = QtCore.pyqtSignal(int, list) def __init__(self, filename, parent=None): super(MalwareImageClass, self).__init__(parent) self.filename = str(filename)#.encode('cp936') self.feature = '' ''' Get the training results: features, labels, file names and the corresponding indices ''' def getClassifyLabel(self): X = numpy.load("./datafiles/img_features.npy") # features y = numpy.load("./datafiles/img_labels.npy") # labels n = cPickle.load(open("./datafiles/img.p","rb")) # numbering l = cPickle.load(open("./datafiles/imglabel.p", "rb")) # [family id, index within family, file name, overall index] return X, y, n ,l ''' Classify the image train@ training set features label@ training set labels ''' def classifyImage(self, feature_X, label_y, number): im = Image.open(self.filename) im1 = im.resize((64,64), Image.ANTIALIAS); # convert to 64x64 des = leargist.color_gist(im1); # 960 values feature = des[0:320]; # generate the grayscale map, only the first 320 values are needed query_feature = feature.reshape(1, -1) self.feature = query_feature # get the features and labels X = feature_X y = label_y n = number n_neighbors = 5; # better to have this at the start of the code knn = 
KNeighborsClassifier(n_neighbors, weights='distance') knn.fit(X, y) num = int(knn.predict(query_feature)) classname = n[num] proba = knn.predict_proba(query_feature) msg = [num, classname, proba] self.malwarSignal.emit(1, msg) ''' Use a BallTree to find the closest samples in the dataset; returns the distance values and the sample label numbers ''' def findMostSimilarImg(self, feature_X, serial): X = feature_X b = BallTree(X) # 5 most similar samples dist, ind = b.query(self.feature, k=3) print dist, ind ind = ind[0] # print ind l = serial imgs = [] for rank in ind: # print rank for name in l: if rank == name[3]: # print name imgs.append(name[2]) self.concluSignal.emit(2, imgs) def run(self): X, y, n ,l = self.getClassifyLabel() self.classifyImage(X, y, n) self.findMostSimilarImg(X, l)
[]
itamarhaber/iredis
tests/unittests/command_parse/test_stream.py
61208aab34c731f88232abd2cacdf0e075e701f2
def test_xrange(judge_command): judge_command( "XRANGE somestream - +", {"command": "XRANGE", "key": "somestream", "stream_id": ["-", "+"]}, ) judge_command( "XRANGE somestream 1526985054069 1526985055069", { "command": "XRANGE", "key": "somestream", "stream_id": ["1526985054069", "1526985055069"], }, ) judge_command( "XRANGE somestream 1526985054069 1526985055069-10", { "command": "XRANGE", "key": "somestream", "stream_id": ["1526985054069", "1526985055069-10"], }, ) judge_command( "XRANGE somestream 1526985054069 1526985055069-10 count 10", { "command": "XRANGE", "key": "somestream", "stream_id": ["1526985054069", "1526985055069-10"], "count_const": "count", "count": "10", }, ) def test_xgroup_create(judge_command): judge_command( "XGROUP CREATE mykey mygroup 123", { "command": "XGROUP", "stream_create": "CREATE", "key": "mykey", "group": "mygroup", "stream_id": "123", }, ) judge_command( "XGROUP CREATE mykey mygroup $", { "command": "XGROUP", "stream_create": "CREATE", "key": "mykey", "group": "mygroup", "stream_id": "$", }, ) # short of a parameter judge_command("XGROUP CREATE mykey mygroup", None) judge_command("XGROUP CREATE mykey", None) def test_xgroup_setid(judge_command): judge_command( "XGROUP SETID mykey mygroup 123", { "command": "XGROUP", "stream_setid": "SETID", "key": "mykey", "group": "mygroup", "stream_id": "123", }, ) judge_command( "XGROUP SETID mykey mygroup $", { "command": "XGROUP", "stream_setid": "SETID", "key": "mykey", "group": "mygroup", "stream_id": "$", }, ) # two subcommand together shouldn't match judge_command("XGROUP CREATE mykey mygroup 123 SETID mykey mygroup $", None) def test_xgroup_destroy(judge_command): judge_command( "XGROUP destroy mykey mygroup", { "command": "XGROUP", "stream_destroy": "destroy", "key": "mykey", "group": "mygroup", }, ) judge_command("XGROUP destroy mykey", None) judge_command("XGROUP DESTROY mykey mygroup $", None) def test_xgroup_delconsumer(judge_command): judge_command( "XGROUP delconsumer mykey mygroup myconsumer", { "command": "XGROUP", "stream_delconsumer": "delconsumer", "key": "mykey", "group": "mygroup", "consumer": "myconsumer", }, ) judge_command( "XGROUP delconsumer mykey mygroup $", { "command": "XGROUP", "stream_delconsumer": "delconsumer", "key": "mykey", "group": "mygroup", "consumer": "$", }, ) judge_command("XGROUP delconsumer mykey mygroup", None) def test_xgroup_stream(judge_command): judge_command( "XACK mystream group1 123123", { "command": "XACK", "key": "mystream", "group": "group1", "stream_id": "123123", }, ) judge_command( "XACK mystream group1 123123 111", {"command": "XACK", "key": "mystream", "group": "group1", "stream_id": "111"}, ) def test_xinfo(judge_command): judge_command( "XINFO consumers mystream mygroup", { "command": "XINFO", "stream_consumers": "consumers", "key": "mystream", "group": "mygroup", }, ) judge_command( "XINFO GROUPS mystream", {"command": "XINFO", "stream_groups": "GROUPS", "key": "mystream"}, ) judge_command( "XINFO STREAM mystream", {"command": "XINFO", "stream": "STREAM", "key": "mystream"}, ) judge_command("XINFO HELP", {"command": "XINFO", "help": "HELP"}) judge_command("XINFO consumers mystream mygroup GROUPS mystream", None) judge_command("XINFO groups mystream mygroup", None) def test_xinfo_with_full(judge_command): judge_command( "XINFO STREAM mystream FULL", { "command": "XINFO", "stream": "STREAM", "key": "mystream", "full_const": "FULL", }, ) judge_command( "XINFO STREAM mystream FULL count 10", { "command": "XINFO", "stream": "STREAM", "key": "mystream", 
"full_const": "FULL", "count_const": "count", "count": "10", }, ) def test_xpending(judge_command): judge_command( "XPENDING mystream group55", {"command": "XPENDING", "key": "mystream", "group": "group55"}, ) judge_command( "XPENDING mystream group55 myconsumer", { "command": "XPENDING", "key": "mystream", "group": "group55", "consumer": "myconsumer", }, ) judge_command( "XPENDING mystream group55 - + 10", { "command": "XPENDING", "key": "mystream", "group": "group55", "stream_id": ["-", "+"], "count": "10", }, ) judge_command( "XPENDING mystream group55 - + 10 myconsumer", { "command": "XPENDING", "key": "mystream", "group": "group55", "stream_id": ["-", "+"], "count": "10", "consumer": "myconsumer", }, ) judge_command("XPENDING mystream group55 - + ", None) def test_xadd(judge_command): judge_command( "xadd mystream MAXLEN ~ 1000 * key value", { "command": "xadd", "key": "mystream", "maxlen": "MAXLEN", "approximately": "~", "count": "1000", "sfield": "key", "svalue": "value", "stream_id": "*", }, ) # test for MAXLEN option judge_command( "xadd mystream MAXLEN 1000 * key value", { "command": "xadd", "key": "mystream", "maxlen": "MAXLEN", "count": "1000", "sfield": "key", "svalue": "value", "stream_id": "*", }, ) judge_command( "xadd mystream * key value", { "command": "xadd", "key": "mystream", "sfield": "key", "svalue": "value", "stream_id": "*", }, ) # spcify stream id judge_command( "xadd mystream 123-123 key value", { "command": "xadd", "key": "mystream", "sfield": "key", "svalue": "value", "stream_id": "123-123", }, ) judge_command( "xadd mystream 123-123 key value foo bar hello world", { "command": "xadd", "key": "mystream", "sfield": "hello", "svalue": "world", "stream_id": "123-123", }, ) def test_xtrim(judge_command): judge_command( " XTRIM mystream MAXLEN 2", {"command": "XTRIM", "key": "mystream", "maxlen": "MAXLEN", "count": "2"}, ) judge_command( " XTRIM mystream MAXLEN ~ 2", { "command": "XTRIM", "key": "mystream", "maxlen": "MAXLEN", "count": "2", "approximately": "~", }, ) judge_command(" XTRIM mystream", None) def test_xdel(judge_command): judge_command( "XDEL mystream 1581165000000 1549611229000 1581060831000", {"command": "XDEL", "key": "mystream", "stream_id": "1581060831000"}, ) judge_command( "XDEL mystream 1581165000000", {"command": "XDEL", "key": "mystream", "stream_id": "1581165000000"}, ) def test_xclaim(judge_command): judge_command( "XCLAIM mystream mygroup Alice 3600000 1526569498055-0", { "command": "XCLAIM", "key": "mystream", "group": "mygroup", "consumer": "Alice", "millisecond": "3600000", "stream_id": "1526569498055-0", }, ) judge_command( "XCLAIM mystream mygroup Alice 3600000 1526569498055-0 123 456 789", { "command": "XCLAIM", "key": "mystream", "group": "mygroup", "consumer": "Alice", "millisecond": "3600000", "stream_id": "789", }, ) judge_command( "XCLAIM mystream mygroup Alice 3600000 1526569498055-0 IDEL 300", { "command": "XCLAIM", "key": "mystream", "group": "mygroup", "consumer": "Alice", "millisecond": ["3600000", "300"], "stream_id": "1526569498055-0", "idel": "IDEL", }, ) judge_command( "XCLAIM mystream mygroup Alice 3600000 1526569498055-0 retrycount 7", { "command": "XCLAIM", "key": "mystream", "group": "mygroup", "consumer": "Alice", "millisecond": "3600000", "stream_id": "1526569498055-0", "retrycount": "retrycount", "count": "7", }, ) judge_command( "XCLAIM mystream mygroup Alice 3600000 1526569498055-0 TIME 123456789", { "command": "XCLAIM", "key": "mystream", "group": "mygroup", "consumer": "Alice", "millisecond": "3600000", 
"stream_id": "1526569498055-0", "time": "TIME", "timestamp": "123456789", }, ) judge_command( "XCLAIM mystream mygroup Alice 3600000 1526569498055-0 FORCE", { "command": "XCLAIM", "key": "mystream", "group": "mygroup", "consumer": "Alice", "millisecond": "3600000", "stream_id": "1526569498055-0", "force": "FORCE", }, ) judge_command( "XCLAIM mystream mygroup Alice 3600000 1526569498055-0 JUSTID", { "command": "XCLAIM", "key": "mystream", "group": "mygroup", "consumer": "Alice", "millisecond": "3600000", "stream_id": "1526569498055-0", "justid": "JUSTID", }, ) def test_xread(judge_command): judge_command( "XREAD COUNT 2 STREAMS mystream writers 0-0 0-0", { "command": "XREAD", "count_const": "COUNT", "count": "2", "streams": "STREAMS", # FIXME current grammar can't support multiple tokens # so the ids will be recongized to keys. "keys": "mystream writers 0-0", "stream_id": "0-0", }, ) judge_command( "XREAD COUNT 2 BLOCK 1000 STREAMS mystream writers 0-0 0-0", { "command": "XREAD", "count_const": "COUNT", "count": "2", "streams": "STREAMS", "keys": "mystream writers 0-0", "block": "BLOCK", "millisecond": "1000", "stream_id": "0-0", }, ) def test_xreadgroup(judge_command): judge_command( "XREADGROUP GROUP mygroup1 Bob COUNT 1 BLOCK 100 NOACK STREAMS key1 1 key2 2", { "command": "XREADGROUP", "stream_group": "GROUP", "group": "mygroup1", "consumer": "Bob", "count_const": "COUNT", "count": "1", "block": "BLOCK", "millisecond": "100", "noack": "NOACK", "streams": "STREAMS", "keys": "key1 1 key2", "stream_id": "2", }, ) judge_command( "XREADGROUP GROUP mygroup1 Bob STREAMS key1 1 key2 2", { "command": "XREADGROUP", "stream_group": "GROUP", "group": "mygroup1", "consumer": "Bob", "streams": "STREAMS", "keys": "key1 1 key2", "stream_id": "2", }, ) judge_command("XREADGROUP GROUP group consumer", None)
[]
ivan2kh/find_forks
tests/test_find_forks/test_find_forks.py
409251282a85da48445afc03c5a1797df393ca95
# coding: utf-8 """test_find_fork.""" # pylint: disable=no-self-use from __future__ import absolute_import, division, print_function, unicode_literals from os import path import unittest from six import PY3 from find_forks.__init__ import CONFIG from find_forks.find_forks import add_forks, determine_names, find_forks, main from .__init__ import BASEPATH if PY3: from unittest.mock import patch, MagicMock, Mock # pylint: disable=no-name-in-module else: from mock import patch, MagicMock, Mock class FindForksCommon(unittest.TestCase): @staticmethod def make_mock(json_response): """Used in test_interesting.py.""" response_mock = MagicMock() response_mock.read = Mock(return_value=json_response) if PY3: response_mock.status = 200 response_mock.getheader = Mock(return_value='<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel="next", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel="last"') else: response_mock.code = 200 response_mock.info = Mock(return_value=(('link', '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel="next", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel="last"'), )) return response_mock def make_test(self, response_mock): """Used in test_interesting.py.""" url = 'https://github.com/frost-nzcr4/find_forks' with patch('find_forks.find_forks.urllib.request.urlopen', return_value=response_mock) as urlopen_mock: with patch('find_forks.git_wrapper.subprocess.call', return_value=None): self.assertEqual(add_forks(url), 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2') urlopen_mock.assert_called_once_with(url, timeout=6) if PY3: response_mock.status = 404 else: response_mock.code = 404 self.assertIsNone(add_forks(url)) class FindForksTest(FindForksCommon): def test_add_forks(self): self.assertIsNone(add_forks('httttps://unavailable!url')) with open(path.join(BASEPATH, 'fixture/response.json'), 'rb') as fixture: json_response = fixture.read() response_mock = self.make_mock(json_response) self.make_test(response_mock) def test_determine_names(self): """To run this test you'll need to prepare git first, run: git remote add test-origin-1 https://github.com/frost-nzcr4/find_forks.git git remote add test-origin-2 https://github.com/yagmort/symfony1.git git remote add test-origin-3 [email protected]:tjerkw/Android-SlideExpandableListView.git """ user, repo = determine_names() self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'find_forks') user, repo = determine_names('test-origin-1') self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'webmoney') user, repo = determine_names('test-origin-2') self.assertEqual(user, 'yagmort') self.assertEqual(repo, 'symfony1') user, repo = determine_names('test-origin-3') self.assertEqual(user, 'tjerkw') self.assertEqual(repo, 'Android-SlideExpandableListView') with self.assertRaises(RuntimeError): user, repo = determine_names('name-with-an-error') def test_find_forks(self): sent_args = { 'per_page': 99, 'start_page': 3 } url = 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?per_page=%s&page=%s' % (sent_args['per_page'], sent_args['start_page']) with patch('find_forks.git_wrapper.subprocess.call', return_value=None) as call_mock: with patch('find_forks.find_forks.add_forks', return_value=None) as add_forks_mock: find_forks(**sent_args) add_forks_mock.assert_called_once_with(url) call_mock.assert_called_once() def test_main(self): with patch('find_forks.find_forks.find_forks', return_value=None) as find_forks_mock: 
main() sent_args = CONFIG.copy() sent_args.update({'user': None, 'repo': None, 'no_fetch': False}) find_forks_mock.assert_called_once_with(**sent_args) # Test __version__ exceptions. find_forks_mock = MagicMock(side_effect=SystemError()) del find_forks_mock.__version__ modules = { 'find_forks.__init__': find_forks_mock } with patch.dict('sys.modules', modules): self.assertRaises(ImportError, main)
[((26, 24, 26, 35), 'mock.MagicMock', 'MagicMock', ({}, {}), '()', False, 'from mock import patch, MagicMock, Mock\n'), ((27, 29, 27, 61), 'mock.Mock', 'Mock', (), '', False, 'from mock import patch, MagicMock, Mock\n'), ((69, 21, 69, 38), 'find_forks.find_forks.determine_names', 'determine_names', ({}, {}), '()', False, 'from find_forks.find_forks import add_forks, determine_names, find_forks, main\n'), ((73, 21, 73, 53), 'find_forks.find_forks.determine_names', 'determine_names', ({(73, 37, 73, 52): '"""test-origin-1"""'}, {}), "('test-origin-1')", False, 'from find_forks.find_forks import add_forks, determine_names, find_forks, main\n'), ((77, 21, 77, 53), 'find_forks.find_forks.determine_names', 'determine_names', ({(77, 37, 77, 52): '"""test-origin-2"""'}, {}), "('test-origin-2')", False, 'from find_forks.find_forks import add_forks, determine_names, find_forks, main\n'), ((81, 21, 81, 53), 'find_forks.find_forks.determine_names', 'determine_names', ({(81, 37, 81, 52): '"""test-origin-3"""'}, {}), "('test-origin-3')", False, 'from find_forks.find_forks import add_forks, determine_names, find_forks, main\n'), ((30, 38, 31, 124), 'mock.Mock', 'Mock', (), '', False, 'from mock import patch, MagicMock, Mock\n'), ((34, 33, 35, 146), 'mock.Mock', 'Mock', (), '', False, 'from mock import patch, MagicMock, Mock\n'), ((42, 13, 42, 94), 'mock.patch', 'patch', (), '', False, 'from mock import patch, MagicMock, Mock\n'), ((55, 26, 55, 64), 'find_forks.find_forks.add_forks', 'add_forks', ({(55, 36, 55, 63): '"""httttps://unavailable!url"""'}, {}), "('httttps://unavailable!url')", False, 'from find_forks.find_forks import add_forks, determine_names, find_forks, main\n'), ((86, 25, 86, 62), 'find_forks.find_forks.determine_names', 'determine_names', ({(86, 41, 86, 61): '"""name-with-an-error"""'}, {}), "('name-with-an-error')", False, 'from find_forks.find_forks import add_forks, determine_names, find_forks, main\n'), ((95, 13, 95, 79), 'mock.patch', 'patch', (), '', False, 'from mock import patch, MagicMock, Mock\n'), ((102, 13, 102, 73), 'mock.patch', 'patch', (), '', False, 'from mock import patch, MagicMock, Mock\n'), ((103, 12, 103, 18), 'find_forks.find_forks.main', 'main', ({}, {}), '()', False, 'from find_forks.find_forks import add_forks, determine_names, find_forks, main\n'), ((104, 24, 104, 37), 'find_forks.__init__.CONFIG.copy', 'CONFIG.copy', ({}, {}), '()', False, 'from find_forks.__init__ import CONFIG\n'), ((43, 17, 43, 83), 'mock.patch', 'patch', (), '', False, 'from mock import patch, MagicMock, Mock\n'), ((57, 18, 57, 62), 'os.path.join', 'path.join', ({(57, 28, 57, 36): 'BASEPATH', (57, 38, 57, 61): '"""fixture/response.json"""'}, {}), "(BASEPATH, 'fixture/response.json')", False, 'from os import path\n'), ((96, 17, 96, 76), 'mock.patch', 'patch', (), '', False, 'from mock import patch, MagicMock, Mock\n'), ((97, 16, 97, 39), 'find_forks.find_forks.find_forks', 'find_forks', ({}, {}), '(**sent_args)', False, 'from find_forks.find_forks import add_forks, determine_names, find_forks, main\n'), ((114, 17, 114, 51), 'mock.patch.dict', 'patch.dict', ({(114, 28, 114, 41): '"""sys.modules"""', (114, 43, 114, 50): 'modules'}, {}), "('sys.modules', modules)", False, 'from mock import patch, MagicMock, Mock\n'), ((44, 33, 44, 47), 'find_forks.find_forks.add_forks', 'add_forks', ({(44, 43, 44, 46): 'url'}, {}), '(url)', False, 'from find_forks.find_forks import add_forks, determine_names, find_forks, main\n'), ((50, 34, 50, 48), 'find_forks.find_forks.add_forks', 'add_forks', ({(50, 44, 50, 
47): 'url'}, {}), '(url)', False, 'from find_forks.find_forks import add_forks, determine_names, find_forks, main\n')]
insequent/neutron
neutron/agent/l3/dvr_router.py
2b1c4f121e3e8ba1c5eb2ba6661bf6326e1507c5
# Copyright (c) 2015 Openstack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import binascii import netaddr from oslo_log import log as logging from oslo_utils import excutils from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import router_info as router from neutron.agent.linux import ip_lib from neutron.common import constants as l3_constants from neutron.common import utils as common_utils from neutron.i18n import _LE LOG = logging.getLogger(__name__) # xor-folding mask used for IPv6 rule index MASK_30 = 0x3fffffff class DvrRouter(router.RouterInfo): def __init__(self, agent, host, *args, **kwargs): super(DvrRouter, self).__init__(*args, **kwargs) self.agent = agent self.host = host self.floating_ips_dict = {} self.snat_iptables_manager = None # Linklocal subnet for router and floating IP namespace link self.rtr_fip_subnet = None self.dist_fip_count = None self.snat_namespace = None def get_floating_ips(self): """Filter Floating IPs to be hosted on this agent.""" floating_ips = super(DvrRouter, self).get_floating_ips() return [i for i in floating_ips if i['host'] == self.host] def get_snat_interfaces(self): return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) def get_snat_int_device_name(self, port_id): long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id return long_name[:self.driver.DEV_NAME_LEN] def _handle_fip_nat_rules(self, interface_name, action): """Configures NAT rules for Floating IPs for DVR. Remove all the rules. This is safe because if use_namespaces is set as False then the agent can only configure one router, otherwise each router's NAT rules will be in their own namespace. """ self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') self.iptables_manager.ipv4['nat'].empty_chain('snat') # Add back the jump to float-snat self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') # And add them back if the action is add_rules if action == 'add_rules' and interface_name: rule = ('POSTROUTING', '! -i %(interface_name)s ' '! -o %(interface_name)s -m conntrack ! 
' '--ctstate DNAT -j ACCEPT' % {'interface_name': interface_name}) self.iptables_manager.ipv4['nat'].add_rule(*rule) self.iptables_manager.apply() def floating_ip_added_dist(self, fip, fip_cidr): """Add floating IP to FIP namespace.""" floating_ip = fip['floating_ip_address'] fixed_ip = fip['fixed_ip_address'] rule_pr = self.fip_ns.allocate_rule_priority() self.floating_ips_dict[floating_ip] = rule_pr fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) #Add routing rule in fip namespace fip_ns_name = self.fip_ns.get_name() rtr_2_fip, _ = self.rtr_fip_subnet.get_pair() device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.add_route(fip_cidr, str(rtr_2_fip.ip)) interface_name = ( self.fip_ns.get_ext_device_name( self.fip_ns.agent_gateway_port['id'])) ip_lib.send_garp_for_proxyarp(fip_ns_name, interface_name, floating_ip, self.agent_conf.send_arp_for_ha) # update internal structures self.dist_fip_count = self.dist_fip_count + 1 def floating_ip_removed_dist(self, fip_cidr): """Remove floating IP from FIP namespace.""" floating_ip = fip_cidr.split('/')[0] rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) if self.rtr_fip_subnet is None: self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate( self.router_id) rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair() fip_ns_name = self.fip_ns.get_name() if floating_ip in self.floating_ips_dict: rule_pr = self.floating_ips_dict[floating_ip] ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) self.fip_ns.deallocate_rule_priority(rule_pr) #TODO(rajeev): Handle else case - exception/log? device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.delete_route(fip_cidr, str(rtr_2_fip.ip)) # check if this is the last FIP for this router self.dist_fip_count = self.dist_fip_count - 1 if self.dist_fip_count == 0: #remove default route entry device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name) ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name) device.route.delete_gateway(str(fip_2_rtr.ip), table=dvr_fip_ns.FIP_RT_TBL) self.fip_ns.local_subnets.release(self.router_id) self.rtr_fip_subnet = None ns_ip.del_veth(fip_2_rtr_name) is_last = self.fip_ns.unsubscribe(self.router_id) if is_last: # TODO(Carl) I can't help but think that another router could # come in and want to start using this namespace while this is # destroying it. The two could end up conflicting on # creating/destroying interfaces and such. I think I'd like a # semaphore to sync creation/deletion of this namespace. self.fip_ns.delete() self.fip_ns = None def add_floating_ip(self, fip, interface_name, device): if not self._add_fip_addr_to_device(fip, device): return l3_constants.FLOATINGIP_STATUS_ERROR # Special Handling for DVR - update FIP namespace ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) self.floating_ip_added_dist(fip, ip_cidr) return l3_constants.FLOATINGIP_STATUS_ACTIVE def remove_floating_ip(self, device, ip_cidr): super(DvrRouter, self).remove_floating_ip(device, ip_cidr) self.floating_ip_removed_dist(ip_cidr) def create_snat_namespace(self): # TODO(mlavalle): in the near future, this method should contain the # code in the L3 agent that creates a gateway for a dvr. 
The first step # is to move the creation of the snat namespace here self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'], self.agent_conf, self.driver, self.use_ipv6) self.snat_namespace.create() return self.snat_namespace def delete_snat_namespace(self): # TODO(mlavalle): in the near future, this method should contain the # code in the L3 agent that removes an external gateway for a dvr. The # first step is to move the deletion of the snat namespace here self.snat_namespace.delete() self.snat_namespace = None def _get_internal_port(self, subnet_id): """Return internal router port based on subnet_id.""" router_ports = self.router.get(l3_constants.INTERFACE_KEY, []) for port in router_ports: fips = port['fixed_ips'] for f in fips: if f['subnet_id'] == subnet_id: return port def _update_arp_entry(self, ip, mac, subnet_id, operation): """Add or delete arp entry into router namespace for the subnet.""" port = self._get_internal_port(subnet_id) # update arp entry only if the subnet is attached to the router if not port: return try: # TODO(mrsmith): optimize the calls below for bulk calls interface_name = self.get_internal_device_name(port['id']) device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) if operation == 'add': device.neigh.add(ip, mac) elif operation == 'delete': device.neigh.delete(ip, mac) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("DVR: Failed updating arp entry")) def _set_subnet_arp_info(self, port): """Set ARP info retrieved from Plugin for existing ports.""" if 'id' not in port['subnet']: return subnet_id = port['subnet']['id'] # TODO(Carl) Can we eliminate the need to make this RPC while # processing a router. subnet_ports = self.agent.get_ports_by_subnet(subnet_id) for p in subnet_ports: if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS: for fixed_ip in p['fixed_ips']: self._update_arp_entry(fixed_ip['ip_address'], p['mac_address'], subnet_id, 'add') def _map_internal_interfaces(self, int_port, snat_ports): """Return the SNAT port for the given internal interface port.""" fixed_ip = int_port['fixed_ips'][0] subnet_id = fixed_ip['subnet_id'] match_port = [p for p in snat_ports if p['fixed_ips'][0]['subnet_id'] == subnet_id] if match_port: return match_port[0] else: LOG.error(_LE('DVR: no map match_port found!')) @staticmethod def _get_snat_idx(ip_cidr): """Generate index for DVR snat rules and route tables. The index value has to be 32 bits or less but more than the system generated entries i.e. 32768. For IPv4 use the numeric value of the cidr. For IPv6 generate a crc32 bit hash and xor-fold to 30 bits. Use the freed range to extend smaller values so that they become greater than system generated entries. 
""" net = netaddr.IPNetwork(ip_cidr) if net.version == 6: # the crc32 & 0xffffffff is for Python 2.6 and 3.0 compatibility snat_idx = binascii.crc32(ip_cidr) & 0xffffffff # xor-fold the hash to reserve upper range to extend smaller values snat_idx = (snat_idx >> 30) ^ (snat_idx & MASK_30) if snat_idx < 32768: snat_idx = snat_idx + MASK_30 else: snat_idx = net.value return snat_idx def _snat_redirect_add(self, gateway, sn_port, sn_int): """Adds rules and routes for SNAT redirection.""" try: ip_cidr = sn_port['ip_cidr'] snat_idx = self._get_snat_idx(ip_cidr) ns_ipr = ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name) ns_ipd.route.add_gateway(gateway, table=snat_idx) ns_ipr.rule.add(ip_cidr, snat_idx, snat_idx) ns_ipwrapr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.' 'send_redirects=0' % sn_int]) except Exception: LOG.exception(_LE('DVR: error adding redirection logic')) def _snat_redirect_remove(self, gateway, sn_port, sn_int): """Removes rules and routes for SNAT redirection.""" try: ip_cidr = sn_port['ip_cidr'] snat_idx = self._get_snat_idx(ip_cidr) ns_ipr = ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipd.route.delete_gateway(gateway, table=snat_idx) ns_ipr.rule.delete(ip_cidr, snat_idx, snat_idx) except Exception: LOG.exception(_LE('DVR: removed snat failed')) def get_gw_port_host(self): host = self.router.get('gw_port_host') if not host: LOG.debug("gw_port_host missing from router: %s", self.router['id']) return host def internal_network_added(self, port): super(DvrRouter, self).internal_network_added(port) ex_gw_port = self.get_ex_gw_port() if not ex_gw_port: return snat_ports = self.get_snat_interfaces() sn_port = self._map_internal_interfaces(port, snat_ports) if not sn_port: return interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_add(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) # TODO(Carl) This is a sign that dvr needs two router classes. 
is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and self.get_gw_port_host() == self.host) if not is_this_snat_host: return ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id']) self._set_subnet_info(sn_port) interface_name = self.get_snat_int_device_name(sn_port['id']) self._internal_network_added( ns_name, sn_port['network_id'], sn_port['id'], sn_port['ip_cidr'], sn_port['mac_address'], interface_name, dvr_snat_ns.SNAT_INT_DEV_PREFIX) self._set_subnet_arp_info(port) def _dvr_internal_network_removed(self, port): if not self.ex_gw_port: return sn_port = self._map_internal_interfaces(port, self.snat_ports) if not sn_port: return # DVR handling code for SNAT interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_remove(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and self.ex_gw_port['binding:host_id'] == self.host) if not is_this_snat_host: return snat_interface = ( self.get_snat_int_device_name(sn_port['id'])) ns_name = self.snat_namespace.name prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX if ip_lib.device_exists(snat_interface, namespace=ns_name): self.driver.unplug(snat_interface, namespace=ns_name, prefix=prefix) def internal_network_removed(self, port): self._dvr_internal_network_removed(port) super(DvrRouter, self).internal_network_removed(port) def get_floating_agent_gw_interface(self, ext_net_id): """Filter Floating Agent GW port for the external network.""" fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, []) return next( (p for p in fip_ports if p['network_id'] == ext_net_id), None)
[((29, 6, 29, 33), 'oslo_log.log.getLogger', 'logging.getLogger', ({(29, 24, 29, 32): '__name__'}, {}), '(__name__)', True, 'from oslo_log import log as logging\n'), ((90, 18, 90, 55), 'neutron.agent.linux.ip_lib.IPRule', 'ip_lib.IPRule', (), '', False, 'from neutron.agent.linux import ip_lib\n'), ((95, 17, 95, 71), 'neutron.agent.linux.ip_lib.IPDevice', 'ip_lib.IPDevice', (), '', False, 'from neutron.agent.linux import ip_lib\n'), ((100, 8, 103, 70), 'neutron.agent.linux.ip_lib.send_garp_for_proxyarp', 'ip_lib.send_garp_for_proxyarp', ({(100, 38, 100, 49): 'fip_ns_name', (101, 38, 101, 52): 'interface_name', (102, 38, 102, 49): 'floating_ip', (103, 38, 103, 69): 'self.agent_conf.send_arp_for_ha'}, {}), '(fip_ns_name, interface_name, floating_ip,\n self.agent_conf.send_arp_for_ha)', False, 'from neutron.agent.linux import ip_lib\n'), ((125, 17, 125, 71), 'neutron.agent.linux.ip_lib.IPDevice', 'ip_lib.IPDevice', (), '', False, 'from neutron.agent.linux import ip_lib\n'), ((154, 18, 154, 69), 'neutron.common.utils.ip_to_cidr', 'common_utils.ip_to_cidr', ({(154, 42, 154, 68): "fip['floating_ip_address']"}, {}), "(fip['floating_ip_address'])", True, 'from neutron.common import utils as common_utils\n'), ((166, 30, 169, 70), 'neutron.agent.l3.dvr_snat_ns.SnatNamespace', 'dvr_snat_ns.SnatNamespace', ({(166, 56, 166, 73): "self.router['id']", (167, 56, 167, 71): 'self.agent_conf', (168, 56, 168, 67): 'self.driver', (169, 56, 169, 69): 'self.use_ipv6'}, {}), "(self.router['id'], self.agent_conf, self.driver,\n self.use_ipv6)", False, 'from neutron.agent.l3 import dvr_snat_ns\n'), ((248, 14, 248, 40), 'netaddr.IPNetwork', 'netaddr.IPNetwork', ({(248, 32, 248, 39): 'ip_cidr'}, {}), '(ip_cidr)', False, 'import netaddr\n'), ((317, 18, 317, 79), 'neutron.agent.l3.dvr_snat_ns.SnatNamespace.get_snat_ns_name', 'dvr_snat_ns.SnatNamespace.get_snat_ns_name', ({(317, 61, 317, 78): "self.router['id']"}, {}), "(self.router['id'])", False, 'from neutron.agent.l3 import dvr_snat_ns\n'), ((354, 11, 354, 66), 'neutron.agent.linux.ip_lib.device_exists', 'ip_lib.device_exists', (), '', False, 'from neutron.agent.linux import ip_lib\n'), ((120, 22, 120, 59), 'neutron.agent.linux.ip_lib.IPRule', 'ip_lib.IPRule', (), '', False, 'from neutron.agent.linux import ip_lib\n'), ((132, 21, 132, 76), 'neutron.agent.linux.ip_lib.IPDevice', 'ip_lib.IPDevice', (), '', False, 'from neutron.agent.linux import ip_lib\n'), ((133, 20, 133, 59), 'neutron.agent.linux.ip_lib.IPWrapper', 'ip_lib.IPWrapper', (), '', False, 'from neutron.agent.linux import ip_lib\n'), ((199, 21, 199, 76), 'neutron.agent.linux.ip_lib.IPDevice', 'ip_lib.IPDevice', (), '', False, 'from neutron.agent.linux import ip_lib\n'), ((265, 21, 265, 58), 'neutron.agent.linux.ip_lib.IPRule', 'ip_lib.IPRule', (), '', False, 'from neutron.agent.linux import ip_lib\n'), ((266, 21, 266, 68), 'neutron.agent.linux.ip_lib.IPDevice', 'ip_lib.IPDevice', (), '', False, 'from neutron.agent.linux import ip_lib\n'), ((267, 25, 267, 65), 'neutron.agent.linux.ip_lib.IPWrapper', 'ip_lib.IPWrapper', (), '', False, 'from neutron.agent.linux import ip_lib\n'), ((280, 21, 280, 58), 'neutron.agent.linux.ip_lib.IPRule', 'ip_lib.IPRule', (), '', False, 'from neutron.agent.linux import ip_lib\n'), ((281, 21, 281, 68), 'neutron.agent.linux.ip_lib.IPDevice', 'ip_lib.IPDevice', (), '', False, 'from neutron.agent.linux import ip_lib\n'), ((236, 22, 236, 58), 'neutron.i18n._LE', '_LE', ({(236, 26, 236, 57): '"""DVR: no map match_port found!"""'}, {}), "('DVR: no map match_port found!')", False, 'from 
neutron.i18n import _LE\n'), ((251, 23, 251, 46), 'binascii.crc32', 'binascii.crc32', ({(251, 38, 251, 45): 'ip_cidr'}, {}), '(ip_cidr)', False, 'import binascii\n'), ((205, 17, 205, 54), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ({}, {}), '()', False, 'from oslo_utils import excutils\n'), ((273, 26, 273, 68), 'neutron.i18n._LE', '_LE', ({(273, 30, 273, 67): '"""DVR: error adding redirection logic"""'}, {}), "('DVR: error adding redirection logic')", False, 'from neutron.i18n import _LE\n'), ((285, 26, 285, 57), 'neutron.i18n._LE', '_LE', ({(285, 30, 285, 56): '"""DVR: removed snat failed"""'}, {}), "('DVR: removed snat failed')", False, 'from neutron.i18n import _LE\n'), ((206, 30, 206, 67), 'neutron.i18n._LE', '_LE', ({(206, 34, 206, 66): '"""DVR: Failed updating arp entry"""'}, {}), "('DVR: Failed updating arp entry')", False, 'from neutron.i18n import _LE\n')]
PCManticore/sider
sider/warnings.py
cd11b38b2a1bf1ea3600eb287abfe3c2b40c67c1
""":mod:`sider.warnings` --- Warning categories ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module defines several custom warning category classes. """ class SiderWarning(Warning): """All warning classes used by Sider extend this base class.""" class PerformanceWarning(SiderWarning, RuntimeWarning): """The category for warnings about performance worries. Operations that warn this category would work but be inefficient. """ class TransactionWarning(SiderWarning, RuntimeWarning): """The category for warnings about transactions."""
[]
Bucolo/Kadal
kadal/query.py
a0085f15df4f8ebbf5ec4cd4344e207773c6b498
MEDIA_SEARCH = """ query ($search: String, $type: MediaType, $exclude: MediaFormat, $isAdult: Boolean) { Media(search: $search, type: $type, format_not: $exclude, isAdult: $isAdult) { id type format title { english romaji native } synonyms status description startDate { year month day } endDate { year month day } episodes chapters volumes coverImage { large color } bannerImage genres averageScore siteUrl isAdult nextAiringEpisode { timeUntilAiring episode } } } """ MEDIA_BY_ID = """ query ($id: Int, $type: MediaType) { Media(id: $id, type: $type) { id type format title { english romaji native } synonyms status description startDate { year month day } endDate { year month day } episodes chapters coverImage { large color } bannerImage genres averageScore siteUrl isAdult nextAiringEpisode { timeUntilAiring episode } } } """ MEDIA_PAGED = """ query ( $id: Int, $page: Int, $perPage: Int, $search: String, $type: MediaType, $sort: [MediaSort] = [SEARCH_MATCH], $exclude: MediaFormat, $isAdult: Boolean ) { Page(page: $page, perPage: $perPage) { media(id: $id, search: $search, type: $type, sort: $sort, format_not: $exclude, isAdult: $isAdult) { id type format title { english romaji native } synonyms status description startDate { year month day } endDate { year month day } episodes chapters volumes coverImage { large color } bannerImage genres averageScore siteUrl isAdult popularity } } } """ USER_SEARCH = """ query ($search: String) { User(search: $search) { id name html_about: about(asHtml: true) about avatar { large } bannerImage siteUrl stats { watchedTime chaptersRead } } } """ USER_BY_ID = """ query ($id: Int) { User(id: $id) { id name html_about: about(asHtml: true) about avatar { large } bannerImage siteUrl stats { watchedTime chaptersRead } } } """
[]
turkeydonkey/nzmath3
sandbox/test/testChainop.py
a48ae9efcf0d9ad1485c2e9863c948a7f1b20311
import unittest import operator import sandbox.chainop as chainop class BasicChainTest (unittest.TestCase): def testBasicChain(self): double = lambda x: x * 2 self.assertEqual(62, chainop.basic_chain((operator.add, double), 2, 31)) square = lambda x: x ** 2 self.assertEqual(2**31, chainop.basic_chain((operator.mul, square), 2, 31)) class MultiChainTest (unittest.TestCase): def testMultiChain(self): double = lambda x: x * 2 self.assertEqual([62, 93], chainop.multi_chains((operator.add, double), (2, 3), 31)) square = lambda x: x ** 2 self.assertEqual([2**31, 3**31], chainop.multi_chains((operator.mul, square), [2, 3], 31)) def suite(suffix="Test"): suite = unittest.TestSuite() all_names = globals() for name in all_names: if name.endswith(suffix): suite.addTest(unittest.makeSuite(all_names[name], "test")) return suite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite())
[((23, 12, 23, 32), 'unittest.TestSuite', 'unittest.TestSuite', ({}, {}), '()', False, 'import unittest\n'), ((31, 13, 31, 38), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ({}, {}), '()', False, 'import unittest\n'), ((9, 29, 9, 79), 'sandbox.chainop.basic_chain', 'chainop.basic_chain', ({(9, 49, 9, 71): '(operator.add, double)', (9, 73, 9, 74): '(2)', (9, 76, 9, 78): '(31)'}, {}), '((operator.add, double), 2, 31)', True, 'import sandbox.chainop as chainop\n'), ((11, 32, 11, 82), 'sandbox.chainop.basic_chain', 'chainop.basic_chain', ({(11, 52, 11, 74): '(operator.mul, square)', (11, 76, 11, 77): '(2)', (11, 79, 11, 81): '(31)'}, {}), '((operator.mul, square), 2, 31)', True, 'import sandbox.chainop as chainop\n'), ((17, 35, 17, 91), 'sandbox.chainop.multi_chains', 'chainop.multi_chains', ({(17, 56, 17, 78): '(operator.add, double)', (17, 80, 17, 86): '(2, 3)', (17, 88, 17, 90): '(31)'}, {}), '((operator.add, double), (2, 3), 31)', True, 'import sandbox.chainop as chainop\n'), ((19, 41, 19, 97), 'sandbox.chainop.multi_chains', 'chainop.multi_chains', ({(19, 62, 19, 84): '(operator.mul, square)', (19, 86, 19, 92): '[2, 3]', (19, 94, 19, 96): '(31)'}, {}), '((operator.mul, square), [2, 3], 31)', True, 'import sandbox.chainop as chainop\n'), ((27, 26, 27, 69), 'unittest.makeSuite', 'unittest.makeSuite', ({(27, 45, 27, 60): 'all_names[name]', (27, 62, 27, 68): '"""test"""'}, {}), "(all_names[name], 'test')", False, 'import unittest\n')]
mrmotallebi/berkeley-deeprl-bootcamp
labs_final/lab5/experiments/run_trpo_pendulum.py
9257c693724c38edfa4571e3510667ca168b7ca1
#!/usr/bin/env python import chainer from algs import trpo from env_makers import EnvMaker from models import GaussianMLPPolicy, MLPBaseline from utils import SnapshotSaver import numpy as np import os import logger log_dir = "data/local/trpo-pendulum" np.random.seed(42) # Clean up existing logs os.system("rm -rf {}".format(log_dir)) with logger.session(log_dir): env_maker = EnvMaker('Pendulum-v0') env = env_maker.make() policy = GaussianMLPPolicy( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, ) baseline = MLPBaseline( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, ) trpo( env=env, env_maker=env_maker, n_envs=16, policy=policy, baseline=baseline, batch_size=10000, n_iters=100, snapshot_saver=SnapshotSaver(log_dir), )
[((14, 0, 14, 18), 'numpy.random.seed', 'np.random.seed', ({(14, 15, 14, 17): '(42)'}, {}), '(42)', True, 'import numpy as np\n'), ((19, 5, 19, 28), 'logger.session', 'logger.session', ({(19, 20, 19, 27): 'log_dir'}, {}), '(log_dir)', False, 'import logger\n'), ((20, 16, 20, 39), 'env_makers.EnvMaker', 'EnvMaker', ({(20, 25, 20, 38): '"""Pendulum-v0"""'}, {}), "('Pendulum-v0')", False, 'from env_makers import EnvMaker\n'), ((22, 13, 28, 5), 'models.GaussianMLPPolicy', 'GaussianMLPPolicy', (), '', False, 'from models import GaussianMLPPolicy, MLPBaseline\n'), ((29, 15, 35, 5), 'models.MLPBaseline', 'MLPBaseline', (), '', False, 'from models import GaussianMLPPolicy, MLPBaseline\n'), ((44, 23, 44, 45), 'utils.SnapshotSaver', 'SnapshotSaver', ({(44, 37, 44, 44): 'log_dir'}, {}), '(log_dir)', False, 'from utils import SnapshotSaver\n')]
yy1244/Jtyoui
jtyoui/regular/regexengine.py
d3c212ed9d6ffa6b37a8ca49098ab59c89216f09
#!/usr/bin/python3.7 # -*- coding: utf-8 -*- # @Time : 2019/12/2 10:17 # @Author: [email protected] """ Regular expression parser """ try: import xml.etree.cElementTree as et except ModuleNotFoundError: import xml.etree.ElementTree as et import re class RegexEngine: def __init__(self, xml, str_): """Load the regex table. The regex table is an XML file :param xml: location of the regex table :param str_: the string to match """ self._string = str_ self._root = et.parse(xml).getroot() self.re = '' self.data = [] def select(self, tag): """Perform different regex extractions according to the XML tag :param tag: the XML tag :return: the data extracted by the regex """ root = self._root.find(tag) attrib = root.attrib if attrib.get('part', 'False').lower() == 'true': self._part_tag(root) return list(filter(lambda x: x[1], self.data)) else: sf = self._no_part(root) self.re = ''.join(self.data) + sf return re.findall(self.re, self._string) def _no_part(self, tags): """Extract the tags without splitting them""" for tag in tags: if tag: if tag.attrib.get('must', 'true').lower() == 'true': self.data.append(self.re) self.re = '' self.re = '(?:' + self._no_part(tag) + ')' else: self.re = self._no_part(tag) else: attrib = tag.attrib text = tag.text.strip() if attrib.get('must', 'true').lower() == 'true': self.re = '(?:' + text + ')' else: self.re += '(?:' + text + ')?' return self.re def _part_tag(self, tags): """Extract the tags separately""" for tag in tags: if tag: self._part_tag(tag) else: self.data.append((tag.tag, re.findall(tag.text.strip(), self._string))) @property def string(self): return self._string @string.setter def string(self, str_): self._string = str_ self.re, self.data = '', []
[((40, 19, 40, 52), 're.findall', 're.findall', ({(40, 30, 40, 37): 'self.re', (40, 39, 40, 51): 'self._string'}, {}), '(self.re, self._string)', False, 'import re\n'), ((23, 21, 23, 34), 'xml.etree.ElementTree.parse', 'et.parse', ({(23, 30, 23, 33): 'xml'}, {}), '(xml)', True, 'import xml.etree.ElementTree as et\n')]
rflperry/ProgLearn
proglearn/transformers.py
9f799b4a8cf2157ba40b04842dc88eaf646e6420
""" Main Author: Will LeVine Corresponding Email: [email protected] """ import keras import numpy as np from sklearn.tree import DecisionTreeClassifier from sklearn.utils.validation import check_array, check_is_fitted, check_X_y from .base import BaseTransformer class NeuralClassificationTransformer(BaseTransformer): """ A class used to transform data from a category to a specialized representation. Parameters ---------- network : object A neural network used in the classification transformer. euclidean_layer_idx : int An integer to represent the final layer of the transformer. optimizer : str or keras.optimizers instance An optimizer used when compiling the neural network. loss : str, default="categorical_crossentropy" A loss function used when compiling the neural network. pretrained : bool, default=False A boolean used to identify if the network is pretrained. compile_kwargs : dict, default={"metrics": ["acc"]} A dictionary containing metrics for judging network performance. fit_kwargs : dict, default={ "epochs": 100, "callbacks": [keras.callbacks.EarlyStopping(patience=5, monitor="val_acc")], "verbose": False, "validation_split": 0.33, }, A dictionary to hold epochs, callbacks, verbose, and validation split for the network. Attributes ---------- encoder_ : object A Keras model with inputs and outputs based on the network attribute. Output layers are determined by the euclidean_layer_idx parameter. """ def __init__( self, network, euclidean_layer_idx, optimizer, loss="categorical_crossentropy", pretrained=False, compile_kwargs={"metrics": ["acc"]}, fit_kwargs={ "epochs": 100, "callbacks": [keras.callbacks.EarlyStopping(patience=5, monitor="val_acc")], "verbose": False, "validation_split": 0.33, }, ): self.network = keras.models.clone_model(network) self.encoder_ = keras.models.Model( inputs=self.network.inputs, outputs=self.network.layers[euclidean_layer_idx].output, ) self.pretrained = pretrained self.optimizer = optimizer self.loss = loss self.compile_kwargs = compile_kwargs self.fit_kwargs = fit_kwargs def fit(self, X, y): """ Fits the transformer to data X with labels y. Parameters ---------- X : ndarray Input data matrix. y : ndarray Output (i.e. response data matrix). Returns ------- self : NeuralClassificationTransformer The object itself. """ check_X_y(X, y) _, y = np.unique(y, return_inverse=True) # more typechecking self.network.compile( loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs ) self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs) return self def transform(self, X): """ Performs inference using the transformer. Parameters ---------- X : ndarray Input data matrix. Returns ------- X_transformed : ndarray The transformed input. Raises ------ NotFittedError When the model is not fitted. """ check_is_fitted(self) check_array(X) return self.encoder_.predict(X) class TreeClassificationTransformer(BaseTransformer): """ A class used to transform data from a category to a specialized representation. Parameters ---------- kwargs : dict, default={} A dictionary to contain parameters of the tree. Attributes ---------- transformer : sklearn.tree.DecisionTreeClassifier an internal sklearn DecisionTreeClassifier """ def __init__(self, kwargs={}): self.kwargs = kwargs def fit(self, X, y): """ Fits the transformer to data X with labels y. Parameters ---------- X : ndarray Input data matrix. y : ndarray Output (i.e. response data matrix). Returns ------- self : TreeClassificationTransformer The object itself. 
""" X, y = check_X_y(X, y) self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X, y) return self def transform(self, X): """ Performs inference using the transformer. Parameters ---------- X : ndarray Input data matrix. Returns ------- X_transformed : ndarray The transformed input. Raises ------ NotFittedError When the model is not fitted. """ check_is_fitted(self) X = check_array(X) return self.transformer_.apply(X)
[((67, 23, 67, 56), 'keras.models.clone_model', 'keras.models.clone_model', ({(67, 48, 67, 55): 'network'}, {}), '(network)', False, 'import keras\n'), ((68, 24, 71, 9), 'keras.models.Model', 'keras.models.Model', (), '', False, 'import keras\n'), ((94, 8, 94, 23), 'sklearn.utils.validation.check_X_y', 'check_X_y', ({(94, 18, 94, 19): 'X', (94, 21, 94, 22): 'y'}, {}), '(X, y)', False, 'from sklearn.utils.validation import check_array, check_is_fitted, check_X_y\n'), ((95, 15, 95, 48), 'numpy.unique', 'np.unique', (), '', True, 'import numpy as np\n'), ((125, 8, 125, 29), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', ({(125, 24, 125, 28): 'self'}, {}), '(self)', False, 'from sklearn.utils.validation import check_array, check_is_fitted, check_X_y\n'), ((126, 8, 126, 22), 'sklearn.utils.validation.check_array', 'check_array', ({(126, 20, 126, 21): 'X'}, {}), '(X)', False, 'from sklearn.utils.validation import check_array, check_is_fitted, check_X_y\n'), ((164, 15, 164, 30), 'sklearn.utils.validation.check_X_y', 'check_X_y', ({(164, 25, 164, 26): 'X', (164, 28, 164, 29): 'y'}, {}), '(X, y)', False, 'from sklearn.utils.validation import check_array, check_is_fitted, check_X_y\n'), ((187, 8, 187, 29), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', ({(187, 24, 187, 28): 'self'}, {}), '(self)', False, 'from sklearn.utils.validation import check_array, check_is_fitted, check_X_y\n'), ((188, 12, 188, 26), 'sklearn.utils.validation.check_array', 'check_array', ({(188, 24, 188, 25): 'X'}, {}), '(X)', False, 'from sklearn.utils.validation import check_array, check_is_fitted, check_X_y\n'), ((102, 28, 102, 57), 'keras.utils.to_categorical', 'keras.utils.to_categorical', ({(102, 55, 102, 56): 'y'}, {}), '(y)', False, 'import keras\n'), ((62, 26, 62, 86), 'keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', (), '', False, 'import keras\n'), ((165, 28, 165, 65), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ({}, {}), '(**self.kwargs)', False, 'from sklearn.tree import DecisionTreeClassifier\n')]
marx-alex/Morphelia
morphelia/external/saphire.py
809278b07f1a535789455d54df3cbddc850d609c
import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.collections as mcoll from matplotlib.ticker import MaxNLocator plt.style.use('seaborn-darkgrid') class BaseTraj: def __init__(self, model, X): self.model = model assert len(X.shape) == 2, f"X should be 2-d, instead got shape {X.shape}" self.X = X self.means = self.model.means_.copy() self.states = self.model.predict(X) self.n_states = len(np.unique(self.states)) self.trans = self.model.transmat_.copy() def rho_dt_bins(self, rho, theta, dt, bins=12): """ Bin rho values and dwell time on polar coordinates. :param rho: :param theta: :param dt: :param bins: :return: """ bins = np.linspace(-np.pi, np.pi, bins+1) bin_means = (bins[:-1] + bins[1:]) / 2 bin_ix = np.digitize(theta, bins) bin_rd = [rho[(bin_ix == i) & (rho > 0)].mean() if len(rho[(bin_ix == i) & (rho > 0)]) > 0 else 0 for i in range(1, len(bins))] bin_dt = [dt[(bin_ix == i) & (dt > 0)].sum() if len(dt[(bin_ix == i) & (dt > 0)]) > 0 else 0 for i in range(1, len(bins))] return bin_means, bin_rd, bin_dt def transition_vectors(self): """ Transition vectors between states on polar coordinates. :return: """ mu_x, mu_y = self.means[:, 0], self.means[:, 1] mu_x_dist = mu_x - mu_x[:, np.newaxis] mu_y_dist = mu_y - mu_y[:, np.newaxis] dist_vect = np.column_stack((mu_x_dist.flatten(), mu_y_dist.flatten())) trans_rho, trans_theta = self.cart2pol(dist_vect) trans_rho = (trans_rho.reshape((self.n_states, self.n_states)) * self.design_transition()).flatten() return trans_rho, trans_theta def design_transition(self, thresh=0.1): design_trans = self.trans diag_ix = np.diag_indices(len(design_trans)) design_trans[diag_ix] = 0 design_trans[design_trans < thresh] = 0 design_trans[design_trans >= thresh] = 1 return design_trans def norm_trans_time(self): """ Normalized transition time. :return: """ unique, counts = np.unique(self.states, return_counts=True) sort_ix = unique.argsort() counts = counts[sort_ix] # normalize by transition probability dt = (counts * self.design_transition()).flatten() return dt / dt.sum() def norm_state_time(self): """ Normalized state time. :return: """ unique, counts = np.unique(self.states, return_counts=True) sort_ix = unique.argsort() counts = counts[sort_ix] return counts / counts.sum() @staticmethod def cart2pol(arr): """ Cartesion space to polar space. Args: arr (numpy.array): Array of shape [n_state x dims] """ x, y = arr[:, 0], arr[:, 1] rho = np.sqrt(x ** 2 + y ** 2) theta = np.arctan2(y, x) return rho, theta class PhenoSign(BaseTraj): """Phenotypic Signature class.""" def __init__(self, model, X): super(PhenoSign, self).__init__(model, X) self.bin_means, self.signature = self.get_signature() def get_signature(self): """ Calculate phenotypic signature for a given model. :return: bin_means, array of shape [4 x n_bins] with 1. state radial distances 2. state dwell times 3. transition distances 3. transition dwell times """ # states mu_rho, mu_theta = self.cart2pol(self.means) state_dt = self.norm_state_time() bin_means_1, state_rd_bins, state_dt_bins = self.rho_dt_bins(mu_rho, mu_theta, state_dt) # transitions trans_rho, trans_theta = self.transition_vectors() trans_dt = self.norm_trans_time() bin_means_2, trans_rd_bins, trans_dt_bins = self.rho_dt_bins(trans_rho, trans_theta, trans_dt) assert (bin_means_1 == bin_means_2).all(), "state and transition vectors are binned differently and can" \ "not be concatenated." 
return bin_means_1, np.vstack((state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins)) class Saphire(PhenoSign): """Implementation of the SAPHIRE algorithm for plotting Hidden Markov Models. Gordonov S, Hwang MK, Wells A, Gertler FB, Lauffenburger DA, Bathe M. Time series modeling of live-cell shape dynamics for image-based phenotypic profiling. Integr Biol (Camb). 2016;8(1):73-90. """ def __init__(self, model, X): super(Saphire, self).__init__(model, X) def plot_traj(self, projection='cartesian', ymax=None): """ Plot cell trajectory. Args: projection (str): cartesian or polar. ymax (int) """ avail_proj = ['cartesian', 'polar'] projection = projection.lower() assert projection in avail_proj, f"projection unknown: {projection}" if projection == 'cartesian': projection = None cmap = plt.get_cmap('binary') cmap = truncate_colormap(cmap, minval=0.2) if projection == 'polar': y, x = self.cart2pol(self.X) y_mu, x_mu = self.cart2pol(self.means) else: x, y = self.X[:, 0], self.X[:, 1] x_mu, y_mu = self.means[:, 0], self.means[:, 1] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': projection}) ax.scatter(x, y, c=self.states, cmap='Set1', zorder=2) traj = ax.scatter(x_mu, y_mu, c=np.unique(self.states), cmap='Set1', s=200, zorder=2, edgecolor='black', alpha=0.6) legend = ax.legend(*traj.legend_elements(), loc="upper right", bbox_to_anchor=(1.2, 0.94), title="States") ax.add_artist(legend) if ymax is not None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) colorline(x, y, cmap=cmap, zorder=1) norm = mpl.colors.Normalize(vmin=0, vmax=48) cax = fig.add_axes([0.94, 0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Time') plt.show() return fig, ax def plot_states(self, ymax=None): """ Plot cell states. """ bin_rd, bin_dt = self.signature[0, :], self.signature[1, :] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'}) cmap = plt.get_cmap("Oranges") N = 12 width = (2 * np.pi) / N ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt)) if ymax is not None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) norm = mpl.colors.Normalize(vmin=0, vmax=1) cax = fig.add_axes([0.94, 0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Increasing state dwell time', ticks=[0, 0.5, 1]) return fig, ax def plot_transition(self, ymax=None): """ Plot transition between cell states. 
""" bin_rd, bin_dt = self.signature[2, :], self.signature[3, :] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'}) cmap = plt.get_cmap("Blues") N = 12 width = (2 * np.pi) / N ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt)) if ymax is not None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) norm = mpl.colors.Normalize(vmin=0, vmax=1) cax = fig.add_axes([0.94, 0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Increasing transition dwell time', ticks=[0, 0.5, 1]) return fig, ax def colorline(x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0), linewidth=3, alpha=1.0, zorder=1): """ Plot a colored line with coordinates x and y Optionally specify colors in the array z Optionally specify a colormap, a norm function and a line width """ # Default colors equally spaced on [0,1]: if z is None: z = np.linspace(0.0, 1.0, len(x)) # Special case if a single number: if not hasattr(z, "__iter__"): # to check for numerical input -- this is a hack z = np.array([z]) z = np.asarray(z) segments = make_segments(x, y) lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm, linewidth=linewidth, alpha=alpha, zorder=zorder) ax = plt.gca() ax.add_collection(lc) return lc def make_segments(x, y): """ Create list of line segments from x and y coordinates, in the correct format for LineCollection: an array of the form numlines x (points per line) x 2 (x and y) array """ points = np.array([x, y]).T.reshape(-1, 1, 2) segments = np.concatenate([points[:-1], points[1:]], axis=1) return segments def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100): ''' https://stackoverflow.com/a/18926541 ''' if isinstance(cmap, str): cmap = plt.get_cmap(cmap) new_cmap = mpl.colors.LinearSegmentedColormap.from_list( 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval), cmap(np.linspace(minval, maxval, n))) return new_cmap
[((7, 0, 7, 33), 'matplotlib.pyplot.style.use', 'plt.style.use', ({(7, 14, 7, 32): '"""seaborn-darkgrid"""'}, {}), "('seaborn-darkgrid')", True, 'import matplotlib.pyplot as plt\n'), ((239, 33, 239, 55), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', ({(239, 46, 239, 54): '"""copper"""'}, {}), "('copper')", True, 'import matplotlib.pyplot as plt\n'), ((239, 62, 239, 85), 'matplotlib.pyplot.Normalize', 'plt.Normalize', ({(239, 76, 239, 79): '(0.0)', (239, 81, 239, 84): '(1.0)'}, {}), '(0.0, 1.0)', True, 'import matplotlib.pyplot as plt\n'), ((255, 8, 255, 21), 'numpy.asarray', 'np.asarray', ({(255, 19, 255, 20): 'z'}, {}), '(z)', True, 'import numpy as np\n'), ((258, 9, 259, 78), 'matplotlib.collections.LineCollection', 'mcoll.LineCollection', (), '', True, 'import matplotlib.collections as mcoll\n'), ((261, 9, 261, 18), 'matplotlib.pyplot.gca', 'plt.gca', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((275, 15, 275, 64), 'numpy.concatenate', 'np.concatenate', (), '', True, 'import numpy as np\n'), ((30, 15, 30, 49), 'numpy.linspace', 'np.linspace', ({(30, 27, 30, 33): '-np.pi', (30, 35, 30, 40): 'np.pi', (30, 42, 30, 48): 'bins + 1'}, {}), '(-np.pi, np.pi, bins + 1)', True, 'import numpy as np\n'), ((32, 17, 32, 41), 'numpy.digitize', 'np.digitize', ({(32, 29, 32, 34): 'theta', (32, 36, 32, 40): 'bins'}, {}), '(theta, bins)', True, 'import numpy as np\n'), ((70, 25, 70, 67), 'numpy.unique', 'np.unique', (), '', True, 'import numpy as np\n'), ((84, 25, 84, 67), 'numpy.unique', 'np.unique', (), '', True, 'import numpy as np\n'), ((98, 14, 98, 38), 'numpy.sqrt', 'np.sqrt', ({(98, 22, 98, 37): 'x ** 2 + y ** 2'}, {}), '(x ** 2 + y ** 2)', True, 'import numpy as np\n'), ((99, 16, 99, 32), 'numpy.arctan2', 'np.arctan2', ({(99, 27, 99, 28): 'y', (99, 30, 99, 31): 'x'}, {}), '(y, x)', True, 'import numpy as np\n'), ((160, 15, 160, 37), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', ({(160, 28, 160, 36): '"""binary"""'}, {}), "('binary')", True, 'import matplotlib.pyplot as plt\n'), ((170, 18, 170, 85), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as plt\n'), ((184, 15, 184, 52), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', (), '', True, 'import matplotlib as mpl\n'), ((188, 8, 188, 18), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((199, 18, 199, 82), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as plt\n'), ((200, 15, 200, 38), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', ({(200, 28, 200, 37): '"""Oranges"""'}, {}), "('Oranges')", True, 'import matplotlib.pyplot as plt\n'), ((207, 15, 207, 51), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', (), '', True, 'import matplotlib as mpl\n'), ((222, 18, 222, 82), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as plt\n'), ((223, 15, 223, 36), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', ({(223, 28, 223, 35): '"""Blues"""'}, {}), "('Blues')", True, 'import matplotlib.pyplot as plt\n'), ((230, 15, 230, 51), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', (), '', True, 'import matplotlib as mpl\n'), ((253, 12, 253, 25), 'numpy.array', 'np.array', ({(253, 21, 253, 24): '[z]'}, {}), '([z])', True, 'import numpy as np\n'), ((284, 15, 284, 33), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', ({(284, 28, 284, 32): 'cmap'}, {}), '(cmap)', True, 'import matplotlib.pyplot as plt\n'), ((17, 28, 17, 50), 'numpy.unique', 'np.unique', ({(17, 38, 17, 49): 
'self.states'}, {}), '(self.states)', True, 'import numpy as np\n'), ((133, 28, 133, 99), 'numpy.vstack', 'np.vstack', ({(133, 38, 133, 98): '(state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins)'}, {}), '((state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins))', True, 'import numpy as np\n'), ((182, 35, 182, 60), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', (), '', False, 'from matplotlib.ticker import MaxNLocator\n'), ((186, 21, 186, 64), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', (), '', True, 'import matplotlib as mpl\n'), ((206, 35, 206, 60), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', (), '', False, 'from matplotlib.ticker import MaxNLocator\n'), ((209, 21, 209, 64), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', (), '', True, 'import matplotlib as mpl\n'), ((229, 35, 229, 60), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', (), '', False, 'from matplotlib.ticker import MaxNLocator\n'), ((232, 21, 232, 64), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', (), '', True, 'import matplotlib as mpl\n'), ((287, 13, 287, 43), 'numpy.linspace', 'np.linspace', ({(287, 25, 287, 31): 'minval', (287, 33, 287, 39): 'maxval', (287, 41, 287, 42): 'n'}, {}), '(minval, maxval, n)', True, 'import numpy as np\n'), ((174, 28, 174, 50), 'numpy.unique', 'np.unique', ({(174, 38, 174, 49): 'self.states'}, {}), '(self.states)', True, 'import numpy as np\n'), ((274, 13, 274, 29), 'numpy.array', 'np.array', ({(274, 22, 274, 28): '[x, y]'}, {}), '([x, y])', True, 'import numpy as np\n')]
Stfuncode/food-beverage-investigator
account/views.py
0fea4943a5c2634068dc04118f83742327937c25
import imp from venv import create from django.shortcuts import render, redirect from django.views import View from django.views.generic import ( ListView, ) from account.models import * from account.forms import * from data.models import * from django.contrib.auth import login as auth_login from django.contrib.auth.models import auth from django.contrib import messages from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin # Create your views here. def login(request): if request.method == "POST": form = loginForm(data=request.POST) if form.is_valid(): user = form.get_user() auth_login(request, user) print("succesful login") remember_me = form.cleaned_data["remember_me"] if remember_me: request.session.set_expiry(1209600) return redirect("home") else: messages.warning(request, 'There is an issue with your login processes') return redirect("login") else: form = loginForm() create_form = createUserForm() context = { "form": form, "create_form": create_form } return render(request, "login.html", context) def logout(request): auth.logout(request) return redirect("login") def register(request): if request.method == "POST": create_form = createUserForm(data=request.POST) if create_form.is_valid(): user = create_form.save(commit=False) user.save() messages.success(request, "User created successfully!") return redirect("login") else: messages.error(request, "User creation failed") else: create_form = createUserForm() return render(request, "login.html", {"create_form": create_form}) def homepage(request): user = Account.objects.filter(is_superuser=False).count() rest = Restaurant.objects.all().count() rating = RestaurantReview.objects.exclude(rating__isnull=True).count() review = RestaurantReview.objects.exclude(review__isnull=True).count() context = { "user_count" : user, "rest_count" : rest, "rating_count" : rating, "review_count" : review, } return render(request, "home.html", context) class ViewUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.view_account' raise_exception = True def post(self, request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form = viewUserForm(request.POST, instance=user) return redirect("userlist") def get(self, request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form = viewUserForm(instance=user) context = { "form": form, "pk": pk } return render(request, "profile.html", context) class EditUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.change_account' raise_exception = True def post(self, request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form = editUserForm(request.POST, instance=user) if form.is_valid(): user = form.save() role = request.POST.get("role") user.save() messages.success(request, "Successfully updated profile!") return redirect(f'/viewUser/{user.account_id}') else: form = editUserForm(instance=user) extra_context = { "form": form, } print('something wrong') messages.error(request, "Invalid input! 
Please input a valid information.") return render(request, "editUser.html", extra_context) def get(self, request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form = editUserForm(instance=user) extra_context = { "form": form, } return render(request, "editUser.html", extra_context) class UserListView(LoginRequiredMixin, PermissionRequiredMixin, ListView): permission_required = 'accounts.view_account' template_name = "userList.html" queryset = Account.objects.all() class UpdateProfilePicView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.change_account' raise_exception = True def post(self, request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) user.profile_pic = request.FILES.get('profile-pic') user.save() return redirect('viewUser', pk) def deleteUser(request, event_id): event = Account.objects.get(pk=event_id) event.delete() return redirect('userlist')
[((42, 11, 42, 49), 'django.shortcuts.render', 'render', ({(42, 18, 42, 25): 'request', (42, 27, 42, 39): '"""login.html"""', (42, 41, 42, 48): 'context'}, {}), "(request, 'login.html', context)", False, 'from django.shortcuts import render, redirect\n'), ((45, 4, 45, 24), 'django.contrib.auth.models.auth.logout', 'auth.logout', ({(45, 16, 45, 23): 'request'}, {}), '(request)', False, 'from django.contrib.auth.models import auth\n'), ((46, 11, 46, 28), 'django.shortcuts.redirect', 'redirect', ({(46, 20, 46, 27): '"""login"""'}, {}), "('login')", False, 'from django.shortcuts import render, redirect\n'), ((62, 11, 62, 70), 'django.shortcuts.render', 'render', ({(62, 18, 62, 25): 'request', (62, 27, 62, 39): '"""login.html"""', (62, 41, 62, 69): "{'create_form': create_form}"}, {}), "(request, 'login.html', {'create_form': create_form})", False, 'from django.shortcuts import render, redirect\n'), ((75, 11, 75, 48), 'django.shortcuts.render', 'render', ({(75, 18, 75, 25): 'request', (75, 27, 75, 38): '"""home.html"""', (75, 40, 75, 47): 'context'}, {}), "(request, 'home.html', context)", False, 'from django.shortcuts import render, redirect\n'), ((144, 11, 144, 31), 'django.shortcuts.redirect', 'redirect', ({(144, 20, 144, 30): '"""userlist"""'}, {}), "('userlist')", False, 'from django.shortcuts import render, redirect\n'), ((84, 15, 84, 35), 'django.shortcuts.redirect', 'redirect', ({(84, 24, 84, 34): '"""userlist"""'}, {}), "('userlist')", False, 'from django.shortcuts import render, redirect\n'), ((93, 15, 93, 55), 'django.shortcuts.render', 'render', ({(93, 22, 93, 29): 'request', (93, 31, 93, 45): '"""profile.html"""', (93, 47, 93, 54): 'context'}, {}), "(request, 'profile.html', context)", False, 'from django.shortcuts import render, redirect\n'), ((124, 15, 124, 62), 'django.shortcuts.render', 'render', ({(124, 22, 124, 29): 'request', (124, 31, 124, 46): '"""editUser.html"""', (124, 48, 124, 61): 'extra_context'}, {}), "(request, 'editUser.html', extra_context)", False, 'from django.shortcuts import render, redirect\n'), ((139, 15, 139, 39), 'django.shortcuts.redirect', 'redirect', ({(139, 24, 139, 34): '"""viewUser"""', (139, 36, 139, 38): 'pk'}, {}), "('viewUser', pk)", False, 'from django.shortcuts import render, redirect\n'), ((24, 12, 24, 37), 'django.contrib.auth.login', 'auth_login', ({(24, 23, 24, 30): 'request', (24, 32, 24, 36): 'user'}, {}), '(request, user)', True, 'from django.contrib.auth import login as auth_login\n'), ((30, 19, 30, 35), 'django.shortcuts.redirect', 'redirect', ({(30, 28, 30, 34): '"""home"""'}, {}), "('home')", False, 'from django.shortcuts import render, redirect\n'), ((32, 12, 32, 84), 'django.contrib.messages.warning', 'messages.warning', ({(32, 29, 32, 36): 'request', (32, 38, 32, 83): '"""There is an issue with your login processes"""'}, {}), "(request, 'There is an issue with your login processes')", False, 'from django.contrib import messages\n'), ((33, 19, 33, 36), 'django.shortcuts.redirect', 'redirect', ({(33, 28, 33, 35): '"""login"""'}, {}), "('login')", False, 'from django.shortcuts import render, redirect\n'), ((56, 12, 56, 67), 'django.contrib.messages.success', 'messages.success', ({(56, 29, 56, 36): 'request', (56, 38, 56, 66): '"""User created successfully!"""'}, {}), "(request, 'User created successfully!')", False, 'from django.contrib import messages\n'), ((57, 19, 57, 36), 'django.shortcuts.redirect', 'redirect', ({(57, 28, 57, 35): '"""login"""'}, {}), "('login')", False, 'from django.shortcuts import render, redirect\n'), ((59, 
12, 59, 59), 'django.contrib.messages.error', 'messages.error', ({(59, 27, 59, 34): 'request', (59, 36, 59, 58): '"""User creation failed"""'}, {}), "(request, 'User creation failed')", False, 'from django.contrib import messages\n'), ((107, 12, 107, 70), 'django.contrib.messages.success', 'messages.success', ({(107, 29, 107, 36): 'request', (107, 38, 107, 69): '"""Successfully updated profile!"""'}, {}), "(request, 'Successfully updated profile!')", False, 'from django.contrib import messages\n'), ((108, 19, 108, 59), 'django.shortcuts.redirect', 'redirect', ({(108, 28, 108, 58): 'f"""/viewUser/{user.account_id}"""'}, {}), "(f'/viewUser/{user.account_id}')", False, 'from django.shortcuts import render, redirect\n'), ((115, 12, 115, 87), 'django.contrib.messages.error', 'messages.error', ({(115, 27, 115, 34): 'request', (115, 36, 115, 86): '"""Invalid input! Please input a valid information."""'}, {}), "(request, 'Invalid input! Please input a valid information.')", False, 'from django.contrib import messages\n'), ((116, 19, 116, 66), 'django.shortcuts.render', 'render', ({(116, 26, 116, 33): 'request', (116, 35, 116, 50): '"""editUser.html"""', (116, 52, 116, 65): 'extra_context'}, {}), "(request, 'editUser.html', extra_context)", False, 'from django.shortcuts import render, redirect\n')]
mgradowski/aiproject
fpds/client.py
855332bd982bef2530ad935a209ae8be35963165
import cv2 import aiohttp import asyncio import concurrent.futures import argparse import numpy as np async def camera_source(ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor, src_id: int=0): loop = asyncio.get_running_loop() try: src = await loop.run_in_executor(threadpool, lambda: cv2.VideoCapture(src_id)) while True: _, im = await loop.run_in_executor(threadpool, src.read) im = cv2.resize(im, (640, 384)) enc_param = [int(cv2.IMWRITE_JPEG_QUALITY), 40] _, im = await loop.run_in_executor(threadpool, lambda: cv2.imencode('.jpg', im, enc_param)) await ws.send_bytes(im.tobytes()) except asyncio.CancelledError: pass finally: src.release() async def preview_window(queue: asyncio.Queue, threadpool: concurrent.futures.ThreadPoolExecutor): loop = asyncio.get_running_loop() try: while True: im = await queue.get() im = np.frombuffer(im, dtype=np.uint8) im = await loop.run_in_executor(threadpool, lambda: cv2.imdecode(im, cv2.IMREAD_ANYCOLOR)) cv2.imshow('fpds_remote_preview', im) cv2.waitKey(1) except asyncio.CancelledError: pass finally: cv2.destroyAllWindows() async def run_client( ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor ) -> None: # -- dst_queue = asyncio.Queue(maxsize=1) src_task = asyncio.create_task(camera_source(ws, threadpool)) dst_task = asyncio.create_task(preview_window(dst_queue, threadpool)) try: while True: im = await ws.receive_bytes() await dst_queue.put(im) except asyncio.CancelledError: await ws.send_str('close') src_task.cancel() dst_task.cancel() await asyncio.wait([src_task, dst_task]) async def amain(url: str): with concurrent.futures.ThreadPoolExecutor(max_workers=4) as threadpool: async with aiohttp.ClientSession() as session, session.ws_connect(url) as ws: await run_client(ws, threadpool) def main(): parser = argparse.ArgumentParser('fpds.client') parser.add_argument('url', type=str, help='WebSocket endpoint of fpds.server e.g. http://localhost:8181/fpds') args = parser.parse_args() loop = asyncio.get_event_loop() task = loop.create_task(amain(args.url)) try: loop.run_until_complete(task) except KeyboardInterrupt: task.cancel() loop.run_until_complete(asyncio.wait_for(task, timeout=None)) finally: loop.close() if __name__ == '__main__': main()
[((10, 11, 10, 37), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ({}, {}), '()', False, 'import asyncio\n'), ((25, 11, 25, 37), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ({}, {}), '()', False, 'import asyncio\n'), ((43, 16, 43, 40), 'asyncio.Queue', 'asyncio.Queue', (), '', False, 'import asyncio\n'), ((62, 13, 62, 51), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({(62, 37, 62, 50): '"""fpds.client"""'}, {}), "('fpds.client')", False, 'import argparse\n'), ((65, 11, 65, 35), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ({}, {}), '()', False, 'import asyncio\n'), ((36, 8, 36, 31), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ({}, {}), '()', False, 'import cv2\n'), ((15, 17, 15, 43), 'cv2.resize', 'cv2.resize', ({(15, 28, 15, 30): 'im', (15, 32, 15, 42): '(640, 384)'}, {}), '(im, (640, 384))', False, 'import cv2\n'), ((29, 17, 29, 50), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((31, 12, 31, 49), 'cv2.imshow', 'cv2.imshow', ({(31, 23, 31, 44): '"""fpds_remote_preview"""', (31, 46, 31, 48): 'im'}, {}), "('fpds_remote_preview', im)", False, 'import cv2\n'), ((32, 12, 32, 26), 'cv2.waitKey', 'cv2.waitKey', ({(32, 24, 32, 25): '(1)'}, {}), '(1)', False, 'import cv2\n'), ((58, 19, 58, 42), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ({}, {}), '()', False, 'import aiohttp\n'), ((54, 14, 54, 48), 'asyncio.wait', 'asyncio.wait', ({(54, 27, 54, 47): '[src_task, dst_task]'}, {}), '([src_task, dst_task])', False, 'import asyncio\n'), ((71, 32, 71, 68), 'asyncio.wait_for', 'asyncio.wait_for', (), '', False, 'import asyncio\n'), ((12, 61, 12, 85), 'cv2.VideoCapture', 'cv2.VideoCapture', ({(12, 78, 12, 84): 'src_id'}, {}), '(src_id)', False, 'import cv2\n'), ((17, 67, 17, 102), 'cv2.imencode', 'cv2.imencode', ({(17, 80, 17, 86): '""".jpg"""', (17, 88, 17, 90): 'im', (17, 92, 17, 101): 'enc_param'}, {}), "('.jpg', im, enc_param)", False, 'import cv2\n'), ((30, 64, 30, 101), 'cv2.imdecode', 'cv2.imdecode', ({(30, 77, 30, 79): 'im', (30, 81, 30, 100): 'cv2.IMREAD_ANYCOLOR'}, {}), '(im, cv2.IMREAD_ANYCOLOR)', False, 'import cv2\n')]
bkrrr/Giveme5W
Giveme5W1H/extractor/tools/key_value_cache.py
657738781fe387d76e6e0da35ed009ccf81f4290
import logging import os import pickle import sys import threading import time from typing import List from Giveme5W1H.extractor.root import path from Giveme5W1H.extractor.tools.util import bytes_2_human_readable class KeyValueCache(object): def __init__(self, cache_path): """ :param cache_path: path to cache, must be relative to the root.py file """ self.log = logging.getLogger('GiveMe5W') # resolve path relative to the path file self._cache_path = path(cache_path) # ad a meaningful extension self._cache_path = self._cache_path + '.prickle' self._cache = {} if cache_path and os.path.isfile(self._cache_path) and os.path.getsize(self._cache_path) > 0: # reload cache object form disc, if any with open(self._cache_path, 'rb') as ff: self._cache = pickle.load(ff) self.log.debug('KeyValueCache: ' + self._cache_path + ' restored') self.log_stats() else: self._cache = {} self._lock = threading.Lock() def log_stats(self): # size is not considering child's self.log.info(self._cache_path + ' entries: ' + str(len(self._cache)) + ' size: ' + bytes_2_human_readable( sys.getsizeof(self._cache))) def persist(self): with open(self._cache_path, 'wb') as f: pickle.dump(self._cache, f, pickle.HIGHEST_PROTOCOL) def cache(self, key: str, value: object): """ None values are considered as invalid results (ToughRequest) is producing none for exceptions set -1 if you want to store "No distance" :param key: :param value: :return: """ self._lock.acquire() if value is not None: self._cache[key] = self._pack(value); self.log.debug(self._cache_path + ' CACHED: ' + str(key) + ': ' + str(value)) self.persist() self._lock.release() def get(self, key): """ Read cache entries :param key: :return: """ self._lock.acquire() result = None value = self._cache.get(key) if value is not None: self.log.debug(self._cache_path + ' LOADED: ' + str(key) + ': ' + str(value)) result = self._unpack(value) self._lock.release() return result def get_complex(self, list_of_keys: List[str]): """ Read complex cache entries """ return self.get(self._get_id(list_of_keys)) def cache_complex(self, list_of_keys: List[str], value): """ helper to cache multi (string)key values. They are sorted before concatenation, therefore an order is determined. """ self.cache(self._get_id(list_of_keys), value) def _get_id(self, list_of_keys: List[str]): """ sorts list_of_keys, concatenates with # for readability :param list_of_keys: :return: """ sorted(list_of_keys) return "#".join(list_of_keys) def _pack(self, value): """ cache tracks the age of an entry, may be helpful in the future :param value: :return: """ return [value, str(time.time())] def _unpack(self, value): """ removes the timestamp around the cached value, if any :param value: :return: """ # there are some old entries without timestamp if isinstance(value, str) or isinstance(value, int): return value return value[0]
[((19, 19, 19, 48), 'logging.getLogger', 'logging.getLogger', ({(19, 37, 19, 47): '"""GiveMe5W"""'}, {}), "('GiveMe5W')", False, 'import logging\n'), ((21, 27, 21, 43), 'Giveme5W1H.extractor.root.path', 'path', ({(21, 32, 21, 42): 'cache_path'}, {}), '(cache_path)', False, 'from Giveme5W1H.extractor.root import path\n'), ((35, 21, 35, 37), 'threading.Lock', 'threading.Lock', ({}, {}), '()', False, 'import threading\n'), ((27, 26, 27, 58), 'os.path.isfile', 'os.path.isfile', ({(27, 41, 27, 57): 'self._cache_path'}, {}), '(self._cache_path)', False, 'import os\n'), ((44, 12, 44, 64), 'pickle.dump', 'pickle.dump', ({(44, 24, 44, 35): 'self._cache', (44, 37, 44, 38): 'f', (44, 40, 44, 63): 'pickle.HIGHEST_PROTOCOL'}, {}), '(self._cache, f, pickle.HIGHEST_PROTOCOL)', False, 'import pickle\n'), ((27, 63, 27, 96), 'os.path.getsize', 'os.path.getsize', ({(27, 79, 27, 95): 'self._cache_path'}, {}), '(self._cache_path)', False, 'import os\n'), ((30, 30, 30, 45), 'pickle.load', 'pickle.load', ({(30, 42, 30, 44): 'ff'}, {}), '(ff)', False, 'import pickle\n'), ((105, 27, 105, 38), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((40, 12, 40, 38), 'sys.getsizeof', 'sys.getsizeof', ({(40, 26, 40, 37): 'self._cache'}, {}), '(self._cache)', False, 'import sys\n')]
AlexanderJenke/nsst
nsst_translate_corpus.py
75f6afa39568c72c9c513ac0313db33b80bb67d5
from argparse import ArgumentParser from tqdm import tqdm import NSST from nsst_translate import best_transition_sequence if __name__ == '__main__': parser = ArgumentParser() parser.add_argument("--nsst_file", default="output/nsst_tss20_th4_nSt100_Q0.pkl", help="nsst file") parser.add_argument("--src_lang", default="output/europarl-v7.de-en.de.clean") parser.add_argument("--tgt_lang", default="output/europarl-v7.de-en.en.clean") parser.add_argument("--enforce_n_reg", default=True) parser.add_argument("--output", default=f"output/nsst_stat_nreg_100Q0.csv") args = parser.parse_args() args.enforce_n_final_reg = False # load NSST nsst = NSST.NSST() nsst.load(args.nsst_file) args.nsst = nsst # open files src_file = open(args.src_lang, 'r') tgt_file = open(args.tgt_lang, 'r') output_file = open(args.output, 'w') # iterate over sentences, first 4096 -> test sentences for src, tgt, _ in tqdm(list(zip(src_file, tgt_file, range(4096))), desc="Processing sentences"): # remove line breaks src = src[:-1] tgt = tgt[:-1] # try to translate try: # prepare tokenisations token_src = [nsst.tokenization_src[word] if word in nsst.tokenization_src else 0 for word in src.split(" ") if len(word)] token_tgt = [nsst.tokenization_tgt[word] if word in nsst.tokenization_tgt else 0 for word in tgt.split(" ") if len(word)] # run nsst args.input = src args.token_src = token_src result = best_transition_sequence(args) # get best result pred = sorted((k for k in result if ('Qf' in args.nsst_file or not args.enforce_n_final_reg or len(k[1]) == 1) and ('Q0' in args.nsst_file or k[0] == -1) ), key=lambda x: x[2], reverse=True)[0] n_res = len(result) q, reg, prob = pred # write to csv if not len(reg): # catch empty registers continue token_pred = [w for w in reg[0].split(' ') if len(w)] pred_str = "" for t in token_pred: pred_str += f"{nsst.tokenization_tgt_lut[int(t)]} " token_src_str = "" for t in token_src: token_src_str += f"{t} " token_tgt_str = "" for t in token_tgt: token_tgt_str += f"{t} " token_pred_str = "" for t in token_pred: token_pred_str += f"{t} " print(f"{src};{token_src_str[:-1]};" f"{tgt};{token_tgt_str[:-1]};" f"{pred_str};{token_pred_str[:-1]};" f"{prob};{len(reg)};{n_res}", file=output_file) output_file.flush() except RuntimeError: pass # close files src_file.close() tgt_file.close() output_file.close()
[((9, 13, 9, 29), 'argparse.ArgumentParser', 'ArgumentParser', ({}, {}), '()', False, 'from argparse import ArgumentParser\n'), ((19, 11, 19, 22), 'NSST.NSST', 'NSST.NSST', ({}, {}), '()', False, 'import NSST\n'), ((46, 21, 46, 51), 'nsst_translate.best_transition_sequence', 'best_transition_sequence', ({(46, 46, 46, 50): 'args'}, {}), '(args)', False, 'from nsst_translate import best_transition_sequence\n')]
dhyanpatel110/HACKERRANK
10 Days of Statistics/Day 1/Standard Deviation.py
949b1ff468ff3487663bf063a8fe6cdfb9dea26b
# Import library
import math

# Define functions
def mean(data):
    return sum(data) / len(data)

def stddev(data, size):
    sum = 0
    for i in range(size):
        sum = sum + (data[i] - mean(data)) ** 2
    return math.sqrt(sum / size)

# Set data
size = int(input())
numbers = list(map(int, input().split()))

# Get standard deviation
print(round(stddev(numbers, size), 1))
[((12, 11, 12, 32), 'math.sqrt', 'math.sqrt', ({(12, 21, 12, 31): '(sum / size)'}, {}), '(sum / size)', False, 'import math\n')]
jmsevillam/Herramientas-Computacionales-UniAndes
Homework/Hw4/Solution/problem5a.py
957338873bd6a17201dfd4629c7edd5760e2271d
def decode(word1,word2,code): if len(word1)==1: code+=word1+word2 return code else: code+=word1[0]+word2[0] return decode(word1[1:],word2[1:],code) Alice='Ti rga eoe esg o h ore"ermetsCmuainls' Bob='hspormdcdsamsaefrtecus Hraina optcoae"' print(decode(Alice,Bob,''))
[]
ASUPychron/pychron
pychron/lasers/power/composite_calibration_manager.py
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
# =============================================================================== # Copyright 2012 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= from __future__ import absolute_import from traits.api import HasTraits, Instance, DelegatesTo, Button, List, Any, Float from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor # ============= standard library imports ======================== import pickle import os from numpy import polyval # ============= local library imports ========================== from pychron.managers.manager import Manager from pychron.database.selectors.power_calibration_selector import ( PowerCalibrationSelector, ) from pychron.database.adapters.power_calibration_adapter import PowerCalibrationAdapter from pychron.paths import paths from pychron.graph.graph import Graph from pychron.hardware.meter_calibration import MeterCalibration """ use a dbselector to select data """ class BoundsSelector(HasTraits): graph = Instance(Graph) def traits_view(self): v = View( Item("graph", show_label=False, style="custom"), buttons=["OK", "Cancel"], kind="livemodal", ) return v class CompositeCalibrationManager(Manager): db = Instance(PowerCalibrationAdapter) selector = Instance(PowerCalibrationSelector) append = Button replace = Button load_graph = Button save = Button selected_calibrations = List selected = Any results = DelegatesTo("selector") graph = Instance(Graph) dclicked = Any parent_name = "FusionsDiode" power = Float input = Float def _power_changed(self): pc = self._load_calibration() pc if pc is not None: self.input, _ = pc.get_input(self.power) def _load_calibration(self): try: p = self._get_calibration_path() with open(p, "rb") as f: pc = pickle.load(f) except: return return pc def _dclicked_changed(self): s = self.selected if s is not None: s.bounds = None s.load_graph() s.graph.add_range_selector() bc = BoundsSelector(graph=s.graph) info = bc.edit_traits() if info.result: bounds = s.graph.plots[0].default_index.metadata["selections"] s.bounds = bounds s.calibration_bounds = ( polyval(s.coefficients, bounds[0]), polyval(s.coefficients, bounds[1]), ) def _append_fired(self): s = self.selector.selected if s is not None: for si in s: trs = list(si.traits().keys()).remove("graph") self.selected_calibrations.append(si.clone_traits(traits=trs)) def _replace_fired(self): s = self.selector.selected trs = list(s.traits().keys()).remove("graph") self.selected_calibrations = s.clone_traits(traits=trs) def _save_fired(self): self._dump_calibration() def _dump_calibration(self): pc = MeterCalibration() coeffs = [] bounds = [] for s in self.selected_calibrations: coeffs.append(s.coefficients) bounds.append(s.calibration_bounds) pc.coefficients = coeffs pc.bounds = bounds p = self._get_calibration_path() self.info("saving calibration to {}".format(p)) with open(p, "wb") as f: pickle.dump(pc, f) def 
_get_calibration_path(self): p = os.path.join( paths.hidden_dir, "{}_power_calibration".format(self.parent_name) ) return p def _load_graph_fired(self): g = self.graph g.clear() # g.new_plot(zoom=True, pan=True, # padding=[40, 10, 10, 40] # ) has_bounds = False for i, s in enumerate(self.selected_calibrations): if s.bounds: has_bounds = True elif has_bounds: g.clear() self._plot_factory(g) self.warning_dialog("{} does not have its bounds set".format(s.rid)) break s.load_graph(graph=g, new_plot=i == 0) g.redraw() def traits_view(self): selector_grp = Group(Item("selector", style="custom", show_label=False)) transfer_grp = VGroup( spring, VGroup(Item("append", show_label=False), Item("replace", show_label=False)), spring, ) editor = TabularEditor( adapter=self.selector.tabular_adapter(), editable=False, dclicked="object.dclicked", selected="object.selected", ) selected_grp = Item("selected_calibrations", editor=editor, show_label=False) data_tab = Group( HGroup(selector_grp, transfer_grp, selected_grp), show_border=True, label="Data", ) process_tab = Group( HGroup( Item("power"), Item("input", format_str=" %0.3f ", style="readonly"), spring, Item("save", show_label=False), Item("load_graph", show_label=False), ), Item("graph", style="custom", show_label=False), show_border=True, label="Process", ) v = View( VGroup(data_tab, process_tab), resizable=True, title="Composite {} Power Calibration".format(self.parent_name), ) return v def _graph_default(self): g = Graph( container_dict={ # 'fill_padding':True, # 'bgcolor':'red', "padding": 5 } ) self._plot_factory(g) return g def _plot_factory(self, graph): graph.new_plot( zoom=True, pan=True, padding=[50, 10, 10, 40], xtitle="Setpoint (%)", ytitle="Measured Power (W)", ) def _db_default(self): if self.parent_name == "FusionsDiode": name = paths.diodelaser_db else: name = paths.co2laser_db db = PowerCalibrationAdapter(name=name, kind="sqlite") db.connect() return db def _selector_default(self): return self.db._selector_factory() if __name__ == "__main__": ccm = CompositeCalibrationManager() ccm.configure_traits() # ============= EOF =============================================
[((43, 12, 43, 27), 'traits.api.Instance', 'Instance', ({(43, 21, 43, 26): 'Graph'}, {}), '(Graph)', False, 'from traits.api import HasTraits, Instance, DelegatesTo, Button, List, Any, Float\n'), ((55, 9, 55, 42), 'traits.api.Instance', 'Instance', ({(55, 18, 55, 41): 'PowerCalibrationAdapter'}, {}), '(PowerCalibrationAdapter)', False, 'from traits.api import HasTraits, Instance, DelegatesTo, Button, List, Any, Float\n'), ((56, 15, 56, 49), 'traits.api.Instance', 'Instance', ({(56, 24, 56, 48): 'PowerCalibrationSelector'}, {}), '(PowerCalibrationSelector)', False, 'from traits.api import HasTraits, Instance, DelegatesTo, Button, List, Any, Float\n'), ((66, 14, 66, 37), 'traits.api.DelegatesTo', 'DelegatesTo', ({(66, 26, 66, 36): '"""selector"""'}, {}), "('selector')", False, 'from traits.api import HasTraits, Instance, DelegatesTo, Button, List, Any, Float\n'), ((68, 12, 68, 27), 'traits.api.Instance', 'Instance', ({(68, 21, 68, 26): 'Graph'}, {}), '(Graph)', False, 'from traits.api import HasTraits, Instance, DelegatesTo, Button, List, Any, Float\n'), ((127, 13, 127, 31), 'pychron.hardware.meter_calibration.MeterCalibration', 'MeterCalibration', ({}, {}), '()', False, 'from pychron.hardware.meter_calibration import MeterCalibration\n'), ((181, 23, 181, 85), 'traitsui.api.Item', 'Item', (), '', False, 'from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor\n'), ((209, 12, 215, 9), 'pychron.graph.graph.Graph', 'Graph', (), '', False, 'from pychron.graph.graph import Graph\n'), ((234, 13, 234, 62), 'pychron.database.adapters.power_calibration_adapter.PowerCalibrationAdapter', 'PowerCalibrationAdapter', (), '', False, 'from pychron.database.adapters.power_calibration_adapter import PowerCalibrationAdapter\n'), ((47, 12, 47, 59), 'traitsui.api.Item', 'Item', (), '', False, 'from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor\n'), ((139, 12, 139, 30), 'pickle.dump', 'pickle.dump', ({(139, 24, 139, 26): 'pc', (139, 28, 139, 29): 'f'}, {}), '(pc, f)', False, 'import pickle\n'), ((169, 29, 169, 79), 'traitsui.api.Item', 'Item', (), '', False, 'from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor\n'), ((183, 12, 183, 60), 'traitsui.api.HGroup', 'HGroup', ({(183, 19, 183, 31): 'selector_grp', (183, 33, 183, 45): 'transfer_grp', (183, 47, 183, 59): 'selected_grp'}, {}), '(selector_grp, transfer_grp, selected_grp)', False, 'from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor\n'), ((196, 12, 196, 59), 'traitsui.api.Item', 'Item', (), '', False, 'from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor\n'), ((202, 12, 202, 41), 'traitsui.api.VGroup', 'VGroup', ({(202, 19, 202, 27): 'data_tab', (202, 29, 202, 40): 'process_tab'}, {}), '(data_tab, process_tab)', False, 'from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor\n'), ((87, 21, 87, 35), 'pickle.load', 'pickle.load', ({(87, 33, 87, 34): 'f'}, {}), '(f)', False, 'import pickle\n'), ((172, 19, 172, 51), 'traitsui.api.Item', 'Item', (), '', False, 'from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor\n'), ((172, 53, 172, 86), 'traitsui.api.Item', 'Item', (), '', False, 'from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor\n'), ((190, 16, 190, 29), 'traitsui.api.Item', 'Item', ({(190, 21, 190, 28): '"""power"""'}, {}), "('power')", False, 'from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor\n'), 
((191, 16, 191, 74), 'traitsui.api.Item', 'Item', (), '', False, 'from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor\n'), ((193, 16, 193, 46), 'traitsui.api.Item', 'Item', (), '', False, 'from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor\n'), ((194, 16, 194, 52), 'traitsui.api.Item', 'Item', (), '', False, 'from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor\n'), ((107, 20, 107, 54), 'numpy.polyval', 'polyval', ({(107, 28, 107, 42): 's.coefficients', (107, 44, 107, 53): 'bounds[0]'}, {}), '(s.coefficients, bounds[0])', False, 'from numpy import polyval\n'), ((108, 20, 108, 54), 'numpy.polyval', 'polyval', ({(108, 28, 108, 42): 's.coefficients', (108, 44, 108, 53): 'bounds[1]'}, {}), '(s.coefficients, bounds[1])', False, 'from numpy import polyval\n')]