Dataset schema:

column | dtype | observed range
---|---|---
hexsha | string | length 40
size | int64 | 1 to 1.03M
ext | string | 10 classes
lang | string | 1 value
max_stars_repo_path | string | length 3 to 239
max_stars_repo_name | string | length 5 to 130
max_stars_repo_head_hexsha | string | length 40 to 78
max_stars_repo_licenses | sequence | length 1 to 10
max_stars_count | int64 | 1 to 191k (nullable)
max_stars_repo_stars_event_min_datetime | string | length 24 (nullable)
max_stars_repo_stars_event_max_datetime | string | length 24 (nullable)
max_issues_repo_path | string | length 3 to 239
max_issues_repo_name | string | length 5 to 130
max_issues_repo_head_hexsha | string | length 40 to 78
max_issues_repo_licenses | sequence | length 1 to 10
max_issues_count | int64 | 1 to 67k (nullable)
max_issues_repo_issues_event_min_datetime | string | length 24 (nullable)
max_issues_repo_issues_event_max_datetime | string | length 24 (nullable)
max_forks_repo_path | string | length 3 to 239
max_forks_repo_name | string | length 5 to 130
max_forks_repo_head_hexsha | string | length 40 to 78
max_forks_repo_licenses | sequence | length 1 to 10
max_forks_count | int64 | 1 to 105k (nullable)
max_forks_repo_forks_event_min_datetime | string | length 24 (nullable)
max_forks_repo_forks_event_max_datetime | string | length 24 (nullable)
content | string | length 1 to 1.03M
avg_line_length | float64 | 1 to 958k
max_line_length | int64 | 1 to 1.03M
alphanum_fraction | float64 | 0 to 1
# === pymocap/manager.py | markkorput/PyMoCap @ b0fd963 | MIT | 1,381 bytes | 3 stars (2016-12-15 .. 2021-02-28) ===
from pymocap.event import Event
from pymocap.color_terminal import ColorTerminal
try:
import optirx as rx
except ImportError:
ColorTerminal().warn("importing embedded version of the optirx library for PyMoCap.Manager")
import pymocap.dependencies.optirx as rx
class Manager:
def __init__(self):
# events
self.resetEvent = Event()
self.frameDataEvent = Event()
self.frameEvent = Event()
# for natnet data unpacking
self._natnet_version = (2, 7, 0, 0)
self.reset(False)
# resets the current state of the manager (removes current frames and thus all rigid bodies)
def reset(self, notify=True):
self.frame = None
if notify:
self.resetEvent(self)
# takes raw, binary, packed natnet frame data
# it unpacks the data, stores it without further processing
# and triggers notification
def processFrameData(self, data):
# notify about new (raw, binary) frame data
self.frameDataEvent(data, self)
# unpack
packet = rx.unpack(data, version=self._natnet_version)
# change natnet version if necessary
if type(packet) is rx.SenderData:
self._natnet_version = packet.natnet_version
# store
self.frame = packet
# notify about new (unpacked) frame
self.frameEvent(packet, self)
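# Example usage (a sketch, not part of the original file): feed raw NatNet
# datagrams from a UDP socket into the manager. The port and the exact Event
# subscription API are assumptions for illustration; pymocap's Event objects
# are invoked as callables above and commonly expose a subscribe/+= hook.
#
#   import socket
#   manager = Manager()
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.bind(('', 1511))  # 1511 is the conventional NatNet data port
#   while True:
#       data, _ = sock.recvfrom(65535)
#       manager.processFrameData(data)  # triggers frameDataEvent/frameEvent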
# === src/algorithms/pad.py | kristery/dmcontrol-generalization-benchmark @ c732d32 | MIT | 1,910 bytes | 75 stars ===
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import deepcopy
import utils
import algorithms.modules as m
from algorithms.sac import SAC
class PAD(SAC):
def __init__(self, obs_shape, action_shape, args):
super().__init__(obs_shape, action_shape, args)
self.aux_update_freq = args.aux_update_freq
self.aux_lr = args.aux_lr
self.aux_beta = args.aux_beta
shared_cnn = self.critic.encoder.shared_cnn
aux_cnn = m.HeadCNN(shared_cnn.out_shape, args.num_head_layers, args.num_filters).cuda()
aux_encoder = m.Encoder(
shared_cnn,
aux_cnn,
m.RLProjection(aux_cnn.out_shape, args.projection_dim)
)
self.pad_head = m.InverseDynamics(aux_encoder, action_shape, args.hidden_dim).cuda()
self.init_pad_optimizer()
self.train()
def train(self, training=True):
super().train(training)
if hasattr(self, 'pad_head'):
self.pad_head.train(training)
def init_pad_optimizer(self):
self.pad_optimizer = torch.optim.Adam(
self.pad_head.parameters(), lr=self.aux_lr, betas=(self.aux_beta, 0.999)
)
def update_inverse_dynamics(self, obs, obs_next, action, L=None, step=None):
assert obs.shape[-1] == 84 and obs_next.shape[-1] == 84
pred_action = self.pad_head(obs, obs_next)
pad_loss = F.mse_loss(pred_action, action)
self.pad_optimizer.zero_grad()
pad_loss.backward()
self.pad_optimizer.step()
if L is not None:
L.log('train/aux_loss', pad_loss, step)
def update(self, replay_buffer, L, step):
obs, action, reward, next_obs, not_done = replay_buffer.sample()
self.update_critic(obs, action, reward, next_obs, not_done, L, step)
if step % self.actor_update_freq == 0:
self.update_actor_and_alpha(obs, L, step)
if step % self.critic_target_update_freq == 0:
self.soft_update_critic_target()
if step % self.aux_update_freq == 0:
self.update_inverse_dynamics(obs, next_obs, action, L, step)
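# A self-contained toy version of the inverse-dynamics auxiliary objective
# used by update_inverse_dynamics() above: predict the action taken between
# two consecutive observations and regress it onto the true action with MSE.
# The flat 16-d observations and the small MLP are illustrative assumptions,
# not the convolutional encoder/head defined in algorithms.modules.
import torch
import torch.nn as nn
import torch.nn.functional as F

obs_dim, action_dim, batch = 16, 4, 32
inverse_head = nn.Sequential(
    nn.Linear(2 * obs_dim, 64), nn.ReLU(), nn.Linear(64, action_dim))
optimizer = torch.optim.Adam(inverse_head.parameters(), lr=1e-3)

obs, next_obs = torch.randn(batch, obs_dim), torch.randn(batch, obs_dim)
action = torch.randn(batch, action_dim)
pred_action = inverse_head(torch.cat([obs, next_obs], dim=-1))
loss = F.mse_loss(pred_action, action)  # same loss form as update_inverse_dynamics
optimizer.zero_grad()
loss.backward()
optimizer.step()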
# === src/qnet/printing/sreprprinter.py | amitkumarj441/QNET @ 4d4818b | MIT | 4,616 bytes | 54 stars ===
"""Provides printers for a full-structured representation"""
from textwrap import dedent, indent
from sympy.core.basic import Basic as SympyBasic
from .base import QnetBasePrinter
from .sympy import SympyReprPrinter
from ._render_head_repr import render_head_repr
from qnet.algebra.core.abstract_algebra import Expression
from ..utils.singleton import Singleton
__all__ = []
__private__ = [
'QnetSReprPrinter', 'IndentedSReprPrinter', 'IndentedSympyReprPrinter']
class QnetSReprPrinter(QnetBasePrinter):
"""Printer for a string (ASCII) representation."""
sympy_printer_cls = SympyReprPrinter
def _render_str(self, string):
return repr(string)
def emptyPrinter(self, expr):
"""Fallback printer"""
return render_head_repr(expr, sub_render=self.doprint)
def _print_ndarray(self, expr):
if len(expr.shape) == 2:
rows = []
for row in expr:
rows.append(
'[' + ", ".join([self.doprint(val) for val in row]) + ']')
return ("array([" + ", ".join(rows) +
"], dtype=%s)" % str(expr.dtype))
else:
raise ValueError("Cannot render %s" % expr)
class IndentedSympyReprPrinter(SympyReprPrinter):
"""Indented repr printer for Sympy objects"""
def doprint(self, expr):
res = super().doprint(expr)
return " " * (self._print_level - 1) + res
class IndentedSReprPrinter(QnetBasePrinter):
"""Printer for rendering an expression in such a way that the resulting
string can be evaluated in an appropriate context to re-instantiate an
identical object, using nested indentation (implementing
``srepr(expr, indented=True)``
"""
sympy_printer_cls = IndentedSympyReprPrinter
def _get_from_cache(self, expr):
"""Obtain cached result, prepend with the keyname if necessary, and
indent for the current level"""
is_cached, res = super()._get_from_cache(expr)
if is_cached:
indent_str = " " * self._print_level
return True, indent(res, indent_str)
else:
return False, None
def _write_to_cache(self, expr, res):
"""Store the cached result without indentation, and without the
keyname"""
res = dedent(res)
super()._write_to_cache(expr, res)
def _render_str(self, string):
return " " * self._print_level + repr(string)
def emptyPrinter(self, expr):
"""Fallback printer"""
indent_str = " " * (self._print_level - 1)
lines = []
if isinstance(expr.__class__, Singleton):
            # We exploit that Singletons override __repr__ to directly return
            # their name
return indent_str + repr(expr)
if isinstance(expr, Expression):
args = expr.args
keys = expr.minimal_kwargs.keys()
lines.append(indent_str + expr.__class__.__name__ + "(")
for arg in args:
lines.append(self.doprint(arg) + ",")
for key in keys:
arg = expr.kwargs[key]
lines.append(
(" " * self._print_level) + key + '=' +
self.doprint(arg).lstrip() + ",")
if len(args) > 0 or len(keys) > 0:
lines[-1] = lines[-1][:-1] # drop trailing comma for last arg
lines[-1] += ")"
elif isinstance(expr, (tuple, list)):
delims = ("(", ")") if isinstance(expr, tuple) else ("[", "]")
if len(expr) == 1:
delims = (delims[0], "," + delims[1])
lines.append(
indent_str + delims[0] +
", ".join([render_head_repr(v) for v in expr]) +
delims[1])
else:
lines.append(indent_str + SympyReprPrinter().doprint(expr))
return "\n".join(lines)
def _print_ndarray(self, expr):
indent_str = " " * (self._print_level - 1)
if len(expr.shape) == 2:
lines = [indent_str + "array([", ]
self._print_level += 1
for row in expr:
indent_str = " " * (self._print_level - 1)
lines.append(indent_str + '[')
for val in row:
lines.append(self.doprint(val) + ",")
lines[-1] = lines[-1][:-1]
lines.append(indent_str + '],')
lines[-1] = lines[-1][:-1] + "], dtype=%s)" % str(expr.dtype)
return "\n".join(lines)
else:
raise ValueError("Cannot render %s" % expr)
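# A minimal analogue (an assumption-light sketch, separate from the qnet
# classes above) of the indentation scheme IndentedSReprPrinter implements:
# each nesting level of the expression tree indents its own line.
def indented_repr_sketch(obj, level=0):
    pad = "    " * level
    if isinstance(obj, tuple):
        lines = [pad + "("]
        lines += [indented_repr_sketch(item, level + 1) + "," for item in obj]
        lines.append(pad + ")")
        return "\n".join(lines)
    return pad + repr(obj)

# print(indented_repr_sketch((1, (2, 3), "x"))) renders one element per line,
# indented by depth, mirroring how doprint() uses _print_level above.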
# === src/dc/core/OutgoingMessage.py | dc-blockchain/dc-core @ fc6af8c | MIT | 593 bytes | 1 star ===
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from dc.core import config
from dc.core.misc import ntp
class OutgoingMessage:
def __init__(self, priority, message):
self.priority = priority
self.timestamp = int(ntp.getTime())
self.message = message
    def is_expired(self):
        # Expired once the message's age exceeds the configured expiry window
        return ntp.getTime() - self.timestamp > config.user.outgoing_message_expiry
def __lt__(self, outgoing_message):
return self.priority < outgoing_message.priority
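# Because __lt__ orders messages by priority, instances work directly with
# heapq-based priority queues. A sketch (assumes the dc package is importable,
# since the constructor calls ntp.getTime(); the payloads are hypothetical):
#
#   import heapq
#   queue = []
#   heapq.heappush(queue, OutgoingMessage(2, b"low-priority payload"))
#   heapq.heappush(queue, OutgoingMessage(1, b"high-priority payload"))
#   next_msg = heapq.heappop(queue)  # pops the priority-1 message first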
# === kubernetes/test/test_authentication_v1_api.py | L3T/python @ b6e4ae8 | Apache-2.0 | 1,003 bytes | 2 stars ===
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.api.authentication_v1_api import AuthenticationV1Api # noqa: E501
from kubernetes.client.rest import ApiException
class TestAuthenticationV1Api(unittest.TestCase):
"""AuthenticationV1Api unit test stubs"""
def setUp(self):
self.api = kubernetes.client.api.authentication_v1_api.AuthenticationV1Api() # noqa: E501
def tearDown(self):
pass
def test_create_token_review(self):
"""Test case for create_token_review
"""
pass
def test_get_api_resources(self):
"""Test case for get_api_resources
"""
pass
if __name__ == '__main__':
unittest.main()
# === var/spack/repos/builtin/packages/openssl/package.py | michael-afanasiev/spack @ 5b2ea72 | ECL-2.0/Apache-2.0/MIT-0/MIT | 12,371 bytes ===
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import re
import llnl.util.tty as tty
from spack import *
class Openssl(Package): # Uses Fake Autotools, should subclass Package
"""OpenSSL is an open source project that provides a robust,
commercial-grade, and full-featured toolkit for the Transport
Layer Security (TLS) and Secure Sockets Layer (SSL) protocols.
It is also a general-purpose cryptography library."""
homepage = "https://www.openssl.org"
# URL must remain http:// so Spack can bootstrap curl
url = "https://www.openssl.org/source/openssl-1.1.1d.tar.gz"
list_url = "https://www.openssl.org/source/old/"
list_depth = 1
tags = ['core-packages']
executables = ['openssl']
version('3.0.1', sha256='c311ad853353bce796edad01a862c50a8a587f62e7e2100ef465ab53ec9b06d1')
version('3.0.0', sha256='59eedfcb46c25214c9bd37ed6078297b4df01d012267fe9e9eee31f61bc70536')
# The latest stable version is the 1.1.1 series. This is also our Long Term
# Support (LTS) version, supported until 11th September 2023.
version('1.1.1m', sha256='f89199be8b23ca45fc7cb9f1d8d3ee67312318286ad030f5316aca6462db6c96', preferred=True)
version('1.1.1l', sha256='0b7a3e5e59c34827fe0c3a74b7ec8baef302b98fa80088d7f9153aa16fa76bd1', deprecated=True)
version('1.1.1k', sha256='892a0875b9872acd04a9fde79b1f943075d5ea162415de3047c327df33fbaee5', deprecated=True)
version('1.1.1j', sha256='aaf2fcb575cdf6491b98ab4829abf78a3dec8402b8b81efc8f23c00d443981bf', deprecated=True)
version('1.1.1i', sha256='e8be6a35fe41d10603c3cc635e93289ed00bf34b79671a3a4de64fcee00d5242', deprecated=True)
version('1.1.1h', sha256='5c9ca8774bd7b03e5784f26ae9e9e6d749c9da2438545077e6b3d755a06595d9', deprecated=True)
version('1.1.1g', sha256='ddb04774f1e32f0c49751e21b67216ac87852ceb056b75209af2443400636d46', deprecated=True)
version('1.1.1f', sha256='186c6bfe6ecfba7a5b48c47f8a1673d0f3b0e5ba2e25602dd23b629975da3f35', deprecated=True)
version('1.1.1e', sha256='694f61ac11cb51c9bf73f54e771ff6022b0327a43bbdfa1b2f19de1662a6dcbe', deprecated=True)
version('1.1.1d', sha256='1e3a91bc1f9dfce01af26026f856e064eab4c8ee0a8f457b5ae30b40b8b711f2', deprecated=True)
version('1.1.1c', sha256='f6fb3079ad15076154eda9413fed42877d668e7069d9b87396d0804fdb3f4c90', deprecated=True)
version('1.1.1b', sha256='5c557b023230413dfb0756f3137a13e6d726838ccd1430888ad15bfb2b43ea4b', deprecated=True)
version('1.1.1a', sha256='fc20130f8b7cbd2fb918b2f14e2f429e109c31ddd0fb38fc5d71d9ffed3f9f41', deprecated=True)
version('1.1.1', sha256='2836875a0f89c03d0fdf483941512613a50cfb421d6fd94b9f41d7279d586a3d', deprecated=True)
# The 1.1.0 series is out of support and should not be used.
version('1.1.0l', sha256='74a2f756c64fd7386a29184dc0344f4831192d61dc2481a93a4c5dd727f41148', deprecated=True)
version('1.1.0k', sha256='efa4965f4f773574d6cbda1cf874dbbe455ab1c0d4f906115f867d30444470b1', deprecated=True)
version('1.1.0j', sha256='31bec6c203ce1a8e93d5994f4ed304c63ccf07676118b6634edded12ad1b3246', deprecated=True)
version('1.1.0i', sha256='ebbfc844a8c8cc0ea5dc10b86c9ce97f401837f3fa08c17b2cdadc118253cf99', deprecated=True)
version('1.1.0g', sha256='de4d501267da39310905cb6dc8c6121f7a2cad45a7707f76df828fe1b85073af', deprecated=True)
version('1.1.0e', sha256='57be8618979d80c910728cfc99369bf97b2a1abd8f366ab6ebdee8975ad3874c', deprecated=True)
version('1.1.0d', sha256='7d5ebb9e89756545c156ff9c13cf2aa6214193b010a468a3bc789c3c28fe60df', deprecated=True)
version('1.1.0c', sha256='fc436441a2e05752d31b4e46115eb89709a28aef96d4fe786abe92409b2fd6f5', deprecated=True)
# The 1.0.2 series is out of support and should not be used.
version('1.0.2u', sha256='ecd0c6ffb493dd06707d38b14bb4d8c2288bb7033735606569d8f90f89669d16', deprecated=True)
version('1.0.2t', sha256='14cb464efe7ac6b54799b34456bd69558a749a4931ecfd9cf9f71d7881cac7bc', deprecated=True)
version('1.0.2s', sha256='cabd5c9492825ce5bd23f3c3aeed6a97f8142f606d893df216411f07d1abab96', deprecated=True)
version('1.0.2r', sha256='ae51d08bba8a83958e894946f15303ff894d75c2b8bbd44a852b64e3fe11d0d6', deprecated=True)
version('1.0.2p', sha256='50a98e07b1a89eb8f6a99477f262df71c6fa7bef77df4dc83025a2845c827d00', deprecated=True)
version('1.0.2o', sha256='ec3f5c9714ba0fd45cb4e087301eb1336c317e0d20b575a125050470e8089e4d', deprecated=True)
version('1.0.2n', sha256='370babb75f278c39e0c50e8c4e7493bc0f18db6867478341a832a982fd15a8fe', deprecated=True)
version('1.0.2m', sha256='8c6ff15ec6b319b50788f42c7abc2890c08ba5a1cdcd3810eb9092deada37b0f', deprecated=True)
version('1.0.2k', sha256='6b3977c61f2aedf0f96367dcfb5c6e578cf37e7b8d913b4ecb6643c3cb88d8c0', deprecated=True)
version('1.0.2j', sha256='e7aff292be21c259c6af26469c7a9b3ba26e9abaaffd325e3dccc9785256c431', deprecated=True)
version('1.0.2i', sha256='9287487d11c9545b6efb287cdb70535d4e9b284dd10d51441d9b9963d000de6f', deprecated=True)
version('1.0.2h', sha256='1d4007e53aad94a5b2002fe045ee7bb0b3d98f1a47f8b2bc851dcd1c74332919', deprecated=True)
version('1.0.2g', sha256='b784b1b3907ce39abf4098702dade6365522a253ad1552e267a9a0e89594aa33', deprecated=True)
version('1.0.2f', sha256='932b4ee4def2b434f85435d9e3e19ca8ba99ce9a065a61524b429a9d5e9b2e9c', deprecated=True)
version('1.0.2e', sha256='e23ccafdb75cfcde782da0151731aa2185195ac745eea3846133f2e05c0e0bff', deprecated=True)
version('1.0.2d', sha256='671c36487785628a703374c652ad2cebea45fa920ae5681515df25d9f2c9a8c8', deprecated=True)
# The 1.0.1 version is out of support and should not be used.
version('1.0.1u', sha256='4312b4ca1215b6f2c97007503d80db80d5157f76f8f7d3febbe6b4c56ff26739', deprecated=True)
version('1.0.1t', sha256='4a6ee491a2fdb22e519c76fdc2a628bb3cec12762cd456861d207996c8a07088', deprecated=True)
version('1.0.1r', sha256='784bd8d355ed01ce98b812f873f8b2313da61df7c7b5677fcf2e57b0863a3346', deprecated=True)
version('1.0.1h', sha256='9d1c8a9836aa63e2c6adb684186cbd4371c9e9dcc01d6e3bb447abf2d4d3d093', deprecated=True)
version('1.0.1e', sha256='f74f15e8c8ff11aa3d5bb5f276d202ec18d7246e95f961db76054199c69c1ae3', deprecated=True)
variant('certs', default='system',
values=('mozilla', 'system', 'none'), multi=False,
description=('Use certificates from the ca-certificates-mozilla '
'package, symlink system certificates, or none'))
variant('docs', default=False, description='Install docs and manpages')
variant('shared', default=True, description='Build shared libraries')
variant('deprecated', default=True, description='Build legacy.so')
depends_on('zlib')
    depends_on('perl@5.14.0:', type=('build', 'test'))
depends_on('ca-certificates-mozilla', type=('build', 'run'), when='certs=mozilla')
@classmethod
def determine_version(cls, exe):
output = Executable(exe)('version', output=str, error=str)
match = re.search(r'OpenSSL.(\S+)*', output)
return match.group(1) if match else None
@property
def libs(self):
return find_libraries(
['libssl', 'libcrypto'], root=self.prefix, recursive=True)
def handle_fetch_error(self, error):
tty.warn("Fetching OpenSSL failed. This may indicate that OpenSSL has "
"been updated, and the version in your instance of Spack is "
"insecure. Consider updating to the latest OpenSSL version.")
def install(self, spec, prefix):
# OpenSSL uses a variable APPS in its Makefile. If it happens to be set
# in the environment, then this will override what is set in the
# Makefile, leading to build errors.
env.pop('APPS', None)
if str(spec.target.family) in ('x86_64', 'ppc64'):
# This needs to be done for all 64-bit architectures (except Linux,
# where it happens automatically?)
env['KERNEL_BITS'] = '64'
options = ['zlib']
if spec.satisfies("+shared"):
options.append("shared")
if spec.satisfies("-shared"):
options.append("no-shared")
if spec.satisfies("-pic"):
options.append("no-pic")
if spec.satisfies("-deprecated"):
options.append("no-deprecated")
if spec.satisfies('@1.0'):
options.append('no-krb5')
# clang does not support the .arch directive in assembly files.
if ('clang' in self.compiler.cc or 'nvc' in self.compiler.cc) and \
spec.target.family == 'aarch64':
options.append('no-asm')
# The default glibc provided by CentOS 7 does not provide proper
# atomic support when using the NVIDIA compilers
if self.spec.satisfies('%nvhpc os=centos7'):
options.append('-D__STDC_NO_ATOMICS__')
config = Executable('./config')
config('--prefix=%s' % prefix,
'--openssldir=%s' % join_path(prefix, 'etc', 'openssl'),
'-I{0}'.format(self.spec['zlib'].prefix.include),
'-L{0}'.format(self.spec['zlib'].prefix.lib),
*options)
# Remove non-standard compiler options if present. These options are
# present e.g. on Darwin. They are non-standard, i.e. most compilers
# (e.g. gcc) will not accept them.
filter_file(r'-arch x86_64', '', 'Makefile')
make()
if self.run_tests:
make('test', parallel=False) # 'VERBOSE=1'
install_tgt = 'install' if self.spec.satisfies('+docs') else 'install_sw'
# See https://github.com/openssl/openssl/issues/7466#issuecomment-432148137
make(install_tgt, parallel=False)
@run_after('install')
def link_system_certs(self):
if self.spec.variants['certs'].value != 'system':
return
system_dirs = [
# CentOS, Fedora, RHEL
'/etc/pki/tls',
# Ubuntu
'/usr/lib/ssl',
# OpenSUSE
'/etc/ssl'
]
pkg_dir = join_path(self.prefix, 'etc', 'openssl')
mkdirp(pkg_dir)
for directory in system_dirs:
# Link configuration file
sys_conf = join_path(directory, 'openssl.cnf')
pkg_conf = join_path(pkg_dir, 'openssl.cnf')
if os.path.exists(sys_conf) and not os.path.exists(pkg_conf):
os.symlink(sys_conf, pkg_conf)
sys_cert = join_path(directory, 'cert.pem')
pkg_cert = join_path(pkg_dir, 'cert.pem')
# If a bundle exists, use it. This is the preferred way on Fedora,
# where the certs directory does not work.
if os.path.exists(sys_cert) and not os.path.exists(pkg_cert):
os.symlink(sys_cert, pkg_cert)
sys_certs = join_path(directory, 'certs')
pkg_certs = join_path(pkg_dir, 'certs')
# If the certs directory exists, symlink it into the package.
# We symlink the whole directory instead of all files because
# the directory contents might change without Spack noticing.
if os.path.isdir(sys_certs) and not os.path.islink(pkg_certs):
if os.path.isdir(pkg_certs):
os.rmdir(pkg_certs)
os.symlink(sys_certs, pkg_certs)
@run_after('install')
def link_mozilla_certs(self):
if self.spec.variants['certs'].value != 'mozilla':
return
pkg_dir = join_path(self.prefix, 'etc', 'openssl')
mkdirp(pkg_dir)
mozilla_pem = self.spec['ca-certificates-mozilla'].pem_path
pkg_cert = join_path(pkg_dir, 'cert.pem')
if not os.path.exists(pkg_cert):
os.symlink(mozilla_pem, pkg_cert)
def patch(self):
if self.spec.satisfies('%nvhpc'):
# Remove incompatible preprocessor flags
filter_file('-MF ', '',
'Configurations/unix-Makefile.tmpl', string=True)
filter_file(r'-MT \$\@ ', '',
'Configurations/unix-Makefile.tmpl', string=True)
def setup_build_environment(self, env):
env.set('PERL', self.spec['perl'].prefix.bin.perl)
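# Example spec strings for the variants declared above (a sketch using Spack's
# standard variant syntax; available versions depend on your Spack checkout):
#
#   spack install openssl@1.1.1m certs=mozilla   # CA bundle from ca-certificates-mozilla
#   spack install openssl certs=system +docs     # symlink system certs, install manpages
#   spack install openssl ~shared ~deprecated    # static libraries, no legacy.so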
# === project_euler/problem_25/sol3.py | JB1959/Python @ b6ca263 | MIT | 985 bytes | 14 stars ===
"""
The Fibonacci sequence is defined by the recurrence relation:
Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1.
Hence the first 12 terms will be:
F1 = 1
F2 = 1
F3 = 2
F4 = 3
F5 = 5
F6 = 8
F7 = 13
F8 = 21
F9 = 34
F10 = 55
F11 = 89
F12 = 144
The 12th term, F12, is the first term to contain three digits.
What is the index of the first term in the Fibonacci sequence to contain 1000
digits?
"""
def solution(n):
"""Returns the index of the first term in the Fibonacci sequence to contain
n digits.
>>> solution(1000)
4782
>>> solution(100)
476
>>> solution(50)
237
>>> solution(3)
12
"""
    f1, f2 = 1, 1
    index = 2
    # Advance the pair (F(index-1), F(index)) until F(index) reaches n digits.
    while len(str(f2)) < n:
        f1, f2 = f2, f1 + f2
        index += 1
    return index
if __name__ == "__main__":
    print(solution(int(input().strip())))
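# A closed-form alternative (a sketch derived from Binet's formula): F(k) is
# the nearest integer to phi**k / sqrt(5), so F(k) has
# floor(k*log10(phi) - log10(5)/2) + 1 digits, and the first index with n
# digits follows by solving for k:
from math import ceil, log10, sqrt


def solution_closed_form(n):
    phi = (1 + sqrt(5)) / 2
    return ceil((n - 1 + log10(5) / 2) / log10(phi))


# solution_closed_form(1000) == 4782, matching the iterative answer above.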
# === pubscience/translate/custom_translate_google.py | bramiozo/PubScience @ 78b6a9e | Apache-2.0 | 4,502 bytes ===
import six
from google.cloud import translate_v2  # the v2 client exposes the translate() call used below
import argparse
import os
from benedict import benedict
class Translate():
def __init__(self,
input_loc = None,
output_loc = None,
glos_loc = None,
source_lang='en',
dest_lang='nl',
method='public',
batch_size=50,
write_mode = 'w',
public_key_file=None,
config_file='./config/settings.yaml'):
'''
Batchsize in number of lines, batching is used to reduce the number of API calls
glos_loc, location in google cloud, projects/locations/glossaries/*.
'''
assert input_loc is not None, "Input location should be provided"
assert output_loc is not None, "Output location should be provided"
self.input_loc = input_loc
self.output_loc = output_loc
self.glos_loc = glos_loc
        # Defaults come from the constructor arguments; an existing YAML config
        # file overrides them.
        self.source_lang = source_lang
        self.dest_lang = dest_lang
        self.method = method
        self.batch_size = batch_size
        self.write_mode = write_mode
        self.config_file = config_file
        if os.path.isfile(config_file):
            params = benedict(config_file, format='yaml')
            for k, v in params['translation'].items():
                setattr(self, k, v)
        if self.method == 'public':
            assert isinstance(public_key_file, str), "Public key file should be the location of a .json/.p12/.pem file"
            if os.path.isfile(public_key_file):
                # Build the client from a service-account key file.
                self.translate_client = translate_v2.Client.from_service_account_json(public_key_file)
        if self.glos_loc is not None:
            # NOTE: glossaries are a v3 (TranslationServiceClient) feature; the
            # v2 client used here does not support them, so glos_loc is only stored.
            pass
def translate_text_google(self,txt):
"""Translates text into the target language.
Target must be an ISO 639-1 language code.
See https://g.co/cloud/translate/v2/translate-reference#supported_languages
For more information on setting up the google translation API:
* https://cloud.google.com/translate/docs/setup
* https://cloud.google.com/translate/docs/advanced/glossary
* https://cloud.google.com/translate/docs/samples/translate-v3-create-glossary
"""
        if isinstance(txt, six.binary_type):
            txt = txt.decode("utf-8")
        result = self.translate_client.translate(
            txt, target_language=self.dest_lang, source_language=self.source_lang)
        return result['translatedText']
    def translate_text_nmt(self, txt):
        # Stub for a neural MT backend: https://github.com/UKPLab/EasyNMT
        return True
    def _reader(self):
        # Yield newline-joined batches of self.batch_size lines; the final
        # partial batch is flushed as well.
        batch = []
        with open(self.input_loc, 'r') as reader:
            for line in reader:
                batch.append(line.rstrip('\n'))
                if len(batch) == self.batch_size:
                    yield "\n".join(batch)
                    batch = []
            if batch:
                yield "\n".join(batch)
    def translate(self):
        with open(self.output_loc, self.write_mode) as w:
            for b in self._reader():
                w.write(self.translate_text_google(b) + "\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Processing input for the cleaning routine')
parser.add_argument('--in', dest='file_loc', help='Absolute input-file location', type=str)
parser.add_argument('--out', dest='out_loc', help='Absolute output-file location', type=str)
parser.add_argument('--glossary', dest='glos_loc', help='Glossary to enforce translation pairs', type=str)
parser.add_argument('--config', dest='config_loc', help='Absolute config-file location',
type=str, default='config/settings.yaml')
parser.add_argument('--pubkey', dest='pub_key_file', help='Absolute credentials-file location',
type=str, default=None)
    args = parser.parse_args()
    translator = Translate(input_loc=args.file_loc,
                           output_loc=args.out_loc,
                           glos_loc=args.glos_loc,
                           config_file=args.config_loc,
                           public_key_file=args.pub_key_file)
    translator.translate()
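# Example invocation (a sketch; file paths are placeholders):
#   python custom_translate_google.py --in /data/abstracts_en.txt \
#       --out /data/abstracts_nl.txt --config config/settings.yaml \
#       --pubkey /keys/service-account.json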
# === examples/terran/mass_reaper.py | Talv/python-sc2 @ 436805a | MIT | 20,468 bytes ===
"""
Bot that stays on 1base, goes 4 rax mass reaper
This bot is one of the first examples that are micro intensive
Bot has a chance to win against elite (=Difficulty.VeryHard) zerg AI
Bot made by Burny
"""
import random
import sc2
from sc2 import Race, Difficulty
from sc2.constants import *
from sc2.position import Point2, Point3
from sc2.unit import Unit
from sc2.player import Bot, Computer
from sc2.player import Human
from sc2.ids.unit_typeid import UnitTypeId
from sc2.ids.ability_id import AbilityId
from sc2.units import Units
class MassReaperBot(sc2.BotAI):
def __init__(self):
# Select distance calculation method 0, which is the pure python distance calculation without caching or indexing, using math.hypot(), for more info see distances.py _distances_override_functions() function
self.distance_calculation_method = 0
async def on_step(self, iteration):
# Benchmark and print duration time of the on_step method based on "self.distance_calculation_method" value
# print(self.time_formatted, self.supply_used, self.step_time[1])
"""
- depots when low on remaining supply
- townhalls contains commandcenter and orbitalcommand
- self.units(TYPE).not_ready.amount selects all units of that type, filters incomplete units, and then counts the amount
- self.already_pending(TYPE) counts how many units are queued
"""
if (
self.supply_left < 5
and self.townhalls
and self.supply_used >= 14
and self.can_afford(UnitTypeId.SUPPLYDEPOT)
and self.already_pending(UnitTypeId.SUPPLYDEPOT) < 1
):
workers = self.workers.gathering
# If workers were found
if workers:
worker = workers.furthest_to(workers.center)
location = await self.find_placement(UnitTypeId.SUPPLYDEPOT, worker.position, placement_step=3)
# If a placement location was found
if location:
# Order worker to build exactly on that location
self.do(worker.build(UnitTypeId.SUPPLYDEPOT, location), subtract_cost=True)
# Lower all depots when finished
for depot in self.structures(UnitTypeId.SUPPLYDEPOT).ready:
self.do(depot(AbilityId.MORPH_SUPPLYDEPOT_LOWER))
# Morph commandcenter to orbitalcommand
# Check if tech requirement for orbital is complete (e.g. you need a barracks to be able to morph an orbital)
orbital_tech_requirement: float = self.tech_requirement_progress(UnitTypeId.ORBITALCOMMAND)
if orbital_tech_requirement == 1:
# Loop over all idle command centers (CCs that are not building SCVs or morphing to orbital)
for cc in self.townhalls(UnitTypeId.COMMANDCENTER).idle:
# Check if we have 150 minerals; this used to be an issue when the API returned 550 (value) of the orbital, but we only wanted the 150 minerals morph cost
if self.can_afford(UnitTypeId.ORBITALCOMMAND):
self.do(cc(AbilityId.UPGRADETOORBITAL_ORBITALCOMMAND), subtract_cost=True)
# Expand if we can afford (400 minerals) and have less than 2 bases
if (
1 <= self.townhalls.amount < 2
and self.already_pending(UnitTypeId.COMMANDCENTER) == 0
and self.can_afford(UnitTypeId.COMMANDCENTER)
):
# get_next_expansion returns the position of the next possible expansion location where you can place a command center
location = await self.get_next_expansion()
if location:
# Now we "select" (or choose) the nearest worker to that found location
worker = self.select_build_worker(location)
if worker and self.can_afford(UnitTypeId.COMMANDCENTER):
# The worker will be commanded to build the command center
self.do(worker.build(UnitTypeId.COMMANDCENTER, location), subtract_cost=True)
# Build up to 4 barracks if we can afford them
# Check if we have a supply depot (tech requirement) before trying to make barracks
barracks_tech_requirement: float = self.tech_requirement_progress(UnitTypeId.BARRACKS)
if (
barracks_tech_requirement == 1
# self.structures.of_type(
# [UnitTypeId.SUPPLYDEPOT, UnitTypeId.SUPPLYDEPOTLOWERED, UnitTypeId.SUPPLYDEPOTDROP]
# ).ready
and self.structures(UnitTypeId.BARRACKS).ready.amount + self.already_pending(UnitTypeId.BARRACKS) < 4
and self.can_afford(UnitTypeId.BARRACKS)
):
workers = self.workers.gathering
if (
workers and self.townhalls
): # need to check if townhalls.amount > 0 because placement is based on townhall location
worker = workers.furthest_to(workers.center)
# I chose placement_step 4 here so there will be gaps between barracks hopefully
location = await self.find_placement(UnitTypeId.BARRACKS, self.townhalls.random.position, placement_step=4)
if location:
self.do(worker.build(UnitTypeId.BARRACKS, location), subtract_cost=True)
# Build refineries (on nearby vespene) when at least one barracks is in construction
if (
self.structures(UnitTypeId.BARRACKS).ready.amount + self.already_pending(UnitTypeId.BARRACKS) > 0
and self.already_pending(UnitTypeId.REFINERY) < 1
):
# Loop over all townhalls that are 100% complete
for th in self.townhalls.ready:
# Find all vespene geysers that are closer than range 10 to this townhall
vgs = self.vespene_geyser.closer_than(10, th)
for vg in vgs:
if await self.can_place(UnitTypeId.REFINERY, vg.position) and self.can_afford(UnitTypeId.REFINERY):
workers = self.workers.gathering
if workers: # same condition as above
worker = workers.closest_to(vg)
# Caution: the target for the refinery has to be the vespene geyser, not its position!
self.do(worker.build(UnitTypeId.REFINERY, vg), subtract_cost=True)
# Dont build more than one each frame
break
# Make scvs until 22, usually you only need 1:1 mineral:gas ratio for reapers, but if you don't lose any then you will need additional depots (mule income should take care of that)
        # Stop SCV production when the barracks is complete but we still have a command center (prioritize morphing to orbital command)
if (
self.can_afford(UnitTypeId.SCV)
and self.supply_left > 0
and self.supply_workers < 22
and (
self.structures(UnitTypeId.BARRACKS).ready.amount < 1
and self.townhalls(UnitTypeId.COMMANDCENTER).idle
or self.townhalls(UnitTypeId.ORBITALCOMMAND).idle
)
):
for th in self.townhalls.idle:
self.do(th.train(UnitTypeId.SCV), subtract_cost=True, subtract_supply=True)
# Make reapers if we can afford them and we have supply remaining
if self.supply_left > 0:
# Loop through all idle barracks
for rax in self.structures(UnitTypeId.BARRACKS).idle:
if self.can_afford(UnitTypeId.REAPER):
self.do(rax.train(UnitTypeId.REAPER), subtract_cost=True, subtract_supply=True)
# Send workers to mine from gas
if iteration % 25 == 0:
await self.distribute_workers()
# Reaper micro
enemies = self.enemy_units | self.enemy_structures
enemies_can_attack = enemies.filter(lambda unit: unit.can_attack_ground)
for r in self.units(UnitTypeId.REAPER):
# Move to range 15 of closest unit if reaper is below 20 hp and not regenerating
enemyThreatsClose = enemies_can_attack.filter(
lambda unit: unit.distance_to(r) < 15
) # Threats that can attack the reaper
if r.health_percentage < 2 / 5 and enemyThreatsClose:
retreatPoints = self.neighbors8(r.position, distance=2) | self.neighbors8(r.position, distance=4)
# Filter points that are pathable
retreatPoints = {x for x in retreatPoints if self.in_pathing_grid(x)}
if retreatPoints:
closestEnemy = enemyThreatsClose.closest_to(r)
retreatPoint = closestEnemy.position.furthest(retreatPoints)
self.do(r.move(retreatPoint))
continue # Continue for loop, dont execute any of the following
# Reaper is ready to attack, shoot nearest ground unit
enemyGroundUnits = enemies.filter(
lambda unit: unit.distance_to(r) < 5 and not unit.is_flying
) # Hardcoded attackrange of 5
if r.weapon_cooldown == 0 and enemyGroundUnits:
enemyGroundUnits = enemyGroundUnits.sorted(lambda x: x.distance_to(r))
closestEnemy = enemyGroundUnits[0]
self.do(r.attack(closestEnemy))
continue # Continue for loop, dont execute any of the following
# Attack is on cooldown, check if grenade is on cooldown, if not then throw it to furthest enemy in range 5
reaperGrenadeRange = self._game_data.abilities[AbilityId.KD8CHARGE_KD8CHARGE.value]._proto.cast_range
enemyGroundUnitsInGrenadeRange = enemies_can_attack.filter(
lambda unit: not unit.is_structure
and not unit.is_flying
and unit.type_id not in {UnitTypeId.LARVA, UnitTypeId.EGG}
and unit.distance_to(r) < reaperGrenadeRange
)
if enemyGroundUnitsInGrenadeRange and (r.is_attacking or r.is_moving):
# If AbilityId.KD8CHARGE_KD8CHARGE in abilities, we check that to see if the reaper grenade is off cooldown
abilities = await self.get_available_abilities(r)
enemyGroundUnitsInGrenadeRange = enemyGroundUnitsInGrenadeRange.sorted(
lambda x: x.distance_to(r), reverse=True
)
furthestEnemy = None
for enemy in enemyGroundUnitsInGrenadeRange:
if await self.can_cast(r, AbilityId.KD8CHARGE_KD8CHARGE, enemy, cached_abilities_of_unit=abilities):
furthestEnemy = enemy
break
if furthestEnemy:
self.do(r(AbilityId.KD8CHARGE_KD8CHARGE, furthestEnemy))
continue # Continue for loop, don't execute any of the following
# Move to max unit range if enemy is closer than 4
enemyThreatsVeryClose = enemies.filter(
lambda unit: unit.can_attack_ground and unit.distance_to(r) < 4.5
) # Hardcoded attackrange minus 0.5
# Threats that can attack the reaper
if r.weapon_cooldown != 0 and enemyThreatsVeryClose:
retreatPoints = self.neighbors8(r.position, distance=2) | self.neighbors8(r.position, distance=4)
# Filter points that are pathable by a reaper
retreatPoints = {x for x in retreatPoints if self.in_pathing_grid(x)}
if retreatPoints:
closestEnemy = enemyThreatsVeryClose.closest_to(r)
retreatPoint = max(retreatPoints, key=lambda x: x.distance_to(closestEnemy) - x.distance_to(r))
self.do(r.move(retreatPoint))
continue # Continue for loop, don't execute any of the following
# Move to nearest enemy ground unit/building because no enemy unit is closer than 5
allEnemyGroundUnits = self.enemy_units.not_flying
if allEnemyGroundUnits:
closestEnemy = allEnemyGroundUnits.closest_to(r)
self.do(r.move(closestEnemy))
continue # Continue for loop, don't execute any of the following
# Move to random enemy start location if no enemy buildings have been seen
self.do(r.move(random.choice(self.enemy_start_locations)))
        # Manage idle SCVs; this would also be handled by distribute_workers
if self.townhalls:
for w in self.workers.idle:
th = self.townhalls.closest_to(w)
mfs = self.mineral_field.closer_than(10, th)
if mfs:
mf = mfs.closest_to(w)
self.do(w.gather(mf))
# Manage orbital energy and drop mules
for oc in self.townhalls(UnitTypeId.ORBITALCOMMAND).filter(lambda x: x.energy >= 50):
mfs = self.mineral_field.closer_than(10, oc)
if mfs:
mf = max(mfs, key=lambda x: x.mineral_contents)
self.do(oc(AbilityId.CALLDOWNMULE_CALLDOWNMULE, mf))
# When running out of mineral fields near command center, fly to next base with minerals
# Helper functions
# Stolen and modified from position.py
def neighbors4(self, position, distance=1):
p = position
d = distance
return {Point2((p.x - d, p.y)), Point2((p.x + d, p.y)), Point2((p.x, p.y - d)), Point2((p.x, p.y + d))}
# Stolen and modified from position.py
def neighbors8(self, position, distance=1):
p = position
d = distance
return self.neighbors4(position, distance) | {
Point2((p.x - d, p.y - d)),
Point2((p.x - d, p.y + d)),
Point2((p.x + d, p.y - d)),
Point2((p.x + d, p.y + d)),
}
# Distribute workers function rewritten, the default distribute_workers() function did not saturate gas quickly enough
async def distribute_workers(self, performanceHeavy=True, onlySaturateGas=False):
mineralTags = [x.tag for x in self.mineral_field]
gas_buildingTags = [x.tag for x in self.gas_buildings]
workerPool = Units([], self)
workerPoolTags = set()
# Find all gas_buildings that have surplus or deficit
deficit_gas_buildings = {}
surplusgas_buildings = {}
for g in self.gas_buildings.filter(lambda x: x.vespene_contents > 0):
# Only loop over gas_buildings that have still gas in them
deficit = g.ideal_harvesters - g.assigned_harvesters
if deficit > 0:
deficit_gas_buildings[g.tag] = {"unit": g, "deficit": deficit}
elif deficit < 0:
surplusWorkers = self.workers.closer_than(10, g).filter(
lambda w: w not in workerPoolTags
and len(w.orders) == 1
and w.orders[0].ability.id in [AbilityId.HARVEST_GATHER]
and w.orders[0].target in gas_buildingTags
)
for i in range(-deficit):
if surplusWorkers.amount > 0:
w = surplusWorkers.pop()
workerPool.append(w)
workerPoolTags.add(w.tag)
surplusgas_buildings[g.tag] = {"unit": g, "deficit": deficit}
# Find all townhalls that have surplus or deficit
deficitTownhalls = {}
surplusTownhalls = {}
if not onlySaturateGas:
for th in self.townhalls:
deficit = th.ideal_harvesters - th.assigned_harvesters
if deficit > 0:
deficitTownhalls[th.tag] = {"unit": th, "deficit": deficit}
elif deficit < 0:
surplusWorkers = self.workers.closer_than(10, th).filter(
lambda w: w.tag not in workerPoolTags
and len(w.orders) == 1
and w.orders[0].ability.id in [AbilityId.HARVEST_GATHER]
and w.orders[0].target in mineralTags
)
# workerPool.extend(surplusWorkers)
for i in range(-deficit):
if surplusWorkers.amount > 0:
w = surplusWorkers.pop()
workerPool.append(w)
workerPoolTags.add(w.tag)
surplusTownhalls[th.tag] = {"unit": th, "deficit": deficit}
if all(
[
len(deficit_gas_buildings) == 0,
len(surplusgas_buildings) == 0,
len(surplusTownhalls) == 0 or deficitTownhalls == 0,
]
):
# Cancel early if there is nothing to balance
return
# Check if deficit in gas less or equal than what we have in surplus, else grab some more workers from surplus bases
deficitGasCount = sum(
gasInfo["deficit"] for gasTag, gasInfo in deficit_gas_buildings.items() if gasInfo["deficit"] > 0
)
surplusCount = sum(
-gasInfo["deficit"] for gasTag, gasInfo in surplusgas_buildings.items() if gasInfo["deficit"] < 0
)
surplusCount += sum(-thInfo["deficit"] for thTag, thInfo in surplusTownhalls.items() if thInfo["deficit"] < 0)
if deficitGasCount - surplusCount > 0:
# Grab workers near the gas who are mining minerals
for gTag, gInfo in deficit_gas_buildings.items():
if workerPool.amount >= deficitGasCount:
break
workersNearGas = self.workers.closer_than(10, gInfo["unit"]).filter(
lambda w: w.tag not in workerPoolTags
and len(w.orders) == 1
and w.orders[0].ability.id in [AbilityId.HARVEST_GATHER]
and w.orders[0].target in mineralTags
)
while workersNearGas.amount > 0 and workerPool.amount < deficitGasCount:
w = workersNearGas.pop()
workerPool.append(w)
workerPoolTags.add(w.tag)
# Now we should have enough workers in the pool to saturate all gases, and if there are workers left over, make them mine at townhalls that have mineral workers deficit
for gTag, gInfo in deficit_gas_buildings.items():
if performanceHeavy:
# Sort furthest away to closest (as the pop() function will take the last element)
workerPool.sort(key=lambda x: x.distance_to(gInfo["unit"]), reverse=True)
for i in range(gInfo["deficit"]):
if workerPool.amount > 0:
w = workerPool.pop()
if len(w.orders) == 1 and w.orders[0].ability.id in [AbilityId.HARVEST_RETURN]:
self.do(w.gather(gInfo["unit"], queue=True))
else:
self.do(w.gather(gInfo["unit"]))
if not onlySaturateGas:
# If we now have left over workers, make them mine at bases with deficit in mineral workers
for thTag, thInfo in deficitTownhalls.items():
if performanceHeavy:
# Sort furthest away to closest (as the pop() function will take the last element)
workerPool.sort(key=lambda x: x.distance_to(thInfo["unit"]), reverse=True)
for i in range(thInfo["deficit"]):
if workerPool.amount > 0:
w = workerPool.pop()
mf = self.mineral_field.closer_than(10, thInfo["unit"]).closest_to(w)
if len(w.orders) == 1 and w.orders[0].ability.id in [AbilityId.HARVEST_RETURN]:
self.do(w.gather(mf, queue=True))
else:
self.do(w.gather(mf))
def main():
# Multiple difficulties for enemy bots available https://github.com/Blizzard/s2client-api/blob/ce2b3c5ac5d0c85ede96cef38ee7ee55714eeb2f/include/sc2api/sc2_gametypes.h#L30
sc2.run_game(
sc2.maps.get("(2)CatalystLE"),
[Bot(Race.Terran, MassReaperBot()), Computer(Race.Zerg, Difficulty.VeryHard)],
realtime=False,
)
if __name__ == "__main__":
main()
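# The retreat logic above repeatedly picks, from the pathable ring produced by
# neighbors8(), the candidate that maximises (distance to enemy) minus
# (distance to reaper). A standalone sketch of that selection rule with plain
# coordinate tuples (hypothetical positions, no sc2 dependency):
import math


def pick_retreat(candidates, unit, enemy):
    dist = lambda p, q: math.hypot(p[0] - q[0], p[1] - q[1])
    return max(candidates, key=lambda p: dist(p, enemy) - dist(p, unit))


# pick_retreat([(0, 1), (1, 0), (-1, 0)], (0, 0), (2, 0)) -> (-1, 0),
# i.e. the point that backs away from the enemy while staying close to the unit.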
# === python/rank4.py | berquist/eg @ 4c368b1 | Unlicense | 305 bytes ===
def rank4_simple(a, b):
    """Element-wise contraction of two equally-shaped rank-4 tensors: sum(a * b)."""
    assert a.shape == b.shape
da, db, dc, dd = a.shape
s = 0
for iia in range(da):
for iib in range(db):
for iic in range(dc):
for iid in range(dd):
s += a[iia, iib, iic, iid] * b[iia, iib, iic, iid]
return s
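# The quadruple loop above is the fully written-out contraction of two rank-4
# tensors; with NumPy it collapses to a single reduction. A verification sketch:
import numpy as np

a = np.random.rand(2, 3, 4, 5)
b = np.random.rand(2, 3, 4, 5)
assert np.isclose(rank4_simple(a, b), np.einsum('ijkl,ijkl->', a, b))
assert np.isclose(rank4_simple(a, b), np.sum(a * b))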
# === scripts/clean_lib.py | stajichlab/localizaTE @ 31dd4fa | MIT | 878 bytes | 1 star ===
#######################################
# script for cleaning repeat-library headers
##########################################
# change the input filename here when necessary
with open('all_TEs_cryne', 'r') as lib:
    for line in lib:
        line = line.strip()
        if ';seqs=' in line:
            # keep only the part before the ';seqs=...' annotation
            linea = line.split(';seqs')[0].strip()
            if 'centroid=' in linea:
                print(linea.replace('centroid=', ''))
            if 'RepeatScout' in linea:
                print(linea.split(' (')[0])
            elif 'Recon' in linea:
                print(linea.split(' (')[0])
        elif 'RepeatScout' in line:
            print(line.split(' (')[0])
        elif 'Recon' in line:
            print(line.split(' (')[0])
        elif 'MITE_' in line:
            # keep only the identifier before the first space
            print(line.split(' ')[0])
        else:
            print(line)
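# Hypothetical before/after illustration of the branches above (the header
# formats are assumed for the example, not taken from real library data):
#   'centroid=rnd-1_family-42 ;seqs=12'  ->  'rnd-1_family-42'
#   'R=10 (RepeatScout)'                 ->  'R=10'
#   'MITE_17 some annotation'            ->  'MITE_17'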
# === common/src/autogluon/common/features/infer_types.py | Kota28/autogluon @ 83e18a3 | Apache-2.0 | 6,365 bytes | 1 star ===
import logging
from collections import defaultdict
from typing import List
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
logger = logging.getLogger(__name__)
def get_type_family_raw(dtype) -> str:
"""From dtype, gets the dtype family."""
try:
if isinstance(dtype, pd.SparseDtype):
dtype = dtype.subtype
if dtype.name == 'category':
return 'category'
if 'datetime' in dtype.name:
return 'datetime'
if 'string' in dtype.name:
return 'object'
elif np.issubdtype(dtype, np.integer):
return 'int'
elif np.issubdtype(dtype, np.floating):
return 'float'
except Exception as err:
logger.error(f'Warning: dtype {dtype} is not recognized as a valid dtype by numpy! AutoGluon may incorrectly handle this feature...')
logger.error(err)
if dtype.name in ['bool', 'bool_']:
return 'bool'
elif dtype.name in ['str', 'string', 'object']:
return 'object'
else:
return dtype.name
# Real dtypes
def get_type_map_real(df: DataFrame) -> dict:
features_types = df.dtypes.to_dict()
return {k: v.name for k, v in features_types.items()}
# Raw dtypes (Real dtypes family)
def get_type_map_raw(df: DataFrame) -> dict:
features_types = df.dtypes.to_dict()
return {k: get_type_family_raw(v) for k, v in features_types.items()}
def get_type_map_special(X: DataFrame) -> dict:
type_map_special = {}
for column in X:
types_special = get_types_special(X[column])
if types_special:
type_map_special[column] = types_special
return type_map_special
def get_types_special(X: Series) -> List[str]:
types_special = []
if isinstance(X.dtype, pd.SparseDtype):
types_special.append('sparse')
if check_if_datetime_as_object_feature(X):
types_special.append('datetime_as_object')
elif check_if_nlp_feature(X):
types_special.append('text')
return types_special
def get_type_group_map(type_map: dict) -> defaultdict:
type_group_map = defaultdict(list)
for key, val in type_map.items():
if isinstance(val, list):
for feature_type in val:
type_group_map[feature_type].append(key)
else:
type_group_map[val].append(key)
return type_group_map
def get_type_group_map_real(df: DataFrame) -> defaultdict:
type_map_real = get_type_map_real(df)
return get_type_group_map(type_map_real)
def get_type_group_map_raw(df: DataFrame) -> defaultdict:
type_map_raw = get_type_map_raw(df)
return get_type_group_map(type_map_raw)
def get_type_group_map_special(df: DataFrame) -> defaultdict:
type_map_special = get_type_map_special(df)
return get_type_group_map(type_map_special)
# TODO: Expand to int64 -> date features (milli from epoch etc)
# TODO: This takes a surprisingly long time to run, ~30 seconds a laptop for 50,000 rows of datetime_as_object for a single column. Try to optimize.
def check_if_datetime_as_object_feature(X: Series) -> bool:
type_family = get_type_family_raw(X.dtype)
# TODO: Check if low numeric numbers, could be categorical encoding!
# TODO: If low numeric, potentially it is just numeric instead of date
if X.isnull().all():
return False
if type_family != 'object': # TODO: seconds from epoch support
return False
try:
# TODO: pd.Series(['20170204','20170205','20170206']) is incorrectly not detected as datetime_as_object
# But we don't want pd.Series(['184','822828','20170206']) to be detected as datetime_as_object
# Need some smart logic (check min/max values?, check last 2 values don't go >31?)
pd.to_numeric(X)
except:
try:
if len(X) > 500:
# Sample to speed-up type inference
X = X.sample(n=500, random_state=0)
result = pd.to_datetime(X, errors='coerce')
if result.isnull().mean() > 0.8: # If over 80% of the rows are NaN
return False
return True
except:
return False
else:
return False
def check_if_nlp_feature(X: Series) -> bool:
type_family = get_type_family_raw(X.dtype)
if type_family != 'object':
return False
if len(X) > 5000:
# Sample to speed-up type inference
X = X.sample(n=5000, random_state=0)
X_unique = X.unique()
num_unique = len(X_unique)
num_rows = len(X)
unique_ratio = num_unique / num_rows
if unique_ratio <= 0.01:
return False
try:
avg_words = Series(X_unique).str.split().str.len().mean()
except AttributeError:
return False
if avg_words < 3:
return False
return True
def get_bool_true_val(series: pd.Series):
"""
From a pandas series, get the replace_val to convert to boolean when calling:
series_bool = series == replace_val
Therefore, any value other than `replace_val` will be set to `False` when converting to boolean.
series must have exactly 2 unique values
We make the assumption that the value chosen as `True` between the two options is mostly arbitrary, with the exception that np.nan will not be considered `True`.
When possible, we try to sort the values so that (0, 1) will choose 1 as True, however this decision should ideally not impact downstream models much.
Any new unseen values (including nan) at inference time will be mapped to `False` automatically.
In this code, 0 and 0.0 (int and float) are treated as the same value. Similarly with any other integer and float (such as 1 and 1.0).
"""
# This is a safety net in case the unique types are mixed (such as string and int). In this scenario, an exception is raised and therefore we use the unsorted values.
try:
uniques = series.unique()
# Sort the values to avoid relying on row-order when determining which value is mapped to `True`.
uniques.sort()
except:
uniques = series.unique()
replace_val = uniques[1]
try:
# This is to ensure that we don't map np.nan to `True` in the boolean.
is_nan = np.isnan(replace_val)
except:
is_nan = False
if is_nan:
replace_val = uniques[0]
return replace_val
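# A quick usage sketch on a toy frame (column values are illustrative only):
if __name__ == '__main__':
    df = pd.DataFrame({
        'a': [1, 2, 3],
        'b': [0.1, 0.2, 0.3],
        'c': ['x', 'y', 'z'],
    })
    print(get_type_map_raw(df))              # {'a': 'int', 'b': 'float', 'c': 'object'}
    print(dict(get_type_group_map_raw(df)))  # {'int': ['a'], 'float': ['b'], 'object': ['c']}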
# === path_planning/helpers/geometry.py | omuryildirim/vecihi @ 76de1ea | MIT | 6,803 bytes ===
# Helpful geometry functions
# Author -- Shikhar Dev Gupta
from math import *
from random import *
# Define a point class
class point:
def __init__(self, x, y, obstacle=-1, test=-1):
self.x = x
self.y = y
self.obstacle = obstacle
# Kind of a radioactive tracker! See where your point came from
self.test = test
def __str__(self):
return ("x = " + str(self.x) + ", y = " + str(self.y) + ", obs = " + str(self.obstacle) + " and test:" + str(
self.test))
# Are the two points the same
def equals(self, other):
if (self.x == other.x and self.y == other.y):
return True
else:
return False
# Return index of a point from a list of points
def find_point(self, point_list):
for i in range(len(point_list)):
if (self.x == point_list[i].x and self.y == point_list[i].y):
return i
return -1
# Euclidean distance between two points
def find_dist(self, pt2):
return int(sqrt(pow((self.x - pt2.x), 2) + pow(self.y - pt2.y, 2)))
# Check if the point is inside an obstacle
def inside_polygon(self, obstacles):
for polygon in obstacles:
x = self.x
y = self.y
points = []
for i in polygon:
points.append([i.x, i.y])
n = len(points)
inside = False
p1x, p1y = points[0]
for i in range(1, n + 1):
p2x, p2y = points[i % n]
if y > min(p1y, p2y):
if y <= max(p1y, p2y):
if x <= max(p1x, p2x):
if p1y != p2y:
xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x, p1y = p2x, p2y
            if (inside is True):
return True
return False
# Find the point closest to a given point
def find_closest_point(self, list_of_vertices):
min_dist = 99999999
min_index = -1
for index, i in enumerate(list_of_vertices):
dist = find_dist(self, i)
if (dist < min_dist):
min_dist = dist
min_index = index
return min_index
# General procedures---------------------------------------
# Pull a random point from a given range
def random_point(x_range, y_range):
return point(randint(x_range[0], x_range[1]), randint(y_range[0], y_range[1]))
# See if three points are counter-clockwise in direction
def counter_clockwise(A, B, C):
return (C.y - A.y) * (B.x - A.x) > (B.y - A.y) * (C.x - A.x)
# Return true if line segments AB and CD intersect.
def intersect(A, B, C, D):
# Check if any three points are co-linear
if (((A.x * (B.y - C.y)) + (B.x * (C.y - A.y)) + (C.x * (A.y - B.y))) == 0):
return True
if (((A.x * (B.y - D.y)) + (B.x * (D.y - A.y)) + (D.x * (A.y - B.y))) == 0):
return True
if (((A.x * (C.y - D.y)) + (C.x * (D.y - A.y)) + (D.x * (A.y - C.y))) == 0):
return True
if (((B.x * (C.y - D.y)) + (C.x * (D.y - B.y)) + (D.x * (B.y - C.y))) == 0):
return True
return counter_clockwise(A, C, D) != counter_clockwise(B, C, D) and counter_clockwise(A, B, C) != counter_clockwise(
A, B, D)
# A generic line intersection function. Only returns -1 if lines are parallel
def line_intersection(segment1, segment2):
line1 = [segment1[0].x, segment1[0].y], [segment1[1].x, segment1[1].y]
line2 = [segment2[0].x, segment2[0].y], [segment2[1].x, segment2[1].y]
x_diff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
y_diff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
def determinant(a, b):
return ((a[0] * b[1]) - (a[1] * b[0]))
div = determinant(x_diff, y_diff)
if div == 0:
# parallel lines
return -1
d = (determinant(line1[0], line1[1]), determinant(line2[0], line2[1]))
x = determinant(d, x_diff) / float(div)
y = determinant(d, y_diff) / float(div)
x = int(x + 0.5)
y = int(y + 0.5)
return point(x, y)
# Final Segment Intersection function
def segment_intersection(a, b, c, d):
if (intersect(a, b, c, d) == True):
return line_intersection([a, b], [c, d])
else:
return -1
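# Illustrative sketch (hypothetical helper): the crossing diagonals above meet
# at (2,2), so segment_intersection returns a point rather than -1.
def _example_segment_intersection():
    hit = segment_intersection(point(0, 0), point(4, 4), point(0, 4), point(4, 0))
    return -1 if hit == -1 else (hit.x, hit.y)  # expected: (2, 2)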
# Find centroid of list of vertices
def centroid(vertices):
n = len(vertices)
if (n == 0):
return -1
sum_x = 0
sum_y = 0
for i in vertices:
sum_x = sum_x + i.x
sum_y = sum_y + i.y
centr_x = int(0.5 + sum_x / float(n))
centr_y = int(0.5 + sum_y / float(n))
return point(centr_x, centr_y)
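# Illustrative sketch (hypothetical helper): the centroid of an axis-aligned
# square is its center, rounded to integer coordinates by the int(0.5 + ...)
# logic above.
def _example_centroid():
    square = [point(0, 0), point(4, 0), point(4, 4), point(0, 4)]
    c = centroid(square)
    return c.x, c.y  # expected: (2, 2)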
# Find area of a polygon
def polygon_area(vertices, number_of_vertices):
# Expects closed polygon
n = number_of_vertices
if (n % 2 != 0):
vertices.append(vertices[0])
area = 0
    for i in range(0, n, 2):
area += vertices[i + 1].x * (vertices[i + 2].y - vertices[i].y) + vertices[i + 1].y * (
vertices[i].x - vertices[i + 2].x)
return int(area / 2)
# Max point in a list of points
def point_max(lst, cmp2=1):
# 1 for x and 2 for y
if (cmp2 == 1):
max2 = -1
max2_ind = -1
tmp = [i.x for i in lst]
for index, j in enumerate(tmp):
if (j > max2):
max2 = j
max2_ind = index
elif (cmp2 == 2):
max2 = -1
max2_ind = -1
tmp = [i.y for i in lst]
for index, j in enumerate(tmp):
if (j > max2):
max2 = j
max2_ind = index
return max2_ind
# Min point in a list of points
def point_min(lst, cmp2=1):
# 1 for x and 2 for y
if (cmp2 == 1):
min2 = 999999999
min2_ind = -1
tmp = [i.x for i in lst]
for index, j in enumerate(tmp):
if (j < min2):
min2 = j
min2_ind = index
elif (cmp2 == 2):
min2 = 999999999
min2_ind = -1
tmp = [i.y for i in lst]
for index, j in enumerate(tmp):
if (j < min2):
min2 = j
min2_ind = index
return min2_ind
# Distance between two points
def find_dist(pt1, pt2):
return int(sqrt(pow((pt1.x - pt2.x), 2) + pow(pt1.y - pt2.y, 2)))
# Check if the given segment is obstructed by given list of obstacles
# Returns True if segment is clear of obstructions
def check_obstruction(obstacles, segment):
res = True
break_out = False
for obs in obstacles:
# Add the last line to make closed polygon
n = len(obs) - 1
if (obs[n].equals(obs[0]) is False):
obs.append(obs[0])
for index in range(len(obs) - 1):
if (segment_intersection(segment[0], segment[1], obs[index], obs[index + 1]) != -1):
res = False
break_out = True
break
if (break_out is True):
break
return res
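# Illustrative sketch (hypothetical helper): a segment cutting through the
# square obstacle is reported as obstructed (False), while one passing clear
# of it returns True. Note that check_obstruction closes each polygon in place
# by appending its first vertex when needed.
def _example_check_obstruction():
    obstacle = [point(2, 2), point(6, 2), point(6, 6), point(2, 6)]
    blocked = check_obstruction([obstacle], [point(0, 0), point(8, 8)])  # False
    clear = check_obstruction([obstacle], [point(0, 8), point(1, 8)])    # True
    return blocked, clear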
# Find a point in some distance from the end of a line segment
def find_point_on_line(a, b, step):
    if (a.equals(b)):
        # Coincident endpoints give a zero-length direction vector, so the
        # divisions below would fail; raise instead of printing debug text.
        raise ValueError("find_point_on_line: a and b must be distinct points")
v_vector_x = (b.x - a.x)
v_vector_y = (b.y - a.y)
vector_norm_x = v_vector_x / float(sqrt(pow(v_vector_x, 2) + pow(v_vector_y, 2)))
vector_norm_y = v_vector_y / float(sqrt(pow(v_vector_x, 2) + pow(v_vector_y, 2)))
result_x = b.x + (step * vector_norm_x)
result_y = b.y + (step * vector_norm_y)
return point(int(round(result_x)), int(round(result_y)))
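# Illustrative sketch (hypothetical helper): extending the segment
# (0,0)->(3,4) by 5 units past b lands on (6,8), since the unit direction
# vector is (0.6, 0.8).
def _example_find_point_on_line():
    p = find_point_on_line(point(0, 0), point(3, 4), 5)
    return p.x, p.y  # expected: (6, 8)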
def step_from_to(p1, p2, step_size):
if find_dist(p1, p2) < step_size:
return p2
else:
theta = atan2(p2.y - p1.y, p2.x - p1.x)
return point(p1.x + step_size * cos(theta), p1.y + step_size * sin(theta))
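# Illustrative sketch (RRT-style usage is an assumption based on the names in
# this module): move from p1 toward p2 by at most step_size.
def _example_step_from_to():
    p = step_from_to(point(0, 0), point(10, 0), 4)
    return p.x, p.y  # expected: (4.0, 0.0) -- note this helper returns floats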
# === msgraph-cli-extensions/beta/financials_beta/azext_financials_beta/vendored_sdks/financials/operations/_financials_financials_operations.py (thewahome/msgraph-cli, MIT) ===
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class FinancialsFinancialsOperations(object):
"""FinancialsFinancialsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~financials.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get_financials(
self,
select=None, # type: Optional[List[str]]
expand=None, # type: Optional[List[Union[str, "models.Get1ItemsItem"]]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphFinancials"
"""Get financials.
Get financials.
:param select: Select properties to be returned.
:type select: list[str]
:param expand: Expand related entities.
:type expand: list[str or ~financials.models.Get1ItemsItem]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphFinancials, or the result of cls(response)
:rtype: ~financials.models.MicrosoftGraphFinancials
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphFinancials"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_financials.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphFinancials', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_financials.metadata = {'url': '/financials'} # type: ignore
def update_financials(
self,
body, # type: "models.MicrosoftGraphFinancials"
**kwargs # type: Any
):
# type: (...) -> None
"""Update financials.
Update financials.
:param body: New property values.
:type body: ~financials.models.MicrosoftGraphFinancials
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_financials.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphFinancials')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_financials.metadata = {'url': '/financials'} # type: ignore
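# Usage sketch (hedged; not part of the generated code): this operations class
# is not constructed directly. The generated service client wires it up with
# (client, config, serializer, deserializer) and attaches it as an attribute,
# after which a call looks roughly like:
#     financials = service_client.financials_financials.get_financials(
#         select=['companies'])
# The attribute name `financials_financials` is an assumption inferred from
# the class name; consult the generated client for the actual property.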
# === stubs.min/System/Windows/Forms/__init___parts/DataGrid.py (denfromufa/ironpython-stubs, MIT) ===
class DataGrid(Control,IComponent,IDisposable,IOleControl,IOleObject,IOleInPlaceObject,IOleInPlaceActiveObject,IOleWindow,IViewObject,IViewObject2,IPersist,IPersistStreamInit,IPersistPropertyBag,IPersistStorage,IQuickActivate,ISupportOleDropSource,IDropTarget,ISynchronizeInvoke,IWin32Window,IArrangedElement,IBindableComponent,ISupportInitialize,IDataGridEditingService):
"""
Displays ADO.NET data in a scrollable grid. The System.Windows.Forms.DataGridView control replaces and adds functionality to the System.Windows.Forms.DataGrid control; however,the System.Windows.Forms.DataGrid control is retained for both backward compatibility and future use,if you choose.
DataGrid()
"""
def AccessibilityNotifyClients(self,*args):
"""
AccessibilityNotifyClients(self: Control,accEvent: AccessibleEvents,objectID: int,childID: int)
Notifies the accessibility client applications of the specified
   System.Windows.Forms.AccessibleEvents for the specified child control.
accEvent: The System.Windows.Forms.AccessibleEvents to notify the accessibility client
applications of.
objectID: The identifier of the System.Windows.Forms.AccessibleObject.
childID: The child System.Windows.Forms.Control to notify of the accessible event.
AccessibilityNotifyClients(self: Control,accEvent: AccessibleEvents,childID: int)
Notifies the accessibility client applications of the specified
System.Windows.Forms.AccessibleEvents for the specified child control.
accEvent: The System.Windows.Forms.AccessibleEvents to notify the accessibility client
applications of.
childID: The child System.Windows.Forms.Control to notify of the accessible event.
"""
pass
def add_RowHeaderClick(self,*args):
""" add_RowHeaderClick(self: DataGrid,value: EventHandler) """
pass
def BeginEdit(self,gridColumn,rowNumber):
"""
BeginEdit(self: DataGrid,gridColumn: DataGridColumnStyle,rowNumber: int) -> bool
Attempts to put the grid into a state where editing is allowed.
gridColumn: A System.Windows.Forms.DataGridColumnStyle to edit.
rowNumber: The number of the row to edit.
Returns: true if the method is successful; otherwise,false.
"""
pass
def BeginInit(self):
"""
BeginInit(self: DataGrid)
Begins the initialization of a System.Windows.Forms.DataGrid that is used on a
form or used by another component. The initialization occurs at run time.
"""
pass
def CancelEditing(self,*args):
"""
CancelEditing(self: DataGrid)
Cancels the current edit operation and rolls back all changes.
"""
pass
def Collapse(self,row):
"""
Collapse(self: DataGrid,row: int)
Collapses child relations,if any exist for all rows,or for a specified row.
row: The number of the row to collapse. If set to -1,all rows are collapsed.
"""
pass
def ColumnStartedEditing(self,*args):
"""
ColumnStartedEditing(self: DataGrid,editingControl: Control)
Informs the System.Windows.Forms.DataGrid control when the user begins to edit
a column using the specified control.
editingControl: The System.Windows.Forms.Control used to edit the column.
ColumnStartedEditing(self: DataGrid,bounds: Rectangle)
Informs the System.Windows.Forms.DataGrid control when the user begins to edit
the column at the specified location.
bounds: The System.Drawing.Rectangle that defines the location of the edited column.
"""
pass
def CreateAccessibilityInstance(self,*args):
"""
CreateAccessibilityInstance(self: DataGrid) -> AccessibleObject
Constructs a new instance of the accessibility object for this control.
Returns: The System.Windows.Forms.Control.ControlAccessibleObject for this control.
"""
pass
def CreateControlsInstance(self,*args):
"""
CreateControlsInstance(self: Control) -> ControlCollection
Creates a new instance of the control collection for the control.
Returns: A new instance of System.Windows.Forms.Control.ControlCollection assigned to
the control.
"""
pass
def CreateGridColumn(self,*args):
"""
CreateGridColumn(self: DataGrid,prop: PropertyDescriptor) -> DataGridColumnStyle
Creates a new System.Windows.Forms.DataGridColumnStyle with the specified
System.ComponentModel.PropertyDescriptor.
prop: The System.ComponentModel.PropertyDescriptor to use for creating the grid
column style.
Returns: The new System.Windows.Forms.DataGridColumnStyle.
CreateGridColumn(self: DataGrid,prop: PropertyDescriptor,isDefault: bool) -> DataGridColumnStyle
Creates a System.Windows.Forms.DataGridColumnStyle using the specified
System.ComponentModel.PropertyDescriptor.
prop: The System.ComponentModel.PropertyDescriptor to use for creating the grid
column style.
isDefault: true to set the column style as the default; otherwise,false.
Returns: The new System.Windows.Forms.DataGridColumnStyle.
"""
pass
def CreateHandle(self,*args):
"""
CreateHandle(self: Control)
Creates a handle for the control.
"""
pass
def DefWndProc(self,*args):
"""
DefWndProc(self: Control,m: Message) -> Message
Sends the specified message to the default window procedure.
m: The Windows System.Windows.Forms.Message to process.
"""
pass
def DestroyHandle(self,*args):
"""
DestroyHandle(self: Control)
Destroys the handle associated with the control.
"""
pass
def Dispose(self):
"""
Dispose(self: DataGrid,disposing: bool)
Disposes of the resources (other than memory) used by the
System.Windows.Forms.DataGrid.
disposing: true to release both managed and unmanaged resources; false to release only
unmanaged resources.
"""
pass
def EndEdit(self,gridColumn,rowNumber,shouldAbort):
"""
EndEdit(self: DataGrid,gridColumn: DataGridColumnStyle,rowNumber: int,shouldAbort: bool) -> bool
Requests an end to an edit operation taking place on the
System.Windows.Forms.DataGrid control.
gridColumn: The System.Windows.Forms.DataGridColumnStyle to cease editing.
rowNumber: The number of the row to cease editing.
shouldAbort: Set to true if the current operation should be stopped.
Returns: true if the editing operation ceases; otherwise,false.
"""
pass
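    # Hedged usage sketch (IronPython; `grid` and `style` are hypothetical
    # objects, not defined in this stub file):
    #   if grid.BeginEdit(style, rowNumber):
    #       ... mutate the cell ...
    #       grid.EndEdit(style, rowNumber, False)  # shouldAbort=False commits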
def EndInit(self):
"""
EndInit(self: DataGrid)
Ends the initialization of a System.Windows.Forms.DataGrid that is used on a
form or used by another component. The initialization occurs at run time.
"""
pass
def Expand(self,row):
"""
Expand(self: DataGrid,row: int)
Displays child relations,if any exist,for all rows or a specific row.
row: The number of the row to expand. If set to -1,all rows are expanded.
"""
pass
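    # Hedged usage sketch: Expand(-1) and Collapse(-1) act on every row, while
    # a non-negative index targets one row, e.g. grid.Expand(0) followed by
    # grid.Collapse(0). (`grid` is a hypothetical DataGrid instance.)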
def GetAccessibilityObjectById(self,*args):
"""
GetAccessibilityObjectById(self: Control,objectId: int) -> AccessibleObject
Retrieves the specified System.Windows.Forms.AccessibleObject.
objectId: An Int32 that identifies the System.Windows.Forms.AccessibleObject to retrieve.
Returns: An System.Windows.Forms.AccessibleObject.
"""
pass
def GetAutoSizeMode(self,*args):
"""
GetAutoSizeMode(self: Control) -> AutoSizeMode
Retrieves a value indicating how a control will behave when its
System.Windows.Forms.Control.AutoSize property is enabled.
Returns: One of the System.Windows.Forms.AutoSizeMode values.
"""
pass
def GetCellBounds(self,*__args):
"""
GetCellBounds(self: DataGrid,dgc: DataGridCell) -> Rectangle
Gets the System.Drawing.Rectangle of the cell specified by
System.Windows.Forms.DataGridCell.
dgc: The System.Windows.Forms.DataGridCell to look up.
Returns: A System.Drawing.Rectangle that defines the current cell's corners.
GetCellBounds(self: DataGrid,row: int,col: int) -> Rectangle
Gets the System.Drawing.Rectangle of the cell specified by row and column
number.
row: The number of the cell's row.
col: The number of the cell's column.
Returns: A System.Drawing.Rectangle that defines the current cell's corners.
"""
pass
def GetCurrentCellBounds(self):
"""
GetCurrentCellBounds(self: DataGrid) -> Rectangle
Gets a System.Drawing.Rectangle that specifies the four corners of the selected
cell.
Returns: A System.Drawing.Rectangle that defines the current cell's corners.
"""
pass
def GetOutputTextDelimiter(self,*args):
"""
GetOutputTextDelimiter(self: DataGrid) -> str
Gets the string that is the delimiter between columns when row contents are
copied to the Clipboard.
Returns: The string value "\t",which represents a tab used to separate columns in a row.
"""
pass
def GetScaledBounds(self,*args):
"""
GetScaledBounds(self: Control,bounds: Rectangle,factor: SizeF,specified: BoundsSpecified) -> Rectangle
Retrieves the bounds within which the control is scaled.
bounds: A System.Drawing.Rectangle that specifies the area for which to retrieve the
display bounds.
factor: The height and width of the control's bounds.
specified: One of the values of System.Windows.Forms.BoundsSpecified that specifies the
bounds of the control to use when defining its size and position.
Returns: A System.Drawing.Rectangle representing the bounds within which the control is
scaled.
"""
pass
def GetService(self,*args):
"""
GetService(self: Component,service: Type) -> object
Returns an object that represents a service provided by the
System.ComponentModel.Component or by its System.ComponentModel.Container.
service: A service provided by the System.ComponentModel.Component.
Returns: An System.Object that represents a service provided by the
System.ComponentModel.Component,or null if the System.ComponentModel.Component
does not provide the specified service.
"""
pass
def GetStyle(self,*args):
"""
GetStyle(self: Control,flag: ControlStyles) -> bool
Retrieves the value of the specified control style bit for the control.
flag: The System.Windows.Forms.ControlStyles bit to return the value from.
Returns: true if the specified control style bit is set to true; otherwise,false.
"""
pass
def GetTopLevel(self,*args):
"""
GetTopLevel(self: Control) -> bool
Determines if the control is a top-level control.
Returns: true if the control is a top-level control; otherwise,false.
"""
pass
def GridHScrolled(self,*args):
"""
GridHScrolled(self: DataGrid,sender: object,se: ScrollEventArgs)
Listens for the scroll event of the horizontal scroll bar.
sender: An System.Object that contains data about the control.
se: A System.Windows.Forms.ScrollEventArgs that contains the event data.
"""
pass
def GridVScrolled(self,*args):
"""
GridVScrolled(self: DataGrid,sender: object,se: ScrollEventArgs)
Listens for the scroll event of the vertical scroll bar.
sender: An System.Object that contains data about the control.
se: A System.Windows.Forms.ScrollEventArgs that contains the event data.
"""
pass
def HitTest(self,*__args):
"""
HitTest(self: DataGrid,position: Point) -> HitTestInfo
Gets information,such as row and column number of a clicked point on the grid,
about the grid using a specific System.Drawing.Point.
position: A System.Drawing.Point that represents single x,y coordinate.
Returns: A System.Windows.Forms.DataGrid.HitTestInfo that contains specific information
about the grid.
HitTest(self: DataGrid,x: int,y: int) -> HitTestInfo
Gets information,such as row and column number of a clicked point on the grid,
using the x and y coordinate passed to the method.
x: The horizontal position of the coordinate.
y: The vertical position of the coordinate.
Returns: A System.Windows.Forms.DataGrid.HitTestInfo that contains information about the
clicked part of the grid.
"""
pass
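    # Hedged usage sketch (`grid` is a hypothetical DataGrid instance; HitTest
    # is typically called from a MouseDown handler):
    #   info = grid.HitTest(e.X, e.Y)
    #   row, col = info.Row, info.Column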
def InitLayout(self,*args):
"""
InitLayout(self: Control)
Called after the control has been added to another container.
"""
pass
def InvokeGotFocus(self,*args):
"""
InvokeGotFocus(self: Control,toInvoke: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.GotFocus event for the specified
control.
toInvoke: The System.Windows.Forms.Control to assign the event to.
e: An System.EventArgs that contains the event data.
"""
pass
def InvokeLostFocus(self,*args):
"""
InvokeLostFocus(self: Control,toInvoke: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.LostFocus event for the specified
control.
toInvoke: The System.Windows.Forms.Control to assign the event to.
e: An System.EventArgs that contains the event data.
"""
pass
def InvokeOnClick(self,*args):
"""
InvokeOnClick(self: Control,toInvoke: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Click event for the specified control.
toInvoke: The System.Windows.Forms.Control to assign the
System.Windows.Forms.Control.Click event to.
e: An System.EventArgs that contains the event data.
"""
pass
def InvokePaint(self,*args):
"""
InvokePaint(self: Control,c: Control,e: PaintEventArgs)
Raises the System.Windows.Forms.Control.Paint event for the specified control.
c: The System.Windows.Forms.Control to assign the
System.Windows.Forms.Control.Paint event to.
e: An System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def InvokePaintBackground(self,*args):
"""
InvokePaintBackground(self: Control,c: Control,e: PaintEventArgs)
Raises the PaintBackground event for the specified control.
c: The System.Windows.Forms.Control to assign the
System.Windows.Forms.Control.Paint event to.
e: An System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def IsExpanded(self,rowNumber):
"""
IsExpanded(self: DataGrid,rowNumber: int) -> bool
Gets a value that indicates whether the node of a specified row is expanded or
collapsed.
rowNumber: The number of the row in question.
Returns: true if the node is expanded; otherwise,false.
"""
pass
def IsInputChar(self,*args):
"""
IsInputChar(self: Control,charCode: Char) -> bool
Determines if a character is an input character that the control recognizes.
charCode: The character to test.
Returns: true if the character should be sent directly to the control and not
preprocessed; otherwise,false.
"""
pass
def IsInputKey(self,*args):
"""
IsInputKey(self: Control,keyData: Keys) -> bool
Determines whether the specified key is a regular input key or a special key
that requires preprocessing.
keyData: One of the System.Windows.Forms.Keys values.
Returns: true if the specified key is a regular input key; otherwise,false.
"""
pass
def IsSelected(self,row):
"""
IsSelected(self: DataGrid,row: int) -> bool
Gets a value indicating whether a specified row is selected.
row: The number of the row you are interested in.
Returns: true if the row is selected; otherwise,false.
"""
pass
def MemberwiseClone(self,*args):
"""
MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
Creates a shallow copy of the current System.MarshalByRefObject object.
cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which
will cause the object to be assigned a new identity when it is marshaled across
a remoting boundary. A value of false is usually appropriate. true to copy the
current System.MarshalByRefObject object's identity to its clone,which will
cause remoting client calls to be routed to the remote server object.
Returns: A shallow copy of the current System.MarshalByRefObject object.
MemberwiseClone(self: object) -> object
Creates a shallow copy of the current System.Object.
Returns: A shallow copy of the current System.Object.
"""
pass
def NavigateBack(self):
"""
NavigateBack(self: DataGrid)
Navigates back to the table previously displayed in the grid.
"""
pass
def NavigateTo(self,rowNumber,relationName):
"""
NavigateTo(self: DataGrid,rowNumber: int,relationName: str)
Navigates to the table specified by row and relation name.
rowNumber: The number of the row to navigate to.
relationName: The name of the child relation to navigate to.
"""
pass
def NotifyInvalidate(self,*args):
"""
NotifyInvalidate(self: Control,invalidatedArea: Rectangle)
Raises the System.Windows.Forms.Control.Invalidated event with a specified
region of the control to invalidate.
invalidatedArea: A System.Drawing.Rectangle representing the area to invalidate.
"""
pass
def OnAllowNavigationChanged(self,*args):
"""
OnAllowNavigationChanged(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.DataGrid.AllowNavigationChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnAutoSizeChanged(self,*args):
"""
OnAutoSizeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.AutoSizeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBackButtonClicked(self,*args):
"""
OnBackButtonClicked(self: DataGrid,sender: object,e: EventArgs)
Listens for the caption's back button clicked event.
sender: An System.Object that contains data about the control.
e: An System.EventArgs that contains data about the event.
"""
pass
def OnBackColorChanged(self,*args):
"""
OnBackColorChanged(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.Control.BackColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBackgroundColorChanged(self,*args):
"""
OnBackgroundColorChanged(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.DataGrid.BackgroundColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBackgroundImageChanged(self,*args):
"""
OnBackgroundImageChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackgroundImageChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBackgroundImageLayoutChanged(self,*args):
"""
OnBackgroundImageLayoutChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackgroundImageLayoutChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBindingContextChanged(self,*args):
"""
OnBindingContextChanged(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.Control.BindingContextChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBorderStyleChanged(self,*args):
"""
OnBorderStyleChanged(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.DataGrid.BorderStyleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnCaptionVisibleChanged(self,*args):
"""
OnCaptionVisibleChanged(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.DataGrid.CaptionVisibleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnCausesValidationChanged(self,*args):
"""
OnCausesValidationChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.CausesValidationChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnChangeUICues(self,*args):
"""
OnChangeUICues(self: Control,e: UICuesEventArgs)
Raises the System.Windows.Forms.Control.ChangeUICues event.
e: A System.Windows.Forms.UICuesEventArgs that contains the event data.
"""
pass
def OnClick(self,*args):
"""
OnClick(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Click event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnClientSizeChanged(self,*args):
"""
OnClientSizeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ClientSizeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnContextMenuChanged(self,*args):
"""
OnContextMenuChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ContextMenuChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnContextMenuStripChanged(self,*args):
"""
OnContextMenuStripChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ContextMenuStripChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnControlAdded(self,*args):
"""
OnControlAdded(self: Control,e: ControlEventArgs)
Raises the System.Windows.Forms.Control.ControlAdded event.
e: A System.Windows.Forms.ControlEventArgs that contains the event data.
"""
pass
def OnControlRemoved(self,*args):
"""
OnControlRemoved(self: Control,e: ControlEventArgs)
Raises the System.Windows.Forms.Control.ControlRemoved event.
e: A System.Windows.Forms.ControlEventArgs that contains the event data.
"""
pass
def OnCreateControl(self,*args):
"""
OnCreateControl(self: Control)
Raises the System.Windows.Forms.Control.CreateControl method.
"""
pass
def OnCurrentCellChanged(self,*args):
"""
OnCurrentCellChanged(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.DataGrid.CurrentCellChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnCursorChanged(self,*args):
"""
OnCursorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.CursorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDataSourceChanged(self,*args):
"""
OnDataSourceChanged(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.DataGrid.DataSourceChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDockChanged(self,*args):
"""
OnDockChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.DockChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDoubleClick(self,*args):
"""
OnDoubleClick(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.DoubleClick event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDragDrop(self,*args):
"""
OnDragDrop(self: Control,drgevent: DragEventArgs)
Raises the System.Windows.Forms.Control.DragDrop event.
drgevent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnDragEnter(self,*args):
"""
OnDragEnter(self: Control,drgevent: DragEventArgs)
Raises the System.Windows.Forms.Control.DragEnter event.
drgevent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnDragLeave(self,*args):
"""
OnDragLeave(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.DragLeave event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDragOver(self,*args):
"""
OnDragOver(self: Control,drgevent: DragEventArgs)
Raises the System.Windows.Forms.Control.DragOver event.
drgevent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnEnabledChanged(self,*args):
"""
OnEnabledChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.EnabledChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnEnter(self,*args):
"""
OnEnter(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.Control.Enter event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnFlatModeChanged(self,*args):
"""
OnFlatModeChanged(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.DataGrid.FlatModeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnFontChanged(self,*args):
"""
OnFontChanged(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.Control.FontChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnForeColorChanged(self,*args):
"""
OnForeColorChanged(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.Control.ForeColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnGiveFeedback(self,*args):
"""
OnGiveFeedback(self: Control,gfbevent: GiveFeedbackEventArgs)
Raises the System.Windows.Forms.Control.GiveFeedback event.
gfbevent: A System.Windows.Forms.GiveFeedbackEventArgs that contains the event data.
"""
pass
def OnGotFocus(self,*args):
"""
OnGotFocus(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.GotFocus event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnHandleCreated(self,*args):
"""
OnHandleCreated(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.Control.CreateHandle event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnHandleDestroyed(self,*args):
"""
OnHandleDestroyed(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.Control.DestroyHandle event.
e: An System.EventArgs containing the event data.
"""
pass
def OnHelpRequested(self,*args):
"""
OnHelpRequested(self: Control,hevent: HelpEventArgs)
Raises the System.Windows.Forms.Control.HelpRequested event.
hevent: A System.Windows.Forms.HelpEventArgs that contains the event data.
"""
pass
def OnImeModeChanged(self,*args):
"""
OnImeModeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ImeModeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnInvalidated(self,*args):
"""
OnInvalidated(self: Control,e: InvalidateEventArgs)
Raises the System.Windows.Forms.Control.Invalidated event.
e: An System.Windows.Forms.InvalidateEventArgs that contains the event data.
"""
pass
def OnKeyDown(self,*args):
"""
OnKeyDown(self: DataGrid,ke: KeyEventArgs)
Raises the System.Windows.Forms.Control.KeyDown event.
ke: A System.Windows.Forms.KeyEventArgs that provides data about the
System.Windows.Forms.Control.OnKeyDown(System.Windows.Forms.KeyEventArgs)
event.
"""
pass
def OnKeyPress(self,*args):
"""
OnKeyPress(self: DataGrid,kpe: KeyPressEventArgs)
Raises the System.Windows.Forms.Control.KeyPress event.
kpe: A System.Windows.Forms.KeyPressEventArgs that contains data about the
System.Windows.Forms.Control.OnKeyPress(System.Windows.Forms.KeyPressEventArgs)
event
"""
pass
def OnKeyUp(self,*args):
"""
OnKeyUp(self: Control,e: KeyEventArgs)
Raises the System.Windows.Forms.Control.KeyUp event.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""
pass
def OnLayout(self,*args):
"""
OnLayout(self: DataGrid,levent: LayoutEventArgs)
Raises the System.Windows.Forms.Control.Layout event,which repositions
controls and updates scroll bars.
levent: A System.Windows.Forms.LayoutEventArgs that contains the event data.
"""
pass
def OnLeave(self,*args):
"""
OnLeave(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.Control.Leave event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnLocationChanged(self,*args):
"""
OnLocationChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.LocationChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnLostFocus(self,*args):
"""
OnLostFocus(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.LostFocus event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMarginChanged(self,*args):
"""
OnMarginChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MarginChanged event.
e: A System.EventArgs that contains the event data.
"""
pass
def OnMouseCaptureChanged(self,*args):
"""
OnMouseCaptureChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseCaptureChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseClick(self,*args):
"""
OnMouseClick(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseClick event.
e: An System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseDoubleClick(self,*args):
"""
OnMouseDoubleClick(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseDoubleClick event.
e: An System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseDown(self,*args):
"""
OnMouseDown(self: DataGrid,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseDown event.
e: A System.Windows.Forms.MouseEventArgs that contains data about the
System.Windows.Forms.Control.OnMouseDown(System.Windows.Forms.MouseEventArgs)
event.
"""
pass
def OnMouseEnter(self,*args):
"""
OnMouseEnter(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseEnter event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseHover(self,*args):
"""
OnMouseHover(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseHover event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseLeave(self,*args):
"""
OnMouseLeave(self: DataGrid,e: EventArgs)
Creates the System.Windows.Forms.Control.MouseLeave event.
e: An System.EventArgs that contains data about the
System.Windows.Forms.Control.OnMouseLeave(System.EventArgs) event.
"""
pass
def OnMouseMove(self,*args):
"""
OnMouseMove(self: DataGrid,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseMove event.
e: A System.Windows.Forms.MouseEventArgs that contains data about the
System.Windows.Forms.Control.OnMouseMove(System.Windows.Forms.MouseEventArgs)
event.
"""
pass
def OnMouseUp(self,*args):
"""
OnMouseUp(self: DataGrid,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseUp event.
e: A System.Windows.Forms.MouseEventArgs that contains data about the
System.Windows.Forms.Control.OnMouseUp(System.Windows.Forms.MouseEventArgs)
event.
"""
pass
def OnMouseWheel(self,*args):
"""
OnMouseWheel(self: DataGrid,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseWheel event.
e: A System.Windows.Forms.MouseEventArgs that contains data about the
System.Windows.Forms.Control.OnMouseUp(System.Windows.Forms.MouseEventArgs)
event.
"""
pass
def OnMove(self,*args):
"""
OnMove(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Move event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnNavigate(self,*args):
"""
OnNavigate(self: DataGrid,e: NavigateEventArgs)
Raises the System.Windows.Forms.DataGrid.Navigate event.
e: A System.Windows.Forms.NavigateEventArgs that contains the event data.
"""
pass
def OnNotifyMessage(self,*args):
"""
OnNotifyMessage(self: Control,m: Message)
Notifies the control of Windows messages.
m: A System.Windows.Forms.Message that represents the Windows message.
"""
pass
def OnPaddingChanged(self,*args):
"""
OnPaddingChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.PaddingChanged event.
e: A System.EventArgs that contains the event data.
"""
pass
def OnPaint(self,*args):
"""
OnPaint(self: DataGrid,pe: PaintEventArgs)
Raises the System.Windows.Forms.Control.Paint event.
pe: A System.Windows.Forms.PaintEventArgs which contains data about the event.
"""
pass
def OnPaintBackground(self,*args):
"""
OnPaintBackground(self: DataGrid,ebe: PaintEventArgs)
   Overrides
   System.Windows.Forms.Control.OnPaintBackground(System.Windows.Forms.PaintEventArgs)
   to prevent painting the background of the System.Windows.Forms.DataGrid
   control.
ebe: A System.Windows.Forms.PaintEventArgs that contains information about the
control to paint.
"""
pass
def OnParentBackColorChanged(self,*args):
"""
OnParentBackColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackColorChanged event when the
System.Windows.Forms.Control.BackColor property value of the control's
container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentBackgroundImageChanged(self,*args):
"""
OnParentBackgroundImageChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackgroundImageChanged event when the
System.Windows.Forms.Control.BackgroundImage property value of the control's
container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentBindingContextChanged(self,*args):
"""
OnParentBindingContextChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BindingContextChanged event when the
System.Windows.Forms.Control.BindingContext property value of the control's
container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentChanged(self,*args):
"""
OnParentChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ParentChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentCursorChanged(self,*args):
"""
OnParentCursorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.CursorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentEnabledChanged(self,*args):
"""
OnParentEnabledChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.EnabledChanged event when the
System.Windows.Forms.Control.Enabled property value of the control's container
changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentFontChanged(self,*args):
"""
OnParentFontChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.FontChanged event when the
System.Windows.Forms.Control.Font property value of the control's container
changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentForeColorChanged(self,*args):
"""
OnParentForeColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ForeColorChanged event when the
System.Windows.Forms.Control.ForeColor property value of the control's
container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentRightToLeftChanged(self,*args):
"""
OnParentRightToLeftChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.RightToLeftChanged event when the
System.Windows.Forms.Control.RightToLeft property value of the control's
container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentRowsLabelStyleChanged(self,*args):
"""
OnParentRowsLabelStyleChanged(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.DataGrid.ParentRowsLabelStyleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentRowsVisibleChanged(self,*args):
"""
OnParentRowsVisibleChanged(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.DataGrid.ParentRowsVisibleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentVisibleChanged(self,*args):
"""
OnParentVisibleChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.VisibleChanged event when the
System.Windows.Forms.Control.Visible property value of the control's container
changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnPreviewKeyDown(self,*args):
"""
OnPreviewKeyDown(self: Control,e: PreviewKeyDownEventArgs)
Raises the System.Windows.Forms.Control.PreviewKeyDown event.
e: A System.Windows.Forms.PreviewKeyDownEventArgs that contains the event data.
"""
pass
def OnPrint(self,*args):
"""
OnPrint(self: Control,e: PaintEventArgs)
Raises the System.Windows.Forms.Control.Paint event.
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def OnQueryContinueDrag(self,*args):
"""
OnQueryContinueDrag(self: Control,qcdevent: QueryContinueDragEventArgs)
Raises the System.Windows.Forms.Control.QueryContinueDrag event.
qcdevent: A System.Windows.Forms.QueryContinueDragEventArgs that contains the event data.
"""
pass
def OnReadOnlyChanged(self,*args):
"""
OnReadOnlyChanged(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.DataGrid.ReadOnlyChanged event
e: An System.EventArgs that contains the event data.
"""
pass
def OnRegionChanged(self,*args):
"""
OnRegionChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.RegionChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnResize(self,*args):
"""
OnResize(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.Control.Resize event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnRightToLeftChanged(self,*args):
"""
OnRightToLeftChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.RightToLeftChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnRowHeaderClick(self,*args):
"""
OnRowHeaderClick(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.DataGrid.RowHeaderClick event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnScroll(self,*args):
"""
OnScroll(self: DataGrid,e: EventArgs)
Raises the System.Windows.Forms.DataGrid.Scroll event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnShowParentDetailsButtonClicked(self,*args):
"""
OnShowParentDetailsButtonClicked(self: DataGrid,sender: object,e: EventArgs)
Raises the System.Windows.Forms.DataGrid.ShowParentDetailsButtonClick event.
sender: The source of the event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnSizeChanged(self,*args):
"""
OnSizeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.SizeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnStyleChanged(self,*args):
"""
OnStyleChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.StyleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnSystemColorsChanged(self,*args):
"""
OnSystemColorsChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.SystemColorsChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnTabIndexChanged(self,*args):
"""
OnTabIndexChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.TabIndexChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnTabStopChanged(self,*args):
"""
OnTabStopChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.TabStopChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnTextChanged(self,*args):
"""
OnTextChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.TextChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnValidated(self,*args):
"""
OnValidated(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Validated event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnValidating(self,*args):
"""
OnValidating(self: Control,e: CancelEventArgs)
Raises the System.Windows.Forms.Control.Validating event.
e: A System.ComponentModel.CancelEventArgs that contains the event data.
"""
pass
def OnVisibleChanged(self,*args):
"""
OnVisibleChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.VisibleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def ProcessCmdKey(self,*args):
"""
ProcessCmdKey(self: Control,msg: Message,keyData: Keys) -> (bool,Message)
Processes a command key.
msg: A System.Windows.Forms.Message,passed by reference,that represents the window
message to process.
keyData: One of the System.Windows.Forms.Keys values that represents the key to process.
Returns: true if the character was processed by the control; otherwise,false.
"""
pass
def ProcessDialogChar(self,*args):
"""
ProcessDialogChar(self: Control,charCode: Char) -> bool
Processes a dialog character.
charCode: The character to process.
Returns: true if the character was processed by the control; otherwise,false.
"""
pass
def ProcessDialogKey(self,*args):
"""
ProcessDialogKey(self: DataGrid,keyData: Keys) -> bool
Gets or sets a value that indicates whether a key should be processed further.
keyData: A System.Windows.Forms.Keys that contains data about the pressed key.
Returns: true,the key should be processed; otherwise,false.
"""
pass
def ProcessGridKey(self,*args):
"""
ProcessGridKey(self: DataGrid,ke: KeyEventArgs) -> bool
Processes keys for grid navigation.
ke: A System.Windows.Forms.KeyEventArgs that contains data about the key up or key
down event.
Returns: true,if the key was processed; otherwise false.
"""
pass
def ProcessKeyEventArgs(self,*args):
"""
ProcessKeyEventArgs(self: Control,m: Message) -> (bool,Message)
Processes a key message and generates the appropriate control events.
m: A System.Windows.Forms.Message,passed by reference,that represents the window
message to process.
Returns: true if the message was processed by the control; otherwise,false.
"""
pass
def ProcessKeyMessage(self,*args):
"""
ProcessKeyMessage(self: Control,m: Message) -> (bool,Message)
Processes a keyboard message.
m: A System.Windows.Forms.Message,passed by reference,that represents the window
message to process.
Returns: true if the message was processed by the control; otherwise,false.
"""
pass
def ProcessKeyPreview(self,*args):
"""
ProcessKeyPreview(self: DataGrid,m: Message) -> (bool,Message)
Previews a keyboard message and returns a value indicating if the key was
consumed.
m: A System.Windows.Forms.Message that contains data about the event. The
parameter is passed by reference.
Returns: true,if the key was consumed; otherwise,false.
"""
pass
def ProcessMnemonic(self,*args):
"""
ProcessMnemonic(self: Control,charCode: Char) -> bool
Processes a mnemonic character.
charCode: The character to process.
Returns: true if the character was processed as a mnemonic by the control; otherwise,
false.
"""
pass
def ProcessTabKey(self,*args):
"""
ProcessTabKey(self: DataGrid,keyData: Keys) -> bool
Gets a value indicating whether the Tab key should be processed.
keyData: A System.Windows.Forms.Keys that contains data about which the pressed key.
Returns: true if the TAB key should be processed; otherwise,false.
"""
pass
def RaiseDragEvent(self,*args):
"""
RaiseDragEvent(self: Control,key: object,e: DragEventArgs)
Raises the appropriate drag event.
key: The event to raise.
e: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def RaiseKeyEvent(self,*args):
"""
RaiseKeyEvent(self: Control,key: object,e: KeyEventArgs)
Raises the appropriate key event.
key: The event to raise.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""
pass
def RaiseMouseEvent(self,*args):
"""
RaiseMouseEvent(self: Control,key: object,e: MouseEventArgs)
Raises the appropriate mouse event.
key: The event to raise.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def RaisePaintEvent(self,*args):
"""
RaisePaintEvent(self: Control,key: object,e: PaintEventArgs)
Raises the appropriate paint event.
key: The event to raise.
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def RecreateHandle(self,*args):
"""
RecreateHandle(self: Control)
Forces the re-creation of the handle for the control.
"""
pass
def remove_RowHeaderClick(self,*args):
""" remove_RowHeaderClick(self: DataGrid,value: EventHandler) """
pass
def ResetAlternatingBackColor(self):
"""
ResetAlternatingBackColor(self: DataGrid)
Resets the System.Windows.Forms.DataGrid.AlternatingBackColor property to its
default color.
"""
pass
def ResetBackColor(self):
"""
ResetBackColor(self: DataGrid)
Resets the System.Windows.Forms.DataGrid.BackColor property to its default
value.
"""
pass
def ResetForeColor(self):
"""
ResetForeColor(self: DataGrid)
Resets the System.Windows.Forms.DataGrid.ForeColor property to its default
value.
"""
pass
def ResetGridLineColor(self):
"""
ResetGridLineColor(self: DataGrid)
Resets the System.Windows.Forms.DataGrid.GridLineColor property to its default
value.
"""
pass
def ResetHeaderBackColor(self):
"""
ResetHeaderBackColor(self: DataGrid)
Resets the System.Windows.Forms.DataGrid.HeaderBackColor property to its
default value.
"""
pass
def ResetHeaderFont(self):
"""
ResetHeaderFont(self: DataGrid)
Resets the System.Windows.Forms.DataGrid.HeaderFont property to its default
value.
"""
pass
def ResetHeaderForeColor(self):
"""
ResetHeaderForeColor(self: DataGrid)
Resets the System.Windows.Forms.DataGrid.HeaderForeColor property to its
default value.
"""
pass
def ResetLinkColor(self):
"""
ResetLinkColor(self: DataGrid)
Resets the System.Windows.Forms.DataGrid.LinkColor property to its default
value.
"""
pass
def ResetLinkHoverColor(self):
"""
ResetLinkHoverColor(self: DataGrid)
Resets the System.Windows.Forms.DataGrid.LinkHoverColor property to its default
value.
"""
pass
def ResetMouseEventArgs(self,*args):
"""
ResetMouseEventArgs(self: Control)
Resets the control to handle the System.Windows.Forms.Control.MouseLeave event.
"""
pass
def ResetSelection(self,*args):
"""
ResetSelection(self: DataGrid)
Turns off selection for all rows that are selected.
"""
pass
def ResetSelectionBackColor(self):
"""
ResetSelectionBackColor(self: DataGrid)
Resets the System.Windows.Forms.DataGrid.SelectionBackColor property to its
default value.
"""
pass
def ResetSelectionForeColor(self):
"""
ResetSelectionForeColor(self: DataGrid)
Resets the System.Windows.Forms.DataGrid.SelectionForeColor property to its
default value.
"""
pass
def RtlTranslateAlignment(self,*args):
"""
RtlTranslateAlignment(self: Control,align: ContentAlignment) -> ContentAlignment
Converts the specified System.Drawing.ContentAlignment to the appropriate
System.Drawing.ContentAlignment to support right-to-left text.
align: One of the System.Drawing.ContentAlignment values.
Returns: One of the System.Drawing.ContentAlignment values.
RtlTranslateAlignment(self: Control,align: LeftRightAlignment) -> LeftRightAlignment
Converts the specified System.Windows.Forms.LeftRightAlignment to the
appropriate System.Windows.Forms.LeftRightAlignment to support right-to-left
text.
align: One of the System.Windows.Forms.LeftRightAlignment values.
Returns: One of the System.Windows.Forms.LeftRightAlignment values.
RtlTranslateAlignment(self: Control,align: HorizontalAlignment) -> HorizontalAlignment
Converts the specified System.Windows.Forms.HorizontalAlignment to the
appropriate System.Windows.Forms.HorizontalAlignment to support right-to-left
text.
align: One of the System.Windows.Forms.HorizontalAlignment values.
Returns: One of the System.Windows.Forms.HorizontalAlignment values.
"""
pass
def RtlTranslateContent(self,*args):
"""
RtlTranslateContent(self: Control,align: ContentAlignment) -> ContentAlignment
Converts the specified System.Drawing.ContentAlignment to the appropriate
System.Drawing.ContentAlignment to support right-to-left text.
align: One of the System.Drawing.ContentAlignment values.
Returns: One of the System.Drawing.ContentAlignment values.
"""
pass
def RtlTranslateHorizontal(self,*args):
"""
RtlTranslateHorizontal(self: Control,align: HorizontalAlignment) -> HorizontalAlignment
Converts the specified System.Windows.Forms.HorizontalAlignment to the
appropriate System.Windows.Forms.HorizontalAlignment to support right-to-left
text.
align: One of the System.Windows.Forms.HorizontalAlignment values.
Returns: One of the System.Windows.Forms.HorizontalAlignment values.
"""
pass
def RtlTranslateLeftRight(self,*args):
"""
RtlTranslateLeftRight(self: Control,align: LeftRightAlignment) -> LeftRightAlignment
Converts the specified System.Windows.Forms.LeftRightAlignment to the
appropriate System.Windows.Forms.LeftRightAlignment to support right-to-left
text.
align: One of the System.Windows.Forms.LeftRightAlignment values.
Returns: One of the System.Windows.Forms.LeftRightAlignment values.
"""
pass
def ScaleControl(self,*args):
"""
ScaleControl(self: Control,factor: SizeF,specified: BoundsSpecified)
Scales a control's location,size,padding and margin.
factor: The factor by which the height and width of the control will be scaled.
specified: A System.Windows.Forms.BoundsSpecified value that specifies the bounds of the
control to use when defining its size and position.
"""
pass
def ScaleCore(self,*args):
"""
ScaleCore(self: Control,dx: Single,dy: Single)
This method is not relevant for this class.
dx: The horizontal scaling factor.
dy: The vertical scaling factor.
"""
pass
def Select(self,row=None):
"""
Select(self: DataGrid,row: int)
Selects a specified row.
row: The index of the row to select.
"""
pass
def SetAutoSizeMode(self,*args):
"""
SetAutoSizeMode(self: Control,mode: AutoSizeMode)
Sets a value indicating how a control will behave when its
System.Windows.Forms.Control.AutoSize property is enabled.
mode: One of the System.Windows.Forms.AutoSizeMode values.
"""
pass
def SetBoundsCore(self,*args):
"""
SetBoundsCore(self: Control,x: int,y: int,width: int,height: int,specified: BoundsSpecified)
Performs the work of setting the specified bounds of this control.
x: The new System.Windows.Forms.Control.Left property value of the control.
y: The new System.Windows.Forms.Control.Top property value of the control.
width: The new System.Windows.Forms.Control.Width property value of the control.
height: The new System.Windows.Forms.Control.Height property value of the control.
specified: A bitwise combination of the System.Windows.Forms.BoundsSpecified values.
"""
pass
def SetClientSizeCore(self,*args):
"""
SetClientSizeCore(self: Control,x: int,y: int)
Sets the size of the client area of the control.
x: The client area width,in pixels.
y: The client area height,in pixels.
"""
pass
def SetDataBinding(self,dataSource,dataMember):
"""
SetDataBinding(self: DataGrid,dataSource: object,dataMember: str)
Sets the System.Windows.Forms.DataGrid.DataSource and
System.Windows.Forms.DataGrid.DataMember properties at run time.
dataSource: The data source for the System.Windows.Forms.DataGrid control.
dataMember: The System.Windows.Forms.DataGrid.DataMember string that specifies the table to
bind to within the object returned by the
System.Windows.Forms.DataGrid.DataSource property.
"""
pass
def SetStyle(self,*args):
"""
SetStyle(self: Control,flag: ControlStyles,value: bool)
Sets a specified System.Windows.Forms.ControlStyles flag to either true or
false.
flag: The System.Windows.Forms.ControlStyles bit to set.
value: true to apply the specified style to the control; otherwise,false.
"""
pass
def SetTopLevel(self,*args):
"""
SetTopLevel(self: Control,value: bool)
Sets the control as the top-level control.
value: true to set the control as the top-level control; otherwise,false.
"""
pass
def SetVisibleCore(self,*args):
"""
SetVisibleCore(self: Control,value: bool)
Sets the control to the specified visible state.
value: true to make the control visible; otherwise,false.
"""
pass
def ShouldSerializeAlternatingBackColor(self,*args):
"""
ShouldSerializeAlternatingBackColor(self: DataGrid) -> bool
Indicates whether the System.Windows.Forms.DataGrid.AlternatingBackColor
property should be persisted.
Returns: true if the property value has changed from its default; otherwise,false.
"""
pass
def ShouldSerializeBackgroundColor(self,*args):
"""
ShouldSerializeBackgroundColor(self: DataGrid) -> bool
Indicates whether the System.Windows.Forms.DataGrid.BackgroundColor property
should be persisted.
Returns: true if the property value has changed from its default; otherwise,false.
"""
pass
def ShouldSerializeCaptionBackColor(self,*args):
"""
ShouldSerializeCaptionBackColor(self: DataGrid) -> bool
Gets a value indicating whether the
System.Windows.Forms.DataGrid.CaptionBackColor property should be persisted.
Returns: true if the property value has been changed from its default; otherwise,false.
"""
pass
def ShouldSerializeCaptionForeColor(self,*args):
"""
ShouldSerializeCaptionForeColor(self: DataGrid) -> bool
Gets a value indicating whether the
System.Windows.Forms.DataGrid.CaptionForeColor property should be persisted.
Returns: true if the property value has been changed from its default; otherwise,false.
"""
pass
def ShouldSerializeGridLineColor(self,*args):
"""
ShouldSerializeGridLineColor(self: DataGrid) -> bool
Indicates whether the System.Windows.Forms.DataGrid.GridLineColor property
should be persisted.
Returns: true if the property value has changed from its default; otherwise,false.
"""
pass
def ShouldSerializeHeaderBackColor(self,*args):
"""
ShouldSerializeHeaderBackColor(self: DataGrid) -> bool
Indicates whether the System.Windows.Forms.DataGrid.HeaderBackColor property
should be persisted.
Returns: true if the property value has changed from its default; otherwise,false.
"""
pass
def ShouldSerializeHeaderFont(self,*args):
"""
ShouldSerializeHeaderFont(self: DataGrid) -> bool
Indicates whether the System.Windows.Forms.DataGrid.HeaderFont property should
be persisted.
Returns: true if the property value has changed from its default; otherwise,false.
"""
pass
def ShouldSerializeHeaderForeColor(self,*args):
"""
ShouldSerializeHeaderForeColor(self: DataGrid) -> bool
Indicates whether the System.Windows.Forms.DataGrid.HeaderForeColor property
should be persisted.
Returns: true if the property value has changed from its default; otherwise,false.
"""
pass
def ShouldSerializeLinkHoverColor(self,*args):
"""
ShouldSerializeLinkHoverColor(self: DataGrid) -> bool
Indicates whether the System.Windows.Forms.DataGrid.LinkHoverColor property
should be persisted.
Returns: true if the property value has changed from its default; otherwise,false.
"""
pass
def ShouldSerializeParentRowsBackColor(self,*args):
"""
ShouldSerializeParentRowsBackColor(self: DataGrid) -> bool
Indicates whether the System.Windows.Forms.DataGrid.ParentRowsBackColor
property should be persisted.
Returns: true if the property value has been changed from its default; otherwise,false.
"""
pass
def ShouldSerializeParentRowsForeColor(self,*args):
"""
ShouldSerializeParentRowsForeColor(self: DataGrid) -> bool
Indicates whether the System.Windows.Forms.DataGrid.ParentRowsForeColor
property should be persisted.
Returns: true if the property value has been changed from its default; otherwise,false.
"""
pass
def ShouldSerializePreferredRowHeight(self,*args):
"""
ShouldSerializePreferredRowHeight(self: DataGrid) -> bool
Indicates whether the System.Windows.Forms.DataGrid.PreferredRowHeight property
should be persisted.
Returns: true if the property value has changed from its default; otherwise,false.
"""
pass
def ShouldSerializeSelectionBackColor(self,*args):
"""
ShouldSerializeSelectionBackColor(self: DataGrid) -> bool
Indicates whether the System.Windows.Forms.DataGrid.SelectionBackColor property
should be persisted.
Returns: true if the property value has changed from its default; otherwise,false.
"""
pass
def ShouldSerializeSelectionForeColor(self,*args):
"""
ShouldSerializeSelectionForeColor(self: DataGrid) -> bool
Indicates whether the System.Windows.Forms.DataGrid.SelectionForeColor property
should be persisted.
Returns: true if the property value has changed from its default; otherwise,false.
"""
pass
def SizeFromClientSize(self,*args):
"""
SizeFromClientSize(self: Control,clientSize: Size) -> Size
Determines the size of the entire control from the height and width of its
client area.
clientSize: A System.Drawing.Size value representing the height and width of the control's
client area.
Returns: A System.Drawing.Size value representing the height and width of the entire
control.
"""
pass
def SubObjectsSiteChange(self,site):
"""
SubObjectsSiteChange(self: DataGrid,site: bool)
Adds or removes the System.Windows.Forms.DataGridTableStyle objects from the
container that is associated with the System.Windows.Forms.DataGrid.
site: true to add the System.Windows.Forms.DataGridTableStyle objects to a container;
false to remove them.
"""
pass
def UnSelect(self,row):
"""
UnSelect(self: DataGrid,row: int)
Unselects a specified row.
row: The index of the row to deselect.
"""
pass
def UpdateBounds(self,*args):
"""
UpdateBounds(self: Control,x: int,y: int,width: int,height: int,clientWidth: int,clientHeight: int)
Updates the bounds of the control with the specified size,location,and client
size.
x: The System.Drawing.Point.X coordinate of the control.
y: The System.Drawing.Point.Y coordinate of the control.
width: The System.Drawing.Size.Width of the control.
height: The System.Drawing.Size.Height of the control.
clientWidth: The client System.Drawing.Size.Width of the control.
clientHeight: The client System.Drawing.Size.Height of the control.
UpdateBounds(self: Control,x: int,y: int,width: int,height: int)
Updates the bounds of the control with the specified size and location.
x: The System.Drawing.Point.X coordinate of the control.
y: The System.Drawing.Point.Y coordinate of the control.
width: The System.Drawing.Size.Width of the control.
height: The System.Drawing.Size.Height of the control.
UpdateBounds(self: Control)
Updates the bounds of the control with the current size and location.
"""
pass
def UpdateStyles(self,*args):
"""
UpdateStyles(self: Control)
Forces the assigned styles to be reapplied to the control.
"""
pass
def UpdateZOrder(self,*args):
"""
UpdateZOrder(self: Control)
Updates the control in its parent's z-order.
"""
pass
def WndProc(self,*args):
"""
WndProc(self: Control,m: Message) -> Message
Processes Windows messages.
m: The Windows System.Windows.Forms.Message to process.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]=x.__setitem__(i,y) <==> x[i]= """
pass
def __str__(self,*args):
pass
AllowNavigation=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether navigation is allowed.
Get: AllowNavigation(self: DataGrid) -> bool
Set: AllowNavigation(self: DataGrid)=value
"""
AllowSorting=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the grid can be resorted by clicking on a column header.
Get: AllowSorting(self: DataGrid) -> bool
Set: AllowSorting(self: DataGrid)=value
"""
AlternatingBackColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the background color of odd-numbered rows of the grid.
Get: AlternatingBackColor(self: DataGrid) -> Color
Set: AlternatingBackColor(self: DataGrid)=value
"""
BackColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the background color of even-numbered rows of the grid.
Get: BackColor(self: DataGrid) -> Color
Set: BackColor(self: DataGrid)=value
"""
BackgroundColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the color of the non-row area of the grid.
Get: BackgroundColor(self: DataGrid) -> Color
Set: BackgroundColor(self: DataGrid)=value
"""
BackgroundImage=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This member is not meaningful for this control.
Get: BackgroundImage(self: DataGrid) -> Image
Set: BackgroundImage(self: DataGrid)=value
"""
BackgroundImageLayout=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This member is not meaningful for this control.
Get: BackgroundImageLayout(self: DataGrid) -> ImageLayout
Set: BackgroundImageLayout(self: DataGrid)=value
"""
BorderStyle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the grid's border style.
Get: BorderStyle(self: DataGrid) -> BorderStyle
Set: BorderStyle(self: DataGrid)=value
"""
CanEnableIme=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the System.Windows.Forms.Control.ImeMode property can be set to an active value,to enable IME support.
"""
CanRaiseEvents=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Determines if events can be raised on the control.
"""
CaptionBackColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the background color of the caption area.
Get: CaptionBackColor(self: DataGrid) -> Color
Set: CaptionBackColor(self: DataGrid)=value
"""
CaptionFont=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the font of the grid's caption.
Get: CaptionFont(self: DataGrid) -> Font
Set: CaptionFont(self: DataGrid)=value
"""
CaptionForeColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the foreground color of the caption area.
Get: CaptionForeColor(self: DataGrid) -> Color
Set: CaptionForeColor(self: DataGrid)=value
"""
CaptionText=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the text of the grid's window caption.
Get: CaptionText(self: DataGrid) -> str
Set: CaptionText(self: DataGrid)=value
"""
CaptionVisible=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value that indicates whether the grid's caption is visible.
Get: CaptionVisible(self: DataGrid) -> bool
Set: CaptionVisible(self: DataGrid)=value
"""
ColumnHeadersVisible=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the column headers of a table are visible.
Get: ColumnHeadersVisible(self: DataGrid) -> bool
Set: ColumnHeadersVisible(self: DataGrid)=value
"""
CreateParams=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the required creation parameters when the control handle is created.
"""
CurrentCell=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets which cell has the focus. Not available at design time.
Get: CurrentCell(self: DataGrid) -> DataGridCell
Set: CurrentCell(self: DataGrid)=value
"""
CurrentRowIndex=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets index of the row that currently has focus.
Get: CurrentRowIndex(self: DataGrid) -> int
Set: CurrentRowIndex(self: DataGrid)=value
"""
Cursor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This member is not meaningful for this control.
Get: Cursor(self: DataGrid) -> Cursor
Set: Cursor(self: DataGrid)=value
"""
DataMember=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the specific list in a System.Windows.Forms.DataGrid.DataSource for which the System.Windows.Forms.DataGrid control displays a grid.
Get: DataMember(self: DataGrid) -> str
Set: DataMember(self: DataGrid)=value
"""
DataSource=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the data source that the grid is displaying data for.
Get: DataSource(self: DataGrid) -> object
Set: DataSource(self: DataGrid)=value
"""
DefaultCursor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the default cursor for the control.
"""
DefaultImeMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the default Input Method Editor (IME) mode supported by the control.
"""
DefaultMargin=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the space,in pixels,that is specified by default between controls.
"""
DefaultMaximumSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the length and height,in pixels,that is specified as the default maximum size of a control.
"""
DefaultMinimumSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the length and height,in pixels,that is specified as the default minimum size of a control.
"""
DefaultPadding=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the internal spacing,in pixels,of the contents of a control.
"""
DefaultSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the default size of the control.
"""
DesignMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
"""
DoubleBuffered=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether this control should redraw its surface using a secondary buffer to reduce or prevent flicker.
"""
Events=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the list of event handlers that are attached to this System.ComponentModel.Component.
"""
FirstVisibleColumn=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the index of the first visible column in a grid.
Get: FirstVisibleColumn(self: DataGrid) -> int
"""
FlatMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the grid displays in flat mode.
Get: FlatMode(self: DataGrid) -> bool
Set: FlatMode(self: DataGrid)=value
"""
ForeColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the foreground color (typically the color of the text) property of the System.Windows.Forms.DataGrid control.
Get: ForeColor(self: DataGrid) -> Color
Set: ForeColor(self: DataGrid)=value
"""
GridLineColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the color of the grid lines.
Get: GridLineColor(self: DataGrid) -> Color
Set: GridLineColor(self: DataGrid)=value
"""
GridLineStyle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the line style of the grid.
Get: GridLineStyle(self: DataGrid) -> DataGridLineStyle
Set: GridLineStyle(self: DataGrid)=value
"""
HeaderBackColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the background color of all row and column headers.
Get: HeaderBackColor(self: DataGrid) -> Color
Set: HeaderBackColor(self: DataGrid)=value
"""
HeaderFont=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the font used for column headers.
Get: HeaderFont(self: DataGrid) -> Font
Set: HeaderFont(self: DataGrid)=value
"""
HeaderForeColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the foreground color of headers.
Get: HeaderForeColor(self: DataGrid) -> Color
Set: HeaderForeColor(self: DataGrid)=value
"""
HorizScrollBar=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the horizontal scroll bar for the grid.
"""
ImeModeBase=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the IME mode of a control.
"""
LinkColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the color of the text that you can click to navigate to a child table.
Get: LinkColor(self: DataGrid) -> Color
Set: LinkColor(self: DataGrid)=value
"""
LinkHoverColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This member is not meaningful for this control.
Get: LinkHoverColor(self: DataGrid) -> Color
Set: LinkHoverColor(self: DataGrid)=value
"""
ListManager=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Windows.Forms.CurrencyManager for this System.Windows.Forms.DataGrid control.
"""
ParentRowsBackColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the background color of parent rows.
Get: ParentRowsBackColor(self: DataGrid) -> Color
Set: ParentRowsBackColor(self: DataGrid)=value
"""
ParentRowsForeColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the foreground color of parent rows.
Get: ParentRowsForeColor(self: DataGrid) -> Color
Set: ParentRowsForeColor(self: DataGrid)=value
"""
ParentRowsLabelStyle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the way parent row labels are displayed.
Get: ParentRowsLabelStyle(self: DataGrid) -> DataGridParentRowsLabelStyle
Set: ParentRowsLabelStyle(self: DataGrid)=value
"""
ParentRowsVisible=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the parent rows of a table are visible.
Get: ParentRowsVisible(self: DataGrid) -> bool
Set: ParentRowsVisible(self: DataGrid)=value
"""
PreferredColumnWidth=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the default width of the grid columns in pixels.
Get: PreferredColumnWidth(self: DataGrid) -> int
Set: PreferredColumnWidth(self: DataGrid)=value
"""
PreferredRowHeight=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the preferred row height for the System.Windows.Forms.DataGrid control.
Get: PreferredRowHeight(self: DataGrid) -> int
Set: PreferredRowHeight(self: DataGrid)=value
"""
ReadOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the grid is in read-only mode.
Get: ReadOnly(self: DataGrid) -> bool
Set: ReadOnly(self: DataGrid)=value
"""
RenderRightToLeft=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This property is now obsolete.
"""
ResizeRedraw=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the control redraws itself when resized.
"""
RowHeadersVisible=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value that specifies whether row headers are visible.
Get: RowHeadersVisible(self: DataGrid) -> bool
Set: RowHeadersVisible(self: DataGrid)=value
"""
RowHeaderWidth=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the width of row headers.
Get: RowHeaderWidth(self: DataGrid) -> int
Set: RowHeaderWidth(self: DataGrid)=value
"""
ScaleChildren=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that determines the scaling of child controls.
"""
SelectionBackColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the background color of selected rows.
Get: SelectionBackColor(self: DataGrid) -> Color
Set: SelectionBackColor(self: DataGrid)=value
"""
SelectionForeColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or set the foreground color of selected rows.
Get: SelectionForeColor(self: DataGrid) -> Color
Set: SelectionForeColor(self: DataGrid)=value
"""
ShowFocusCues=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the control should display focus rectangles.
"""
ShowKeyboardCues=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the user interface is in the appropriate state to show or hide keyboard accelerators.
"""
Site=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Site(self: DataGrid) -> ISite
Set: Site(self: DataGrid)=value
"""
TableStyles=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the collection of System.Windows.Forms.DataGridTableStyle objects for the grid.
Get: TableStyles(self: DataGrid) -> GridTableStylesCollection
"""
Text=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This member is not meaningful for this control.
Get: Text(self: DataGrid) -> str
Set: Text(self: DataGrid)=value
"""
VertScrollBar=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the vertical scroll bar of the control.
"""
VisibleColumnCount=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the number of visible columns.
Get: VisibleColumnCount(self: DataGrid) -> int
"""
VisibleRowCount=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the number of rows visible.
Get: VisibleRowCount(self: DataGrid) -> int
"""
AllowNavigationChanged=None
BackButtonClick=None
BackgroundColorChanged=None
BackgroundImageChanged=None
BackgroundImageLayoutChanged=None
BorderStyleChanged=None
CaptionVisibleChanged=None
CurrentCellChanged=None
CursorChanged=None
DataSourceChanged=None
FlatModeChanged=None
HitTestInfo=None
HitTestType=None
Navigate=None
ParentRowsLabelStyleChanged=None
ParentRowsVisibleChanged=None
ReadOnlyChanged=None
Scroll=None
ShowParentDetailsButtonClick=None
TextChanged=None
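# ------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated stub).  A minimal
# IronPython example combining the members documented above: bind the grid to a
# DataTable via SetDataBinding and drive a few of the documented properties.
# The table and column names are hypothetical.
if __name__ == '__main__':
    import clr
    clr.AddReference('System.Windows.Forms')
    clr.AddReference('System.Data')
    from System.Windows.Forms import Application, DataGrid, DockStyle, Form
    from System.Data import DataTable
    table = DataTable('people')
    table.Columns.Add('Name')
    table.Columns.Add('Age')
    table.Rows.Add('Ada', '36')
    form = Form()
    grid = DataGrid()
    grid.Dock = DockStyle.Fill
    grid.CaptionText = 'People'      # CaptionText property documented above
    grid.ReadOnly = True             # ReadOnly property documented above
    grid.SetDataBinding(table, '')   # SetDataBinding(dataSource, dataMember)
    form.Controls.Add(grid)
    Application.Run(form)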
| 33.086093 | 373 | 0.706278 |
4a1da43c9174e328f4efb1fe56a91cea4a44d016 | 8,455 | py | Python | social_auth/tests/odnoklassniki.py | Memrise/django-social-auth | ddfecb6f78f1dc53e66689264f1c95fc81b5d3be | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2018-06-11T17:35:10.000Z | 2018-06-11T17:35:10.000Z | social_auth/tests/odnoklassniki.py | Memrise/django-social-auth | ddfecb6f78f1dc53e66689264f1c95fc81b5d3be | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | social_auth/tests/odnoklassniki.py | Memrise/django-social-auth | ddfecb6f78f1dc53e66689264f1c95fc81b5d3be | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
# -*- coding:utf-8 -*-
from unittest import skipUnless
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.testcases import LiveServerTestCase, SimpleTestCase
from django.test.utils import override_settings
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.support.ui import WebDriverWait
from social_auth.backends.contrib.odnoklassniki import odnoklassniki_oauth_sig
from social_auth.models import UserSocialAuth
import time
class SignatureTest(SimpleTestCase):
def test_oauth_signature(self):
data = {'access_token': 'cq240efje3pd0gdXUmrvvMaHyb-74XQi8',
'application_key': 'CBAJLNABABABABABA',
'method': 'users.getCurrentUser',
'format': 'JSON'}
secret = '31D6095131175A7C9656EC2C'
signature = '755fe7af274abbe545916039eb428c98'
self.assertEqual(odnoklassniki_oauth_sig(data, secret), signature)
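# A minimal sketch (illustrative only) of the signing scheme the test above
# exercises, assuming the documented Odnoklassniki algorithm: sort all
# parameters except access_token as key=value pairs, append
# md5(access_token + client_secret), and md5 the whole string.  The real
# implementation lives in social_auth.backends.contrib.odnoklassniki.
def _reference_oauth_sig(data, client_secret):
    from hashlib import md5
    token_digest = md5(data['access_token'] + client_secret).hexdigest()
    param_string = ''.join(sorted('{0}={1}'.format(k, v)
                                  for k, v in data.items()
                                  if k != 'access_token'))
    return md5(param_string + token_digest).hexdigest()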
class OdnoklassnikiLiveTest(LiveServerTestCase):
@classmethod
def setUpClass(cls):
cls.selenium = WebDriver()
super(OdnoklassnikiLiveTest, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(OdnoklassnikiLiveTest, cls).tearDownClass()
cls.selenium.quit()
def get_odnoklassniki_name(self):
        raise NotImplementedError('This method is part of the interface and should be implemented in a subclass')
class BaseOdnoklassnikiAppTest(OdnoklassnikiLiveTest):
@skipUnless(hasattr(settings, 'ODNOKLASSNIKI_APP_ID'),
"You need to have ODNOKLASSNIKI_APP_ID in settings to test iframe app")
@skipUnless(hasattr(settings, 'ODNOKLASSNIKI_SANDBOX_DEV_USERNAME'),
"You need to have ODNOKLASSNIKI_SANDBOX_DEV_USERNAME in settings to test iframe app")
@skipUnless(hasattr(settings, 'ODNOKLASSNIKI_SANDBOX_DEV_PASSWORD'),
"You need to have ODNOKLASSNIKI_SANDBOX_DEV_PASSWORD in settings to test iframe app")
def setUp(self):
self.app_id = settings.ODNOKLASSNIKI_APP_ID
self.dev_username = settings.ODNOKLASSNIKI_SANDBOX_DEV_USERNAME
self.dev_password = settings.ODNOKLASSNIKI_SANDBOX_DEV_PASSWORD
self.get_odnoklassniki_name()
def sandbox_login(self):
WebDriverWait(self.selenium, 3).until(lambda ff: ff.find_element_by_name('j_username'))
dev_username_input = self.selenium.find_element_by_name('j_username')
dev_username_input.send_keys(self.dev_username)
dev_password_input = self.selenium.find_element_by_name('j_password')
dev_password_input.send_keys(self.dev_password)
self.selenium.find_element_by_name('actionId').click()
def sandbox_logout(self):
self.selenium.get('http://api-sandbox.odnoklassniki.ru:8088/sandbox/logout.do')
WebDriverWait(self.selenium, 3).until(lambda ff: ff.find_element_by_name('j_username'))
def get_odnoklassniki_name(self):
self.selenium.get('http://api-sandbox.odnoklassniki.ru:8088/sandbox/protected/main.do')
self.sandbox_login()
WebDriverWait(self.selenium, 3).until(lambda ff: ff.find_element_by_tag_name('fieldset'))
self.odnoklassniki_name = self.selenium.find_element_by_xpath('//*[@id="command"]/fieldset/table/tbody/tr[2]/td[2]').text
self.sandbox_logout()
def login_into_sandbox(self):
self.selenium.get('http://api-sandbox.odnoklassniki.ru:8088/sandbox/protected/application/launch.do?appId={0:s}&userId=0'.format(self.app_id))
self.sandbox_login()
WebDriverWait(self.selenium, 3).until(lambda ff: ff.find_element_by_tag_name('iframe'))
time.sleep(1)
class OdnoklassnikiAppTest(BaseOdnoklassnikiAppTest):
def test_auth(self):
self.login_into_sandbox()
self.assertEqual(UserSocialAuth.objects.count(), 1)
social_auth = UserSocialAuth.objects.get()
user = social_auth.user
full_name = '{0} {1}'.format(user.first_name, user.last_name)
self.assertEqual(full_name, self.odnoklassniki_name)
self.assertTrue('apiconnection' in social_auth.extra_data)
self.assertTrue('api_server' in social_auth.extra_data)
class OdnoklassnikiAppTestExtraData(BaseOdnoklassnikiAppTest):
@override_settings(ODNOKLASSNIKI_APP_EXTRA_USER_DATA_LIST = ('gender', 'birthday', 'age'))
def test_extra_data(self):
self.login_into_sandbox()
self.assertEqual(UserSocialAuth.objects.count(), 1)
social_user = UserSocialAuth.objects.get()
user = social_user.user
full_name = '{0} {1}'.format(user.first_name, user.last_name)
self.assertEqual(full_name, self.odnoklassniki_name)
self.assertTrue(all([field in social_user.extra_data for field in ('gender', 'birthday', 'age')]))
class OdnoklassnikiOAuthTest(OdnoklassnikiLiveTest):
@skipUnless(hasattr(settings, "ODNOKLASSNIKI_OAUTH2_CLIENT_KEY"),
"You need to have ODNOKLASSNIKI_OAUTH2_CLIENT_KEY in settings to test odnoklassniki OAuth")
@skipUnless(hasattr(settings, "ODNOKLASSNIKI_TEST_USERNAME"),
"You need to have ODNOKLASSNIKI_TEST_USERNAME in settings to test odnoklassniki OAuth")
@skipUnless(hasattr(settings, "ODNOKLASSNIKI_TEST_PASSWORD"),
"You need to have ODNOKLASSNIKI_TEST_PASSWORD in settings to test odnoklassniki OAuth")
def setUp(self):
self.username = settings.ODNOKLASSNIKI_TEST_USERNAME
self.password = settings.ODNOKLASSNIKI_TEST_PASSWORD
self.get_odnoklassniki_name()
def get_odnoklassniki_name(self):
#Load login page
self.selenium.get('http://www.odnoklassniki.ru/')
WebDriverWait(self.selenium, 3).until(lambda ff: ff.find_element_by_id('field_email'))
email_input = self.selenium.find_element_by_id('field_email')
email_input.send_keys(self.username)
pw_input = self.selenium.find_element_by_id('field_password')
pw_input.send_keys(self.password)
self.selenium.find_element_by_id('hook_FormButton_button_go').click()
#Submit form, wait for successful login
name_css_sel = '#hook_Block_MiddleColumnTopCardUser .mctc_name>a.mctc_nameLink'
WebDriverWait(self.selenium, 2).until(lambda ff: ff.find_element_by_css_selector(name_css_sel))
self.odnoklassniki_name = self.selenium.find_element_by_css_selector(name_css_sel).text
#Remember the name of logged user
link = [el for el in self.selenium.find_elements_by_css_selector('.portal-headline__login__link') if el.text == 'выход']
self.assertTrue(len(link) == 1)
link[0].click()
#Click on logout link to show logout popup
WebDriverWait(self.selenium, 2).until(lambda ff: ff.find_element_by_id('hook_Form_PopLayerLogoffUserForm') and ff.find_element_by_id('hook_Form_PopLayerLogoffUserForm').is_displayed())
self.selenium.find_element_by_css_selector('#hook_FormButton_button_logoff').click()
        #Click logout popup and wait for the login form to be shown
WebDriverWait(self.selenium, 2).until(lambda ff: ff.find_element_by_id('field_email'))
def login_into_odnoklassniki(self):
url = reverse('socialauth_begin', args=('odnoklassniki',))
self.selenium.get('{0:s}{1:s}'.format(self.live_server_url, url))
WebDriverWait(self.selenium, 2).until(lambda ff: ff.find_element_by_id('field_email'))
email_input = self.selenium.find_element_by_id('field_email')
pw_input = self.selenium.find_element_by_id('field_password')
email_input.send_keys(self.username)
pw_input.send_keys(self.password)
self.selenium.find_element_by_name('button_continue').click()
WebDriverWait(self.selenium, 2).until(lambda ff: ff.find_element_by_name('button_accept_request'))
self.selenium.find_element_by_name('button_accept_request').click()
self.selenium.implicitly_wait(2)
        time.sleep(1)  # We need this to give the server time to close the database connection;
        # if this line is removed, the following test query will fail
def test_auth(self):
self.login_into_odnoklassniki()
self.assertEqual(UserSocialAuth.objects.count(), 1)
user = UserSocialAuth.objects.get().user
full_name = '{0} {1}'.format(user.first_name, user.last_name)
self.assertEqual(full_name, self.odnoklassniki_name)
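# Illustrative only: the Django settings that gate the live tests above.  The
# names come from the skipUnless decorators; the values are placeholders, not
# real credentials.
EXAMPLE_LIVE_TEST_SETTINGS = {
    'ODNOKLASSNIKI_APP_ID': '<sandbox iframe app id>',
    'ODNOKLASSNIKI_SANDBOX_DEV_USERNAME': '<sandbox developer login>',
    'ODNOKLASSNIKI_SANDBOX_DEV_PASSWORD': '<sandbox developer password>',
    'ODNOKLASSNIKI_OAUTH2_CLIENT_KEY': '<oauth2 client key>',
    'ODNOKLASSNIKI_TEST_USERNAME': '<ok.ru test account login>',
    'ODNOKLASSNIKI_TEST_PASSWORD': '<ok.ru test account password>',
}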
| 54.548387 | 192 | 0.723714 |
4a1da5dc1b36a29eb0ff025cb0c08e8d9054960a | 14,970 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/configmdlevelsparams_1bf3d8514855f50e409c0aef7ac6bf1e.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/configmdlevelsparams_1bf3d8514855f50e409c0aef7ac6bf1e.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/configmdlevelsparams_1bf3d8514855f50e409c0aef7ac6bf1e.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class ConfigMDLevelsParams(Base):
"""Import IPv6 routes from standard route file. Supported format - Cisco IOS, Juniper JUNOS, Classis Ixia (.csv) and standard CSV.
The ConfigMDLevelsParams class encapsulates a required configMDLevelsParams resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'configMDLevelsParams'
_SDM_ATT_MAP = {
'MdLevel1': 'mdLevel1',
'MdLevel2': 'mdLevel2',
'MdLevel3': 'mdLevel3',
'MdLevel4': 'mdLevel4',
'MdLevel5': 'mdLevel5',
'MdLevel6': 'mdLevel6',
'MdLevel7': 'mdLevel7',
'MdLevel8': 'mdLevel8',
'MdName1': 'mdName1',
'MdName2': 'mdName2',
'MdName3': 'mdName3',
'MdName4': 'mdName4',
'MdName5': 'mdName5',
'MdName6': 'mdName6',
'MdName7': 'mdName7',
'MdName8': 'mdName8',
'MdNameFormat1': 'mdNameFormat1',
'MdNameFormat2': 'mdNameFormat2',
'MdNameFormat3': 'mdNameFormat3',
'MdNameFormat4': 'mdNameFormat4',
'MdNameFormat5': 'mdNameFormat5',
'MdNameFormat6': 'mdNameFormat6',
'MdNameFormat7': 'mdNameFormat7',
'MdNameFormat8': 'mdNameFormat8',
'NumMDLevels': 'numMDLevels',
}
def __init__(self, parent):
super(ConfigMDLevelsParams, self).__init__(parent)
@property
def MdLevel1(self):
"""
Returns
-------
- number: Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdLevel1'])
@MdLevel1.setter
def MdLevel1(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdLevel1'], value)
@property
def MdLevel2(self):
"""
Returns
-------
- number: Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdLevel2'])
@MdLevel2.setter
def MdLevel2(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdLevel2'], value)
@property
def MdLevel3(self):
"""
Returns
-------
- number: Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdLevel3'])
@MdLevel3.setter
def MdLevel3(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdLevel3'], value)
@property
def MdLevel4(self):
"""
Returns
-------
- number: Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdLevel4'])
@MdLevel4.setter
def MdLevel4(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdLevel4'], value)
@property
def MdLevel5(self):
"""
Returns
-------
- number: Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdLevel5'])
@MdLevel5.setter
def MdLevel5(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdLevel5'], value)
@property
def MdLevel6(self):
"""
Returns
-------
- number: Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdLevel6'])
@MdLevel6.setter
def MdLevel6(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdLevel6'], value)
@property
def MdLevel7(self):
"""
Returns
-------
- number: Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdLevel7'])
@MdLevel7.setter
def MdLevel7(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdLevel7'], value)
@property
def MdLevel8(self):
"""
Returns
-------
- number: Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdLevel8'])
@MdLevel8.setter
def MdLevel8(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdLevel8'], value)
@property
def MdName1(self):
"""
Returns
-------
- str: Network Address Step Value.
"""
return self._get_attribute(self._SDM_ATT_MAP['MdName1'])
@MdName1.setter
def MdName1(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdName1'], value)
@property
def MdName2(self):
"""
Returns
-------
- str: Network Address Step Value.
"""
return self._get_attribute(self._SDM_ATT_MAP['MdName2'])
@MdName2.setter
def MdName2(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdName2'], value)
@property
def MdName3(self):
"""
Returns
-------
- str: Network Address Step Value.
"""
return self._get_attribute(self._SDM_ATT_MAP['MdName3'])
@MdName3.setter
def MdName3(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdName3'], value)
@property
def MdName4(self):
"""
Returns
-------
- str: Network Address Step Value.
"""
return self._get_attribute(self._SDM_ATT_MAP['MdName4'])
@MdName4.setter
def MdName4(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdName4'], value)
@property
def MdName5(self):
"""
Returns
-------
- str: Network Address Step Value.
"""
return self._get_attribute(self._SDM_ATT_MAP['MdName5'])
@MdName5.setter
def MdName5(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdName5'], value)
@property
def MdName6(self):
"""
Returns
-------
- str: Network Address Step Value.
"""
return self._get_attribute(self._SDM_ATT_MAP['MdName6'])
@MdName6.setter
def MdName6(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdName6'], value)
@property
def MdName7(self):
"""
Returns
-------
- str: Network Address Step Value.
"""
return self._get_attribute(self._SDM_ATT_MAP['MdName7'])
@MdName7.setter
def MdName7(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdName7'], value)
@property
def MdName8(self):
"""
Returns
-------
- str: Network Address Step Value.
"""
return self._get_attribute(self._SDM_ATT_MAP['MdName8'])
@MdName8.setter
def MdName8(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdName8'], value)
@property
def MdNameFormat1(self):
"""
Returns
-------
- str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr): Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdNameFormat1'])
@MdNameFormat1.setter
def MdNameFormat1(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdNameFormat1'], value)
@property
def MdNameFormat2(self):
"""
Returns
-------
- str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr): Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdNameFormat2'])
@MdNameFormat2.setter
def MdNameFormat2(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdNameFormat2'], value)
@property
def MdNameFormat3(self):
"""
Returns
-------
- str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr): Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdNameFormat3'])
@MdNameFormat3.setter
def MdNameFormat3(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdNameFormat3'], value)
@property
def MdNameFormat4(self):
"""
Returns
-------
- str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr): Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdNameFormat4'])
@MdNameFormat4.setter
def MdNameFormat4(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdNameFormat4'], value)
@property
def MdNameFormat5(self):
"""
Returns
-------
- str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr): Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdNameFormat5'])
@MdNameFormat5.setter
def MdNameFormat5(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdNameFormat5'], value)
@property
def MdNameFormat6(self):
"""
Returns
-------
- str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr): Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdNameFormat6'])
@MdNameFormat6.setter
def MdNameFormat6(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdNameFormat6'], value)
@property
def MdNameFormat7(self):
"""
Returns
-------
- str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr): Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdNameFormat7'])
@MdNameFormat7.setter
def MdNameFormat7(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdNameFormat7'], value)
@property
def MdNameFormat8(self):
"""
Returns
-------
- str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr): Text
"""
return self._get_attribute(self._SDM_ATT_MAP['MdNameFormat8'])
@MdNameFormat8.setter
def MdNameFormat8(self, value):
self._set_attribute(self._SDM_ATT_MAP['MdNameFormat8'], value)
@property
def NumMDLevels(self):
"""
Returns
-------
        - number: Number of maintenance domain (MD) levels to configure.
"""
return self._get_attribute(self._SDM_ATT_MAP['NumMDLevels'])
@NumMDLevels.setter
def NumMDLevels(self, value):
self._set_attribute(self._SDM_ATT_MAP['NumMDLevels'], value)
def update(self, MdLevel1=None, MdLevel2=None, MdLevel3=None, MdLevel4=None, MdLevel5=None, MdLevel6=None, MdLevel7=None, MdLevel8=None, MdName1=None, MdName2=None, MdName3=None, MdName4=None, MdName5=None, MdName6=None, MdName7=None, MdName8=None, MdNameFormat1=None, MdNameFormat2=None, MdNameFormat3=None, MdNameFormat4=None, MdNameFormat5=None, MdNameFormat6=None, MdNameFormat7=None, MdNameFormat8=None, NumMDLevels=None):
"""Updates configMDLevelsParams resource on the server.
Args
----
- MdLevel1 (number): Text
- MdLevel2 (number): Text
- MdLevel3 (number): Text
- MdLevel4 (number): Text
- MdLevel5 (number): Text
- MdLevel6 (number): Text
- MdLevel7 (number): Text
- MdLevel8 (number): Text
- MdName1 (str): Network Address Step Value.
- MdName2 (str): Network Address Step Value.
- MdName3 (str): Network Address Step Value.
- MdName4 (str): Network Address Step Value.
- MdName5 (str): Network Address Step Value.
- MdName6 (str): Network Address Step Value.
- MdName7 (str): Network Address Step Value.
- MdName8 (str): Network Address Step Value.
- MdNameFormat1 (str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr)): Text
- MdNameFormat2 (str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr)): Text
- MdNameFormat3 (str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr)): Text
- MdNameFormat4 (str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr)): Text
- MdNameFormat5 (str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr)): Text
- MdNameFormat6 (str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr)): Text
- MdNameFormat7 (str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr)): Text
- MdNameFormat8 (str(mdNameFormatNoMaintenanceDomainName | mdNameFormatDomainNameBasedStr | mdNameFormatMacPlusTwoOctetInt | mdNameFormatCharacterStr)): Text
        - NumMDLevels (number): Number of maintenance domain (MD) levels to configure.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def ConfigMDLevels(self):
"""Executes the configMDLevels operation on the server.
        Configures the CFM maintenance domain (MD) levels, names and name formats defined by this object's attributes.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('configMDLevels', payload=payload, response_object=None)
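# Illustrative sketch (not part of the generated file).  How an exec-params
# object like this is typically driven once retrieved from the API tree; the
# `params` argument is a hypothetical, already-obtained ConfigMDLevelsParams
# instance, and the values are made up.  Attribute names and the enum value
# come from _SDM_ATT_MAP and the docstrings above.
def _example_configure_md_levels(params):
    params.update(NumMDLevels=2,
                  MdLevel1=3, MdName1='md-level-3',
                  MdNameFormat1='mdNameFormatCharacterStr',
                  MdLevel2=5, MdName2='md-level-5',
                  MdNameFormat2='mdNameFormatCharacterStr')
    return params.ConfigMDLevels()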
| 36.246973 | 431 | 0.654041 |
4a1da61fd1b57fc4cd34addeecd92efebffe4e4d | 9,681 | py | Python | apps/files/tasks.py | spasovski/zamboni | c7f4714029e3b2dc918ddfc2103f8e051193c14d | [
"BSD-3-Clause"
] | 1 | 2021-07-29T00:51:09.000Z | 2021-07-29T00:51:09.000Z | apps/files/tasks.py | imclab/olympia | 35bc9c484e384bafab520ca8b5d5b0f8da5b62c0 | [
"BSD-3-Clause"
] | null | null | null | apps/files/tasks.py | imclab/olympia | 35bc9c484e384bafab520ca8b5d5b0f8da5b62c0 | [
"BSD-3-Clause"
] | null | null | null |
import hashlib
import logging
import os
import urllib
import urllib2
import urlparse
import uuid
from datetime import datetime
import django.core.mail
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.db import transaction
import jingo
from celeryutils import task
from tower import ugettext as _
import amo
from amo.decorators import write
from amo.helpers import absolutify
from amo.urlresolvers import reverse
from amo.utils import get_email_backend, Message
from addons.models import Addon
from versions.compare import version_int as vint
from versions.models import ApplicationsVersions, Version
from .models import File
from .utils import JetpackUpgrader, parse_addon
task_log = logging.getLogger('z.task')
jp_log = logging.getLogger('z.jp.repack')
@task
def extract_file(viewer, **kw):
# This message is for end users so they'll see a nice error.
msg = Message('file-viewer:%s' % viewer)
msg.delete()
# This flag is so that we can signal when the extraction is completed.
flag = Message(viewer._extraction_cache_key())
task_log.debug('[1@%s] Unzipping %s for file viewer.' % (
extract_file.rate_limit, viewer))
try:
flag.save('extracting') # Set the flag to a truthy value.
viewer.extract()
except Exception, err:
if settings.DEBUG:
msg.save(_('There was an error accessing file %s. %s.') %
(viewer, err))
else:
msg.save(_('There was an error accessing file %s.') % viewer)
task_log.error('[1@%s] Error unzipping: %s' %
(extract_file.rate_limit, err))
finally:
# Always delete the flag so the file never gets into a bad state.
flag.delete()
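# Illustrative only: the consumer side of the cache-flag protocol above.  This
# assumes amo.utils.Message also exposes a get() accessor (not shown in this
# file); `viewer` is a hypothetical file-viewer instance.
def _extraction_in_progress(viewer):
    return bool(Message(viewer._extraction_cache_key()).get())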
# The version/file creation methods expect a files.FileUpload object.
class FakeUpload(object):
def __init__(self, path, hash, validation):
self.path = path
self.name = os.path.basename(path)
self.hash = hash
self.validation = validation
class RedisLogHandler(logging.Handler):
"""Logging handler that sends jetpack messages to redis."""
def __init__(self, logger, upgrader, file_data, level=logging.WARNING):
self.logger = logger
self.upgrader = upgrader
self.file_data = file_data
logging.Handler.__init__(self, level)
def emit(self, record):
self.file_data['status'] = 'failed'
self.file_data['msg'] = record.msg
if 'file' in self.file_data:
self.upgrader.file(self.file_data['file'], self.file_data)
self.logger.removeHandler(self)
@task
@write
@transaction.commit_on_success
def repackage_jetpack(builder_data, **kw):
repack_data = dict(urlparse.parse_qsl(builder_data['request']))
jp_log.info('[1@None] Repackaging jetpack for %s.'
% repack_data['file_id'])
jp_log.info('; '.join('%s: "%s"' % i for i in builder_data.items()))
all_keys = builder_data.copy()
all_keys.update(repack_data)
msg = lambda s: ('[{file_id}]: ' + s).format(**all_keys)
upgrader = JetpackUpgrader()
file_data = upgrader.file(repack_data['file_id'])
redis_logger = RedisLogHandler(jp_log, upgrader, file_data)
jp_log.addHandler(redis_logger)
if file_data.get('uuid') != repack_data['uuid']:
_msg = ('Aborting repack. AMO<=>Builder tracking number mismatch '
'(%s) (%s)' % (file_data.get('uuid'), repack_data['uuid']))
return jp_log.warning(msg(_msg))
if builder_data['result'] != 'success':
return jp_log.warning(msg('Build not successful. {result}: {msg}'))
try:
addon = Addon.objects.get(id=repack_data['addon'])
old_file = File.objects.get(id=repack_data['file_id'])
old_version = old_file.version
except Exception:
jp_log.error(msg('Could not find addon or file.'), exc_info=True)
raise
# Fetch the file from builder.amo.
try:
filepath, headers = urllib.urlretrieve(builder_data['location'])
except Exception:
jp_log.error(msg('Could not retrieve {location}.'), exc_info=True)
raise
# Figure out the SHA256 hash of the file.
try:
hash_ = hashlib.sha256()
with storage.open(filepath, 'rb') as fd:
while True:
chunk = fd.read(8192)
if not chunk:
break
hash_.update(chunk)
except Exception:
jp_log.error(msg('Error hashing file.'), exc_info=True)
raise
upload = FakeUpload(path=filepath, hash='sha256:%s' % hash_.hexdigest(),
validation=None)
try:
version = parse_addon(upload, addon)['version']
if addon.versions.filter(version=version).exists():
jp_log.warning('Duplicate version [%s] for %r detected. Bailing.'
% (version, addon))
return
except Exception:
pass
# TODO: multi-file: have we already created the new version for a different
# file?
try:
new_version = Version.from_upload(upload, addon, [old_file.platform],
send_signal=False)
except Exception:
jp_log.error(msg('Error creating new version.'))
raise
try:
# Sync the compatible apps of the new version with data from the old
# version if the repack didn't specify compat info.
for app in old_version.apps.values():
sync_app = amo.APP_IDS[app['application_id']]
new_compat = new_version.compatible_apps
if sync_app not in new_compat:
app.update(version_id=new_version.id, id=None)
ApplicationsVersions.objects.create(**app)
else:
new_compat[sync_app].min_id = app['min_id']
new_compat[sync_app].max_id = app['max_id']
new_compat[sync_app].save()
except Exception:
jp_log.error(msg('Error syncing compat info. [%s] => [%s]' %
(old_version.id, new_version.id)), exc_info=True)
pass # Skip this for now, we can fix up later.
try:
# Sync the status of the new file.
new_file = new_version.files.using('default')[0]
new_file.status = old_file.status
new_file.save()
if (addon.status in amo.MIRROR_STATUSES
and new_file.status in amo.MIRROR_STATUSES):
new_file.copy_to_mirror()
except Exception:
jp_log.error(msg('Error syncing old file status.'), exc_info=True)
raise
# Sync out the new version.
addon.update_version()
upgrader.finish(repack_data['file_id'])
jp_log.info('Repacked %r from %r for %r.' %
(new_version, old_version, addon))
jp_log.removeHandler(redis_logger)
try:
send_upgrade_email(addon, new_version, file_data['version'])
except Exception:
jp_log.error(msg('Could not send success email.'), exc_info=True)
raise
# Return the new file to make testing easier.
return new_file
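# Illustrative only: the shape of builder_data that repackage_jetpack consumes,
# inferred from the lookups above.  All values are made up.
EXAMPLE_BUILDER_DATA = {
    'result': 'success',                    # anything else aborts the repack
    'msg': '',                              # interpolated into log messages
    'location': 'https://builder.example.com/repacked.xpi',
    'request': 'addon=123&file_id=456&uuid=0123abcd',  # urlencoded repack_data
}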
def send_upgrade_email(addon, new_version, sdk_version):
cxn = get_email_backend()
subject = u'%s updated to SDK version %s' % (addon.name, sdk_version)
from_ = settings.DEFAULT_FROM_EMAIL
to = set(addon.authors.values_list('email', flat=True))
t = jingo.env.get_template('files/jetpack_upgraded.txt')
msg = t.render(addon=addon, new_version=new_version,
sdk_version=sdk_version)
django.core.mail.send_mail(subject, msg, from_, to, connection=cxn)
@task
def start_upgrade(file_ids, sdk_version=None, priority='low', **kw):
upgrader = JetpackUpgrader()
minver, maxver = upgrader.jetpack_versions()
files = File.objects.filter(id__in=file_ids).select_related('version')
now = datetime.now()
filedata = {}
for file_ in files:
if not (file_.jetpack_version and
vint(minver) <= vint(file_.jetpack_version) < vint(maxver)):
continue
jp_log.info('Sending %s to builder for jetpack version %s.'
% (file_.id, maxver))
# Data stored locally so we can figure out job details and if it should
# be cancelled.
data = {'file': file_.id,
'version': maxver,
'time': now,
'uuid': uuid.uuid4().hex,
'status': 'Sent to builder',
'owner': 'bulk'}
# Data POSTed to the builder.
post = {'addon': file_.version.addon_id,
'file_id': file_.id,
'priority': priority,
'secret': settings.BUILDER_SECRET_KEY,
'uuid': data['uuid'],
'pingback': absolutify(reverse('amo.builder-pingback'))}
if file_.builder_version:
post['package_key'] = file_.builder_version
else:
# Older jetpacks might not have builderVersion in their harness.
post['location'] = file_.get_url_path('builder')
if sdk_version:
post['sdk_version'] = sdk_version
try:
jp_log.info(urllib.urlencode(post))
response = urllib2.urlopen(settings.BUILDER_UPGRADE_URL,
urllib.urlencode(post))
jp_log.info('Response from builder for %s: [%s] %s' %
(file_.id, response.code, response.read()))
except Exception:
jp_log.error('Could not talk to builder for %s.' % file_.id,
exc_info=True)
filedata[file_.id] = data
upgrader.files(filedata)
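# Illustrative only: kicking off a bulk upgrade (file ids and SDK version are
# made up).  `delay` is the standard celery entry point for @task-decorated
# functions.
def _example_bulk_upgrade():
    start_upgrade.delay([1234, 5678], sdk_version='1.14', priority='high')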
| 36.532075 | 79 | 0.622663 |
4a1da6f77351c45a4596cf09893fe6bf21db91d4 | 1,659 | py | Python | apyec/logs/parse_log.py | HazenBabcock/APyEC | 2377e4e6ae242ffc324487112541f1a2cf8b499c | [
"MIT"
] | 3 | 2015-09-30T14:08:02.000Z | 2021-02-18T02:49:01.000Z | apyec/logs/parse_log.py | HazenBabcock/APyEC | 2377e4e6ae242ffc324487112541f1a2cf8b499c | [
"MIT"
] | 4 | 2015-10-01T17:24:36.000Z | 2016-02-18T16:27:25.000Z | apyec/logs/parse_log.py | HazenBabcock/APyEC | 2377e4e6ae242ffc324487112541f1a2cf8b499c | [
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
## @file
#
# This parses a log file series (i.e. log, log.1, log.2, etc.) to
# make it easier to see which functions / methods call which other
# functions / methods. It is used mostly to make sure that what
# we expect to happen is actually happening, and to try and
# identify unnecessary function calls.
#
# Hazen 7/15.
#
from datetime import datetime
import os
import sys
if (len(sys.argv) != 2):
print "usage: <log file>"
exit()
pattern = '%Y-%m-%d %H:%M:%S,%f'
def parseCommand(command):
return command.split(" ")[0]
command_timing = {}
indent = 0
start_time = None
for ext in [".5", ".4", ".3", ".2", ".1", ""]:
if not os.path.exists(sys.argv[1] + ".out" + ext):
continue
with open(sys.argv[1] + ".out" + ext) as fp:
for line in fp:
try:
[time, command] = map(lambda x: x.strip(), line.split(":apyec:INFO:"))
except ValueError:
continue
if start_time is None:
elapsed = "{0:6.2f}".format(0.0)
start_time = datetime.strptime(time, pattern)
else:
cur_time = datetime.strptime(time, pattern)
elapsed = "{0:6.2f}".format((cur_time - start_time).total_seconds())
# Command start.
if (" started" in line):
print elapsed, " " * indent, command
indent += 2
# Command end.
if (" ended" in line):
indent -= 2
if (indent < 0):
indent = 0
print elapsed, " " * indent, command
| 26.333333 | 86 | 0.521398 |
4a1da7cdf7546536f339b7e4d30aac99e980b76b | 76,054 | py | Python | vobject/icalendar.py | karalan/google-tasks-porter | 58754f4ee5d478a780bc316bbeea5a5a82e1f6f7 | [
"Apache-2.0"
] | 1 | 2015-08-29T18:40:43.000Z | 2015-08-29T18:40:43.000Z | vobject/icalendar.py | karalan/google-tasks-porter | 58754f4ee5d478a780bc316bbeea5a5a82e1f6f7 | [
"Apache-2.0"
] | 4 | 2021-03-19T15:38:56.000Z | 2021-09-08T02:47:16.000Z | vendor-local/lib/python/vobject/icalendar.py | Acidburn0zzz/airmozilla | 7b03af6d6efe9af00a6070f5327e10fb755c3766 | [
"BSD-3-Clause"
] | 1 | 2019-11-02T23:29:13.000Z | 2019-11-02T23:29:13.000Z | """Definitions and behavior for iCalendar, also known as vCalendar 2.0"""
import string
import behavior
import dateutil.rrule
import dateutil.tz
import StringIO, cStringIO
import datetime
import socket, random #for generating a UID
import itertools
from base import (VObjectError, NativeError, ValidateError, ParseError,
VBase, Component, ContentLine, logger, defaultSerialize,
registerBehavior, backslashEscape, foldOneLine,
newFromBehavior, CRLF, LF, ascii)
#------------------------------- Constants -------------------------------------
DATENAMES = ("rdate", "exdate")
RULENAMES = ("exrule", "rrule")
DATESANDRULES = ("exrule", "rrule", "rdate", "exdate")
PRODID = u"-//PYVOBJECT//NONSGML Version 1//EN"
WEEKDAYS = "MO", "TU", "WE", "TH", "FR", "SA", "SU"
FREQUENCIES = ('YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY',
'SECONDLY')
zeroDelta = datetime.timedelta(0)
twoHours = datetime.timedelta(hours=2)
#---------------------------- TZID registry ------------------------------------
__tzidMap={}
def toUnicode(s):
"""Take a string or unicode, turn it into unicode, decoding as utf-8"""
if isinstance(s, str):
s = s.decode('utf-8')
return s
def registerTzid(tzid, tzinfo):
"""Register a tzid -> tzinfo mapping."""
__tzidMap[toUnicode(tzid)]=tzinfo
def getTzid(tzid):
"""Return the tzid if it exists, or None."""
return __tzidMap.get(toUnicode(tzid), None)
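# Usage sketch (illustrative, not part of the original module): the registry
# simply maps TZID strings to tzinfo objects, e.g.
#   registerTzid('US/Eastern', dateutil.tz.gettz('US/Eastern'))
#   getTzid('US/Eastern')    # -> the tzinfo registered above
#   getTzid('No/Such/Zone')  # -> None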
utc = dateutil.tz.tzutc()
registerTzid("UTC", utc)
#-------------------- Helper subclasses ----------------------------------------
class TimezoneComponent(Component):
"""A VTIMEZONE object.
VTIMEZONEs are parsed by dateutil.tz.tzical, the resulting datetime.tzinfo
subclass is stored in self.tzinfo, self.tzid stores the TZID associated
with this timezone.
@ivar name:
The uppercased name of the object, in this case always 'VTIMEZONE'.
@ivar tzinfo:
A datetime.tzinfo subclass representing this timezone.
@ivar tzid:
The string used to refer to this timezone.
"""
def __init__(self, tzinfo=None, *args, **kwds):
"""Accept an existing Component or a tzinfo class."""
super(TimezoneComponent, self).__init__(*args, **kwds)
self.isNative=True
# hack to make sure a behavior is assigned
if self.behavior is None:
self.behavior = VTimezone
if tzinfo is not None:
self.tzinfo = tzinfo
if not hasattr(self, 'name') or self.name == '':
self.name = 'VTIMEZONE'
self.useBegin = True
@classmethod
def registerTzinfo(obj, tzinfo):
"""Register tzinfo if it's not already registered, return its tzid."""
tzid = obj.pickTzid(tzinfo)
if tzid and not getTzid(tzid):
registerTzid(tzid, tzinfo)
return tzid
def gettzinfo(self):
# workaround for dateutil failing to parse some experimental properties
good_lines = ('rdate', 'rrule', 'dtstart', 'tzname', 'tzoffsetfrom',
'tzoffsetto', 'tzid')
# serialize encodes as utf-8, cStringIO will leave utf-8 alone
buffer = cStringIO.StringIO()
# allow empty VTIMEZONEs
if len(self.contents) == 0:
return None
def customSerialize(obj):
if isinstance(obj, Component):
foldOneLine(buffer, u"BEGIN:" + obj.name)
for child in obj.lines():
if child.name.lower() in good_lines:
child.serialize(buffer, 75, validate=False)
for comp in obj.components():
customSerialize(comp)
foldOneLine(buffer, u"END:" + obj.name)
customSerialize(self)
buffer.seek(0) # tzical wants to read a stream
return dateutil.tz.tzical(buffer).get()
def settzinfo(self, tzinfo, start=2000, end=2030):
"""Create appropriate objects in self to represent tzinfo.
Collapse DST transitions to rrules as much as possible.
Assumptions:
- DST <-> Standard transitions occur on the hour
- never within a month of one another
- twice or fewer times a year
- never in the month of December
- DST always moves offset exactly one hour later
- tzinfo classes dst method always treats times that could be in either
offset as being in the later regime
"""
def fromLastWeek(dt):
"""How many weeks from the end of the month dt is, starting from 1."""
weekDelta = datetime.timedelta(weeks=1)
n = 1
current = dt + weekDelta
while current.month == dt.month:
n += 1
current += weekDelta
return n
# lists of dictionaries defining rules which are no longer in effect
completed = {'daylight' : [], 'standard' : []}
# dictionary defining rules which are currently in effect
working = {'daylight' : None, 'standard' : None}
# rule may be based on the nth week of the month or the nth from the last
for year in xrange(start, end + 1):
newyear = datetime.datetime(year, 1, 1)
for transitionTo in 'daylight', 'standard':
transition = getTransition(transitionTo, year, tzinfo)
oldrule = working[transitionTo]
if transition == newyear:
# transitionTo is in effect for the whole year
rule = {'end' : None,
'start' : newyear,
'month' : 1,
'weekday' : None,
'hour' : None,
'plus' : None,
'minus' : None,
'name' : tzinfo.tzname(newyear),
'offset' : tzinfo.utcoffset(newyear),
'offsetfrom' : tzinfo.utcoffset(newyear)}
if oldrule is None:
# transitionTo was not yet in effect
working[transitionTo] = rule
else:
# transitionTo was already in effect
if (oldrule['offset'] !=
tzinfo.utcoffset(newyear)):
# old rule was different, it shouldn't continue
oldrule['end'] = year - 1
completed[transitionTo].append(oldrule)
working[transitionTo] = rule
elif transition is None:
# transitionTo is not in effect
if oldrule is not None:
# transitionTo used to be in effect
oldrule['end'] = year - 1
completed[transitionTo].append(oldrule)
working[transitionTo] = None
else:
# an offset transition was found
old_offset = tzinfo.utcoffset(transition - twoHours)
rule = {'end' : None, # None, or an integer year
'start' : transition, # the datetime of transition
'month' : transition.month,
'weekday' : transition.weekday(),
'hour' : transition.hour,
'name' : tzinfo.tzname(transition),
'plus' : (transition.day - 1)/ 7 + 1,#nth week of the month
'minus' : fromLastWeek(transition), #nth from last week
'offset' : tzinfo.utcoffset(transition),
'offsetfrom' : old_offset}
if oldrule is None:
working[transitionTo] = rule
else:
plusMatch = rule['plus'] == oldrule['plus']
minusMatch = rule['minus'] == oldrule['minus']
truth = plusMatch or minusMatch
for key in 'month', 'weekday', 'hour', 'offset':
truth = truth and rule[key] == oldrule[key]
if truth:
# the old rule is still true, limit to plus or minus
if not plusMatch:
oldrule['plus'] = None
if not minusMatch:
oldrule['minus'] = None
else:
# the new rule did not match the old
oldrule['end'] = year - 1
completed[transitionTo].append(oldrule)
working[transitionTo] = rule
for transitionTo in 'daylight', 'standard':
if working[transitionTo] is not None:
completed[transitionTo].append(working[transitionTo])
self.tzid = []
self.daylight = []
self.standard = []
self.add('tzid').value = self.pickTzid(tzinfo, True)
old = None
for transitionTo in 'daylight', 'standard':
for rule in completed[transitionTo]:
comp = self.add(transitionTo)
dtstart = comp.add('dtstart')
dtstart.value = rule['start']
if rule['name'] is not None:
comp.add('tzname').value = rule['name']
line = comp.add('tzoffsetto')
line.value = deltaToOffset(rule['offset'])
line = comp.add('tzoffsetfrom')
line.value = deltaToOffset(rule['offsetfrom'])
if rule['plus'] is not None:
num = rule['plus']
elif rule['minus'] is not None:
num = -1 * rule['minus']
else:
num = None
if num is not None:
dayString = ";BYDAY=" + str(num) + WEEKDAYS[rule['weekday']]
else:
dayString = ""
if rule['end'] is not None:
if rule['hour'] is None:
# all year offset, with no rule
endDate = datetime.datetime(rule['end'], 1, 1)
else:
weekday = dateutil.rrule.weekday(rule['weekday'], num)
du_rule = dateutil.rrule.rrule(dateutil.rrule.YEARLY,
bymonth = rule['month'],byweekday = weekday,
dtstart = datetime.datetime(
rule['end'], 1, 1, rule['hour'])
)
endDate = du_rule[0]
endDate = endDate.replace(tzinfo = utc) - rule['offsetfrom']
endString = ";UNTIL="+ dateTimeToString(endDate)
else:
endString = ''
rulestring = "FREQ=YEARLY%s;BYMONTH=%s%s" % \
(dayString, str(rule['month']), endString)
comp.add('rrule').value = rulestring
tzinfo = property(gettzinfo, settzinfo)
# prevent Component's __setattr__ from overriding the tzinfo property
normal_attributes = Component.normal_attributes + ['tzinfo']
@staticmethod
def pickTzid(tzinfo, allowUTC=False):
"""
Given a tzinfo class, use known APIs to determine TZID, or use tzname.
"""
if tzinfo is None or (not allowUTC and tzinfo_eq(tzinfo, utc)):
#If tzinfo is UTC, we don't need a TZID
return None
# try PyICU's tzid key
if hasattr(tzinfo, 'tzid'):
return toUnicode(tzinfo.tzid)
# try pytz zone key
if hasattr(tzinfo, 'zone'):
return toUnicode(tzinfo.zone)
# try tzical's tzid key
elif hasattr(tzinfo, '_tzid'):
return toUnicode(tzinfo._tzid)
else:
# return tzname for standard (non-DST) time
notDST = datetime.timedelta(0)
for month in xrange(1,13):
dt = datetime.datetime(2000, month, 1)
if tzinfo.dst(dt) == notDST:
return toUnicode(tzinfo.tzname(dt))
# there was no standard time in 2000!
raise VObjectError("Unable to guess TZID for tzinfo %s" % str(tzinfo))
def __str__(self):
return "<VTIMEZONE | " + str(getattr(self, 'tzid', 'No TZID')) +">"
def __repr__(self):
return self.__str__()
def prettyPrint(self, level, tabwidth):
pre = ' ' * level * tabwidth
print pre, self.name
print pre, "TZID:", self.tzid
print
class RecurringComponent(Component):
"""A vCalendar component like VEVENT or VTODO which may recur.
Any recurring component can have one or multiple RRULE, RDATE,
EXRULE, or EXDATE lines, and one or zero DTSTART lines. It can also have a
variety of children that don't have any recurrence information.
In the example below, note that dtstart is included in the rruleset.
This is not the default behavior for dateutil's rrule implementation unless
dtstart would already have been a member of the recurrence rule, and as a
result, COUNT is wrong. This can be worked around when getting rruleset by
adjusting count down by one if an rrule has a count and dtstart isn't in its
result set, but by default, the rruleset property doesn't do this work
around, to access it getrruleset must be called with addRDate set True.
>>> import dateutil.rrule, datetime
>>> vevent = RecurringComponent(name='VEVENT')
>>> vevent.add('rrule').value =u"FREQ=WEEKLY;COUNT=2;INTERVAL=2;BYDAY=TU,TH"
>>> vevent.add('dtstart').value = datetime.datetime(2005, 1, 19, 9)
When creating rrule's programmatically it should be kept in
mind that count doesn't necessarily mean what rfc2445 says.
>>> list(vevent.rruleset)
[datetime.datetime(2005, 1, 20, 9, 0), datetime.datetime(2005, 2, 1, 9, 0)]
>>> list(vevent.getrruleset(addRDate=True))
[datetime.datetime(2005, 1, 19, 9, 0), datetime.datetime(2005, 1, 20, 9, 0)]
Also note that dateutil will expand all-day events (datetime.date values) to
datetime.datetime value with time 0 and no timezone.
>>> vevent.dtstart.value = datetime.date(2005,3,18)
>>> list(vevent.rruleset)
[datetime.datetime(2005, 3, 29, 0, 0), datetime.datetime(2005, 3, 31, 0, 0)]
>>> list(vevent.getrruleset(True))
[datetime.datetime(2005, 3, 18, 0, 0), datetime.datetime(2005, 3, 29, 0, 0)]
@ivar rruleset:
A U{rruleset<https://moin.conectiva.com.br/DateUtil>}.
"""
def __init__(self, *args, **kwds):
super(RecurringComponent, self).__init__(*args, **kwds)
self.isNative=True
#self.clobberedRDates=[]
def getrruleset(self, addRDate = False):
"""Get an rruleset created from self.
If addRDate is True, add an RDATE for dtstart if it's not included in
an RRULE, and count is decremented if it exists.
Note that for rules which don't match DTSTART, DTSTART may not appear
in list(rruleset), although it should. By default, an RDATE is not
created in these cases, and count isn't updated, so dateutil may list
a spurious occurrence.
"""
rruleset = None
for name in DATESANDRULES:
addfunc = None
for line in self.contents.get(name, ()):
# don't bother creating a rruleset unless there's a rule
if rruleset is None:
rruleset = dateutil.rrule.rruleset()
if addfunc is None:
addfunc=getattr(rruleset, name)
if name in DATENAMES:
if type(line.value[0]) == datetime.datetime:
map(addfunc, line.value)
elif type(line.value[0]) == datetime.date:
for dt in line.value:
addfunc(datetime.datetime(dt.year, dt.month, dt.day))
else:
# ignore RDATEs with PERIOD values for now
pass
elif name in RULENAMES:
try:
dtstart = self.dtstart.value
                    except (AttributeError, KeyError):
# Special for VTODO - try DUE property instead
try:
if self.name == "VTODO":
dtstart = self.due.value
else:
# if there's no dtstart, just return None
return None
                        except (AttributeError, KeyError):
# if there's no due, just return None
return None
# rrulestr complains about unicode, so cast to str
# a Ruby iCalendar library escapes semi-colons in rrules,
# so also remove any backslashes
value = str(line.value).replace('\\', '')
rule = dateutil.rrule.rrulestr(value, dtstart=dtstart)
until = rule._until
if until is not None and \
isinstance(dtstart, datetime.datetime) and \
(until.tzinfo != dtstart.tzinfo):
# dateutil converts the UNTIL date to a datetime,
# check to see if the UNTIL parameter value was a date
vals = dict(pair.split('=') for pair in
line.value.upper().split(';'))
if len(vals.get('UNTIL', '')) == 8:
until = datetime.datetime.combine(until.date(),
dtstart.time())
# While RFC2445 says UNTIL MUST be UTC, Chandler allows
# floating recurring events, and uses floating UNTIL values.
# Also, some odd floating UNTIL but timezoned DTSTART values
# have shown up in the wild, so put floating UNTIL values
# DTSTART's timezone
if until.tzinfo is None:
until = until.replace(tzinfo=dtstart.tzinfo)
if dtstart.tzinfo is not None:
until = until.astimezone(dtstart.tzinfo)
rule._until = until
# add the rrule or exrule to the rruleset
addfunc(rule)
if name == 'rrule' and addRDate:
try:
# dateutils does not work with all-day (datetime.date) items
# so we need to convert to a datetime.datetime
# (which is what dateutils does internally)
if not isinstance(dtstart, datetime.datetime):
adddtstart = datetime.datetime.fromordinal(dtstart.toordinal())
else:
adddtstart = dtstart
if rruleset._rrule[-1][0] != adddtstart:
rruleset.rdate(adddtstart)
added = True
else:
added = False
except IndexError:
# it's conceivable that an rrule might have 0 datetimes
added = False
if added and rruleset._rrule[-1]._count != None:
rruleset._rrule[-1]._count -= 1
return rruleset
def setrruleset(self, rruleset):
# Get DTSTART from component (or DUE if no DTSTART in a VTODO)
try:
dtstart = self.dtstart.value
        except (AttributeError, KeyError):
if self.name == "VTODO":
dtstart = self.due.value
else:
raise
isDate = datetime.date == type(dtstart)
if isDate:
dtstart = datetime.datetime(dtstart.year,dtstart.month, dtstart.day)
untilSerialize = dateToString
else:
# make sure to convert time zones to UTC
untilSerialize = lambda x: dateTimeToString(x, True)
for name in DATESANDRULES:
if hasattr(self.contents, name):
del self.contents[name]
setlist = getattr(rruleset, '_' + name)
if name in DATENAMES:
setlist = list(setlist) # make a copy of the list
if name == 'rdate' and dtstart in setlist:
setlist.remove(dtstart)
if isDate:
setlist = [dt.date() for dt in setlist]
if len(setlist) > 0:
self.add(name).value = setlist
elif name in RULENAMES:
for rule in setlist:
buf = StringIO.StringIO()
buf.write('FREQ=')
buf.write(FREQUENCIES[rule._freq])
values = {}
if rule._interval != 1:
values['INTERVAL'] = [str(rule._interval)]
if rule._wkst != 0: # wkst defaults to Monday
values['WKST'] = [WEEKDAYS[rule._wkst]]
if rule._bysetpos is not None:
values['BYSETPOS'] = [str(i) for i in rule._bysetpos]
if rule._count is not None:
values['COUNT'] = [str(rule._count)]
elif rule._until is not None:
values['UNTIL'] = [untilSerialize(rule._until)]
days = []
if (rule._byweekday is not None and (
dateutil.rrule.WEEKLY != rule._freq or
len(rule._byweekday) != 1 or
rule._dtstart.weekday() != rule._byweekday[0])):
# ignore byweekday if freq is WEEKLY and day correlates
# with dtstart because it was automatically set by
# dateutil
days.extend(WEEKDAYS[n] for n in rule._byweekday)
if rule._bynweekday is not None:
days.extend(str(n) + WEEKDAYS[day] for day, n in rule._bynweekday)
if len(days) > 0:
values['BYDAY'] = days
if rule._bymonthday is not None and len(rule._bymonthday) > 0:
if not (rule._freq <= dateutil.rrule.MONTHLY and
len(rule._bymonthday) == 1 and
rule._bymonthday[0] == rule._dtstart.day):
# ignore bymonthday if it's generated by dateutil
values['BYMONTHDAY'] = [str(n) for n in rule._bymonthday]
if rule._bynmonthday is not None and len(rule._bynmonthday) > 0:
values.setdefault('BYMONTHDAY', []).extend(str(n) for n in rule._bynmonthday)
if rule._bymonth is not None and len(rule._bymonth) > 0:
if (rule._byweekday is not None or
len(rule._bynweekday or ()) > 0 or
not (rule._freq == dateutil.rrule.YEARLY and
len(rule._bymonth) == 1 and
rule._bymonth[0] == rule._dtstart.month)):
# ignore bymonth if it's generated by dateutil
values['BYMONTH'] = [str(n) for n in rule._bymonth]
if rule._byyearday is not None:
values['BYYEARDAY'] = [str(n) for n in rule._byyearday]
if rule._byweekno is not None:
values['BYWEEKNO'] = [str(n) for n in rule._byweekno]
# byhour, byminute, bysecond are always ignored for now
for key, paramvals in values.iteritems():
buf.write(';')
buf.write(key)
buf.write('=')
buf.write(','.join(paramvals))
self.add(name).value = buf.getvalue()
rruleset = property(getrruleset, setrruleset)
def __setattr__(self, name, value):
"""For convenience, make self.contents directly accessible."""
if name == 'rruleset':
self.setrruleset(value)
else:
super(RecurringComponent, self).__setattr__(name, value)
class TextBehavior(behavior.Behavior):
"""Provide backslash escape encoding/decoding for single valued properties.
TextBehavior also deals with base64 encoding if the ENCODING parameter is
explicitly set to BASE64.
"""
base64string = 'BASE64' # vCard uses B
@classmethod
def decode(cls, line):
"""Remove backslash escaping from line.value."""
if line.encoded:
encoding = getattr(line, 'encoding_param', None)
if encoding and encoding.upper() == cls.base64string:
line.value = line.value.decode('base64')
else:
line.value = stringToTextValues(line.value)[0]
line.encoded=False
@classmethod
def encode(cls, line):
"""Backslash escape line.value."""
if not line.encoded:
encoding = getattr(line, 'encoding_param', None)
if encoding and encoding.upper() == cls.base64string:
line.value = line.value.encode('base64').replace('\n', '')
else:
line.value = backslashEscape(line.value)
line.encoded=True
class VCalendarComponentBehavior(behavior.Behavior):
defaultBehavior = TextBehavior
isComponent = True
class RecurringBehavior(VCalendarComponentBehavior):
"""Parent Behavior for components which should be RecurringComponents."""
hasNative = True
@staticmethod
def transformToNative(obj):
"""Turn a recurring Component into a RecurringComponent."""
if not obj.isNative:
object.__setattr__(obj, '__class__', RecurringComponent)
obj.isNative = True
return obj
@staticmethod
def transformFromNative(obj):
if obj.isNative:
object.__setattr__(obj, '__class__', Component)
obj.isNative = False
return obj
@staticmethod
def generateImplicitParameters(obj):
"""Generate a UID if one does not exist.
This is just a dummy implementation, for now.
"""
if not hasattr(obj, 'uid'):
rand = str(int(random.random() * 100000))
now = datetime.datetime.now(utc)
now = dateTimeToString(now)
host = socket.gethostname()
obj.add(ContentLine('UID', [], now + '-' + rand + '@' + host))
class DateTimeBehavior(behavior.Behavior):
"""Parent Behavior for ContentLines containing one DATE-TIME."""
hasNative = True
@staticmethod
def transformToNative(obj):
"""Turn obj.value into a datetime.
RFC2445 allows times without time zone information, "floating times"
in some properties. Mostly, this isn't what you want, but when parsing
a file, real floating times are noted by setting to 'TRUE' the
X-VOBJ-FLOATINGTIME-ALLOWED parameter.
"""
if obj.isNative: return obj
obj.isNative = True
if obj.value == '': return obj
obj.value=str(obj.value)
#we're cheating a little here, parseDtstart allows DATE
obj.value=parseDtstart(obj)
if obj.value.tzinfo is None:
obj.params['X-VOBJ-FLOATINGTIME-ALLOWED'] = ['TRUE']
if obj.params.get('TZID'):
# Keep a copy of the original TZID around
obj.params['X-VOBJ-ORIGINAL-TZID'] = [obj.params['TZID']]
del obj.params['TZID']
return obj
@classmethod
def transformFromNative(cls, obj):
"""Replace the datetime in obj.value with an ISO 8601 string."""
if obj.isNative:
obj.isNative = False
tzid = TimezoneComponent.registerTzinfo(obj.value.tzinfo)
obj.value = dateTimeToString(obj.value, cls.forceUTC)
if not cls.forceUTC and tzid is not None:
obj.tzid_param = tzid
if obj.params.get('X-VOBJ-ORIGINAL-TZID'):
if not hasattr(obj, 'tzid_param'):
obj.tzid_param = obj.x_vobj_original_tzid_param
del obj.params['X-VOBJ-ORIGINAL-TZID']
return obj
class UTCDateTimeBehavior(DateTimeBehavior):
"""A value which must be specified in UTC."""
forceUTC = True
class DateOrDateTimeBehavior(behavior.Behavior):
"""Parent Behavior for ContentLines containing one DATE or DATE-TIME."""
hasNative = True
@staticmethod
def transformToNative(obj):
"""Turn obj.value into a date or datetime."""
if obj.isNative: return obj
obj.isNative = True
if obj.value == '': return obj
obj.value=str(obj.value)
obj.value=parseDtstart(obj, allowSignatureMismatch=True)
if getattr(obj, 'value_param', 'DATE-TIME').upper() == 'DATE-TIME':
if hasattr(obj, 'tzid_param'):
# Keep a copy of the original TZID around
obj.params['X-VOBJ-ORIGINAL-TZID'] = [obj.tzid_param]
del obj.tzid_param
return obj
@staticmethod
def transformFromNative(obj):
"""Replace the date or datetime in obj.value with an ISO 8601 string."""
if type(obj.value) == datetime.date:
obj.isNative = False
obj.value_param = 'DATE'
obj.value = dateToString(obj.value)
return obj
else: return DateTimeBehavior.transformFromNative(obj)
class MultiDateBehavior(behavior.Behavior):
"""
Parent Behavior for ContentLines containing one or more DATE, DATE-TIME, or
PERIOD.
"""
hasNative = True
@staticmethod
def transformToNative(obj):
"""
Turn obj.value into a list of dates, datetimes, or
(datetime, timedelta) tuples.
"""
if obj.isNative:
return obj
obj.isNative = True
if obj.value == '':
obj.value = []
return obj
tzinfo = getTzid(getattr(obj, 'tzid_param', None))
valueParam = getattr(obj, 'value_param', "DATE-TIME").upper()
valTexts = obj.value.split(",")
if valueParam == "DATE":
obj.value = [stringToDate(x) for x in valTexts]
elif valueParam == "DATE-TIME":
obj.value = [stringToDateTime(x, tzinfo) for x in valTexts]
elif valueParam == "PERIOD":
obj.value = [stringToPeriod(x, tzinfo) for x in valTexts]
return obj
@staticmethod
def transformFromNative(obj):
"""
Replace the date, datetime or period tuples in obj.value with
appropriate strings.
"""
if obj.value and type(obj.value[0]) == datetime.date:
obj.isNative = False
obj.value_param = 'DATE'
obj.value = ','.join([dateToString(val) for val in obj.value])
return obj
# Fixme: handle PERIOD case
else:
if obj.isNative:
obj.isNative = False
transformed = []
tzid = None
for val in obj.value:
if tzid is None and type(val) == datetime.datetime:
tzid = TimezoneComponent.registerTzinfo(val.tzinfo)
if tzid is not None:
obj.tzid_param = tzid
transformed.append(dateTimeToString(val))
obj.value = ','.join(transformed)
return obj
class MultiTextBehavior(behavior.Behavior):
"""Provide backslash escape encoding/decoding of each of several values.
After transformation, value is a list of strings.
"""
listSeparator = ","
@classmethod
def decode(cls, line):
"""Remove backslash escaping from line.value, then split on commas."""
if line.encoded:
line.value = stringToTextValues(line.value,
listSeparator=cls.listSeparator)
line.encoded=False
@classmethod
def encode(cls, line):
"""Backslash escape line.value."""
if not line.encoded:
line.value = cls.listSeparator.join(backslashEscape(val) for val in line.value)
line.encoded=True
class SemicolonMultiTextBehavior(MultiTextBehavior):
listSeparator = ";"
#------------------------ Registered Behavior subclasses -----------------------
class VCalendar2_0(VCalendarComponentBehavior):
"""vCalendar 2.0 behavior. With added VAVAILABILITY support."""
name = 'VCALENDAR'
description = 'vCalendar 2.0, also known as iCalendar.'
versionString = '2.0'
sortFirst = ('version', 'calscale', 'method', 'prodid', 'vtimezone')
knownChildren = {'CALSCALE': (0, 1, None),#min, max, behaviorRegistry id
'METHOD': (0, 1, None),
'VERSION': (0, 1, None),#required, but auto-generated
'PRODID': (1, 1, None),
'VTIMEZONE': (0, None, None),
'VEVENT': (0, None, None),
'VTODO': (0, None, None),
'VJOURNAL': (0, None, None),
'VFREEBUSY': (0, None, None),
'VAVAILABILITY': (0, None, None),
}
@classmethod
def generateImplicitParameters(cls, obj):
"""Create PRODID, VERSION, and VTIMEZONEs if needed.
VTIMEZONEs will need to exist whenever TZID parameters exist or when
datetimes with tzinfo exist.
"""
for comp in obj.components():
if comp.behavior is not None:
comp.behavior.generateImplicitParameters(comp)
if not hasattr(obj, 'prodid'):
obj.add(ContentLine('PRODID', [], PRODID))
if not hasattr(obj, 'version'):
obj.add(ContentLine('VERSION', [], cls.versionString))
tzidsUsed = {}
def findTzids(obj, table):
if isinstance(obj, ContentLine) and (obj.behavior is None or
not obj.behavior.forceUTC):
if getattr(obj, 'tzid_param', None):
table[obj.tzid_param] = 1
else:
                    if type(obj.value) == list:
                        for item in obj.value:
                            # check each item, not the list itself, for a tzinfo
                            tzinfo = getattr(item, 'tzinfo', None)
                            tzid = TimezoneComponent.registerTzinfo(tzinfo)
                            if tzid:
                                table[tzid] = 1
else:
tzinfo = getattr(obj.value, 'tzinfo', None)
tzid = TimezoneComponent.registerTzinfo(tzinfo)
if tzid:
table[tzid] = 1
for child in obj.getChildren():
if obj.name != 'VTIMEZONE':
findTzids(child, table)
findTzids(obj, tzidsUsed)
oldtzids = [toUnicode(x.tzid.value) for x in getattr(obj, 'vtimezone_list', [])]
for tzid in tzidsUsed.keys():
tzid = toUnicode(tzid)
if tzid != u'UTC' and tzid not in oldtzids:
obj.add(TimezoneComponent(tzinfo=getTzid(tzid)))
registerBehavior(VCalendar2_0)
class VTimezone(VCalendarComponentBehavior):
"""Timezone behavior."""
name = 'VTIMEZONE'
hasNative = True
description = 'A grouping of component properties that defines a time zone.'
sortFirst = ('tzid', 'last-modified', 'tzurl', 'standard', 'daylight')
knownChildren = {'TZID': (1, 1, None),#min, max, behaviorRegistry id
'LAST-MODIFIED':(0, 1, None),
'TZURL': (0, 1, None),
'STANDARD': (0, None, None),#NOTE: One of Standard or
'DAYLIGHT': (0, None, None) # Daylight must appear
}
@classmethod
def validate(cls, obj, raiseException, *args):
if not hasattr(obj, 'tzid') or obj.tzid.value is None:
if raiseException:
m = "VTIMEZONE components must contain a valid TZID"
raise ValidateError(m)
return False
if obj.contents.has_key('standard') or obj.contents.has_key('daylight'):
return super(VTimezone, cls).validate(obj, raiseException, *args)
else:
if raiseException:
m = "VTIMEZONE components must contain a STANDARD or a DAYLIGHT\
component"
raise ValidateError(m)
return False
@staticmethod
def transformToNative(obj):
if not obj.isNative:
object.__setattr__(obj, '__class__', TimezoneComponent)
obj.isNative = True
obj.registerTzinfo(obj.tzinfo)
return obj
@staticmethod
def transformFromNative(obj):
return obj
registerBehavior(VTimezone)
class TZID(behavior.Behavior):
"""Don't use TextBehavior for TZID.
RFC2445 only allows TZID lines to be paramtext, so they shouldn't need any
encoding or decoding. Unfortunately, some Microsoft products use commas
in TZIDs which should NOT be treated as a multi-valued text property, nor
do we want to escape them. Leaving them alone works for Microsoft's breakage,
and doesn't affect compliant iCalendar streams.
"""
registerBehavior(TZID)
class DaylightOrStandard(VCalendarComponentBehavior):
hasNative = False
knownChildren = {'DTSTART': (1, 1, None),#min, max, behaviorRegistry id
'RRULE': (0, 1, None)}
registerBehavior(DaylightOrStandard, 'STANDARD')
registerBehavior(DaylightOrStandard, 'DAYLIGHT')
class VEvent(RecurringBehavior):
"""Event behavior."""
name='VEVENT'
sortFirst = ('uid', 'recurrence-id', 'dtstart', 'duration', 'dtend')
description='A grouping of component properties, and possibly including \
"VALARM" calendar components, that represents a scheduled \
amount of time on a calendar.'
knownChildren = {'DTSTART': (0, 1, None),#min, max, behaviorRegistry id
'CLASS': (0, 1, None),
'CREATED': (0, 1, None),
'DESCRIPTION': (0, 1, None),
'GEO': (0, 1, None),
'LAST-MODIFIED':(0, 1, None),
'LOCATION': (0, 1, None),
'ORGANIZER': (0, 1, None),
'PRIORITY': (0, 1, None),
'DTSTAMP': (0, 1, None),
'SEQUENCE': (0, 1, None),
'STATUS': (0, 1, None),
'SUMMARY': (0, 1, None),
'TRANSP': (0, 1, None),
'UID': (1, 1, None),
'URL': (0, 1, None),
'RECURRENCE-ID':(0, 1, None),
'DTEND': (0, 1, None), #NOTE: Only one of DtEnd or
'DURATION': (0, 1, None), # Duration can appear
'ATTACH': (0, None, None),
'ATTENDEE': (0, None, None),
'CATEGORIES': (0, None, None),
'COMMENT': (0, None, None),
'CONTACT': (0, None, None),
'EXDATE': (0, None, None),
'EXRULE': (0, None, None),
'REQUEST-STATUS': (0, None, None),
'RELATED-TO': (0, None, None),
'RESOURCES': (0, None, None),
'RDATE': (0, None, None),
'RRULE': (0, None, None),
'VALARM': (0, None, None)
}
@classmethod
def validate(cls, obj, raiseException, *args):
if obj.contents.has_key('dtend') and obj.contents.has_key('duration'):
if raiseException:
m = "VEVENT components cannot contain both DTEND and DURATION\
components"
raise ValidateError(m)
return False
else:
return super(VEvent, cls).validate(obj, raiseException, *args)
registerBehavior(VEvent)
class VTodo(RecurringBehavior):
"""To-do behavior."""
name='VTODO'
description='A grouping of component properties and possibly "VALARM" \
calendar components that represent an action-item or \
assignment.'
knownChildren = {'DTSTART': (0, 1, None),#min, max, behaviorRegistry id
'CLASS': (0, 1, None),
'COMPLETED': (0, 1, None),
'CREATED': (0, 1, None),
'DESCRIPTION': (0, 1, None),
'GEO': (0, 1, None),
'LAST-MODIFIED':(0, 1, None),
'LOCATION': (0, 1, None),
'ORGANIZER': (0, 1, None),
'PERCENT': (0, 1, None),
'PRIORITY': (0, 1, None),
'DTSTAMP': (0, 1, None),
'SEQUENCE': (0, 1, None),
'STATUS': (0, 1, None),
'SUMMARY': (0, 1, None),
'UID': (0, 1, None),
'URL': (0, 1, None),
'RECURRENCE-ID':(0, 1, None),
'DUE': (0, 1, None), #NOTE: Only one of Due or
'DURATION': (0, 1, None), # Duration can appear
'ATTACH': (0, None, None),
'ATTENDEE': (0, None, None),
'CATEGORIES': (0, None, None),
'COMMENT': (0, None, None),
'CONTACT': (0, None, None),
'EXDATE': (0, None, None),
'EXRULE': (0, None, None),
'REQUEST-STATUS': (0, None, None),
'RELATED-TO': (0, None, None),
'RESOURCES': (0, None, None),
'RDATE': (0, None, None),
'RRULE': (0, None, None),
'VALARM': (0, None, None)
}
@classmethod
def validate(cls, obj, raiseException, *args):
if obj.contents.has_key('due') and obj.contents.has_key('duration'):
if raiseException:
m = "VTODO components cannot contain both DUE and DURATION\
components"
raise ValidateError(m)
return False
else:
return super(VTodo, cls).validate(obj, raiseException, *args)
registerBehavior(VTodo)
class VJournal(RecurringBehavior):
"""Journal entry behavior."""
name='VJOURNAL'
knownChildren = {'DTSTART': (0, 1, None),#min, max, behaviorRegistry id
'CLASS': (0, 1, None),
'CREATED': (0, 1, None),
'DESCRIPTION': (0, 1, None),
'LAST-MODIFIED':(0, 1, None),
'ORGANIZER': (0, 1, None),
'DTSTAMP': (0, 1, None),
'SEQUENCE': (0, 1, None),
'STATUS': (0, 1, None),
'SUMMARY': (0, 1, None),
'UID': (0, 1, None),
'URL': (0, 1, None),
'RECURRENCE-ID':(0, 1, None),
'ATTACH': (0, None, None),
'ATTENDEE': (0, None, None),
'CATEGORIES': (0, None, None),
'COMMENT': (0, None, None),
'CONTACT': (0, None, None),
'EXDATE': (0, None, None),
'EXRULE': (0, None, None),
'REQUEST-STATUS': (0, None, None),
'RELATED-TO': (0, None, None),
'RDATE': (0, None, None),
'RRULE': (0, None, None)
}
registerBehavior(VJournal)
class VFreeBusy(VCalendarComponentBehavior):
"""Free/busy state behavior.
>>> vfb = newFromBehavior('VFREEBUSY')
>>> vfb.add('uid').value = 'test'
>>> vfb.add('dtstart').value = datetime.datetime(2006, 2, 16, 1, tzinfo=utc)
>>> vfb.add('dtend').value = vfb.dtstart.value + twoHours
>>> vfb.add('freebusy').value = [(vfb.dtstart.value, twoHours / 2)]
>>> vfb.add('freebusy').value = [(vfb.dtstart.value, vfb.dtend.value)]
>>> print vfb.serialize()
BEGIN:VFREEBUSY
UID:test
DTSTART:20060216T010000Z
DTEND:20060216T030000Z
FREEBUSY:20060216T010000Z/PT1H
FREEBUSY:20060216T010000Z/20060216T030000Z
END:VFREEBUSY
"""
name='VFREEBUSY'
description='A grouping of component properties that describe either a \
request for free/busy time, describe a response to a request \
for free/busy time or describe a published set of busy time.'
sortFirst = ('uid', 'dtstart', 'duration', 'dtend')
knownChildren = {'DTSTART': (0, 1, None),#min, max, behaviorRegistry id
'CONTACT': (0, 1, None),
'DTEND': (0, 1, None),
'DURATION': (0, 1, None),
'ORGANIZER': (0, 1, None),
'DTSTAMP': (0, 1, None),
'UID': (0, 1, None),
'URL': (0, 1, None),
'ATTENDEE': (0, None, None),
'COMMENT': (0, None, None),
'FREEBUSY': (0, None, None),
'REQUEST-STATUS': (0, None, None)
}
registerBehavior(VFreeBusy)
class VAlarm(VCalendarComponentBehavior):
"""Alarm behavior."""
name='VALARM'
description='Alarms describe when and how to provide alerts about events \
and to-dos.'
knownChildren = {'ACTION': (1, 1, None),#min, max, behaviorRegistry id
'TRIGGER': (1, 1, None),
'DURATION': (0, 1, None),
'REPEAT': (0, 1, None),
'DESCRIPTION': (0, 1, None)
}
@staticmethod
def generateImplicitParameters(obj):
"""Create default ACTION and TRIGGER if they're not set."""
try:
obj.action
except AttributeError:
obj.add('action').value = 'AUDIO'
try:
obj.trigger
except AttributeError:
obj.add('trigger').value = datetime.timedelta(0)
@classmethod
def validate(cls, obj, raiseException, *args):
"""
#TODO
audioprop = 2*(
; 'action' and 'trigger' are both REQUIRED,
; but MUST NOT occur more than once
action / trigger /
; 'duration' and 'repeat' are both optional,
; and MUST NOT occur more than once each,
; but if one occurs, so MUST the other
duration / repeat /
; the following is optional,
; but MUST NOT occur more than once
attach /
dispprop = 3*(
; the following are all REQUIRED,
; but MUST NOT occur more than once
action / description / trigger /
; 'duration' and 'repeat' are both optional,
; and MUST NOT occur more than once each,
; but if one occurs, so MUST the other
duration / repeat /
emailprop = 5*(
; the following are all REQUIRED,
; but MUST NOT occur more than once
action / description / trigger / summary
; the following is REQUIRED,
; and MAY occur more than once
attendee /
; 'duration' and 'repeat' are both optional,
; and MUST NOT occur more than once each,
; but if one occurs, so MUST the other
duration / repeat /
procprop = 3*(
; the following are all REQUIRED,
; but MUST NOT occur more than once
action / attach / trigger /
; 'duration' and 'repeat' are both optional,
; and MUST NOT occur more than once each,
; but if one occurs, so MUST the other
duration / repeat /
; 'description' is optional,
; and MUST NOT occur more than once
description /
if obj.contents.has_key('dtend') and obj.contents.has_key('duration'):
if raiseException:
m = "VEVENT components cannot contain both DTEND and DURATION\
components"
raise ValidateError(m)
return False
else:
return super(VEvent, cls).validate(obj, raiseException, *args)
"""
return True
registerBehavior(VAlarm)
class VAvailability(VCalendarComponentBehavior):
"""Availability state behavior.
>>> vav = newFromBehavior('VAVAILABILITY')
>>> vav.add('uid').value = 'test'
>>> vav.add('dtstamp').value = datetime.datetime(2006, 2, 15, 0, tzinfo=utc)
>>> vav.add('dtstart').value = datetime.datetime(2006, 2, 16, 0, tzinfo=utc)
>>> vav.add('dtend').value = datetime.datetime(2006, 2, 17, 0, tzinfo=utc)
>>> vav.add('busytype').value = "BUSY"
>>> av = newFromBehavior('AVAILABLE')
>>> av.add('uid').value = 'test1'
>>> av.add('dtstamp').value = datetime.datetime(2006, 2, 15, 0, tzinfo=utc)
>>> av.add('dtstart').value = datetime.datetime(2006, 2, 16, 9, tzinfo=utc)
>>> av.add('dtend').value = datetime.datetime(2006, 2, 16, 12, tzinfo=utc)
>>> av.add('summary').value = "Available in the morning"
>>> ignore = vav.add(av)
>>> print vav.serialize()
BEGIN:VAVAILABILITY
UID:test
DTSTART:20060216T000000Z
DTEND:20060217T000000Z
BEGIN:AVAILABLE
UID:test1
DTSTART:20060216T090000Z
DTEND:20060216T120000Z
DTSTAMP:20060215T000000Z
SUMMARY:Available in the morning
END:AVAILABLE
BUSYTYPE:BUSY
DTSTAMP:20060215T000000Z
END:VAVAILABILITY
"""
name='VAVAILABILITY'
description='A component used to represent a user\'s available time slots.'
sortFirst = ('uid', 'dtstart', 'duration', 'dtend')
knownChildren = {'UID': (1, 1, None),#min, max, behaviorRegistry id
'DTSTAMP': (1, 1, None),
'BUSYTYPE': (0, 1, None),
'CREATED': (0, 1, None),
'DTSTART': (0, 1, None),
'LAST-MODIFIED': (0, 1, None),
'ORGANIZER': (0, 1, None),
'SEQUENCE': (0, 1, None),
'SUMMARY': (0, 1, None),
'URL': (0, 1, None),
'DTEND': (0, 1, None),
'DURATION': (0, 1, None),
'CATEGORIES': (0, None, None),
'COMMENT': (0, None, None),
'CONTACT': (0, None, None),
'AVAILABLE': (0, None, None),
}
@classmethod
def validate(cls, obj, raiseException, *args):
if obj.contents.has_key('dtend') and obj.contents.has_key('duration'):
if raiseException:
m = "VAVAILABILITY components cannot contain both DTEND and DURATION\
components"
raise ValidateError(m)
return False
else:
return super(VAvailability, cls).validate(obj, raiseException, *args)
registerBehavior(VAvailability)
class Available(RecurringBehavior):
"""Event behavior."""
name='AVAILABLE'
sortFirst = ('uid', 'recurrence-id', 'dtstart', 'duration', 'dtend')
description='Defines a period of time in which a user is normally available.'
knownChildren = {'DTSTAMP': (1, 1, None),#min, max, behaviorRegistry id
'DTSTART': (1, 1, None),
'UID': (1, 1, None),
'DTEND': (0, 1, None), #NOTE: One of DtEnd or
'DURATION': (0, 1, None), # Duration must appear, but not both
'CREATED': (0, 1, None),
'LAST-MODIFIED':(0, 1, None),
'RECURRENCE-ID':(0, 1, None),
'RRULE': (0, 1, None),
'SUMMARY': (0, 1, None),
'CATEGORIES': (0, None, None),
'COMMENT': (0, None, None),
'CONTACT': (0, None, None),
'EXDATE': (0, None, None),
'RDATE': (0, None, None),
}
@classmethod
def validate(cls, obj, raiseException, *args):
has_dtend = obj.contents.has_key('dtend')
has_duration = obj.contents.has_key('duration')
if has_dtend and has_duration:
if raiseException:
m = "AVAILABLE components cannot contain both DTEND and DURATION\
properties"
raise ValidateError(m)
return False
elif not (has_dtend or has_duration):
if raiseException:
m = "AVAILABLE components must contain one of DTEND or DURATION\
properties"
raise ValidateError(m)
return False
else:
return super(Available, cls).validate(obj, raiseException, *args)
registerBehavior(Available)
class Duration(behavior.Behavior):
"""Behavior for Duration ContentLines. Transform to datetime.timedelta."""
name = 'DURATION'
hasNative = True
@staticmethod
def transformToNative(obj):
"""Turn obj.value into a datetime.timedelta."""
if obj.isNative: return obj
obj.isNative = True
obj.value=str(obj.value)
if obj.value == '':
return obj
else:
deltalist=stringToDurations(obj.value)
#When can DURATION have multiple durations? For now:
if len(deltalist) == 1:
obj.value = deltalist[0]
return obj
else:
raise ParseError("DURATION must have a single duration string.")
@staticmethod
def transformFromNative(obj):
"""Replace the datetime.timedelta in obj.value with an RFC2445 string.
"""
if not obj.isNative: return obj
obj.isNative = False
obj.value = timedeltaToString(obj.value)
return obj
registerBehavior(Duration)
class Trigger(behavior.Behavior):
"""DATE-TIME or DURATION"""
name='TRIGGER'
description='This property specifies when an alarm will trigger.'
hasNative = True
forceUTC = True
@staticmethod
def transformToNative(obj):
"""Turn obj.value into a timedelta or datetime."""
if obj.isNative: return obj
value = getattr(obj, 'value_param', 'DURATION').upper()
if hasattr(obj, 'value_param'):
del obj.value_param
if obj.value == '':
obj.isNative = True
return obj
elif value == 'DURATION':
try:
return Duration.transformToNative(obj)
except ParseError:
logger.warn("TRIGGER not recognized as DURATION, trying "
"DATE-TIME, because iCal sometimes exports "
"DATE-TIMEs without setting VALUE=DATE-TIME")
try:
obj.isNative = False
dt = DateTimeBehavior.transformToNative(obj)
return dt
except:
msg = "TRIGGER with no VALUE not recognized as DURATION " \
"or as DATE-TIME"
raise ParseError(msg)
elif value == 'DATE-TIME':
#TRIGGERs with DATE-TIME values must be in UTC, we could validate
#that fact, for now we take it on faith.
return DateTimeBehavior.transformToNative(obj)
else:
raise ParseError("VALUE must be DURATION or DATE-TIME")
@staticmethod
def transformFromNative(obj):
if type(obj.value) == datetime.datetime:
obj.value_param = 'DATE-TIME'
return UTCDateTimeBehavior.transformFromNative(obj)
elif type(obj.value) == datetime.timedelta:
return Duration.transformFromNative(obj)
else:
raise NativeError("Native TRIGGER values must be timedelta or datetime")
registerBehavior(Trigger)
class PeriodBehavior(behavior.Behavior):
"""A list of (date-time, timedelta) tuples.
>>> line = ContentLine('test', [], '', isNative=True)
>>> line.behavior = PeriodBehavior
>>> line.value = [(datetime.datetime(2006, 2, 16, 10), twoHours)]
>>> line.transformFromNative().value
'20060216T100000/PT2H'
>>> line.transformToNative().value
[(datetime.datetime(2006, 2, 16, 10, 0), datetime.timedelta(0, 7200))]
>>> line.value.append((datetime.datetime(2006, 5, 16, 10), twoHours))
>>> print line.serialize().strip()
TEST:20060216T100000/PT2H,20060516T100000/PT2H
"""
hasNative = True
@staticmethod
def transformToNative(obj):
"""Convert comma separated periods into tuples."""
if obj.isNative:
return obj
obj.isNative = True
if obj.value == '':
obj.value = []
return obj
tzinfo = getTzid(getattr(obj, 'tzid_param', None))
obj.value = [stringToPeriod(x, tzinfo) for x in obj.value.split(",")]
return obj
@classmethod
def transformFromNative(cls, obj):
"""Convert the list of tuples in obj.value to strings."""
if obj.isNative:
obj.isNative = False
transformed = []
for tup in obj.value:
transformed.append(periodToString(tup, cls.forceUTC))
if len(transformed) > 0:
tzid = TimezoneComponent.registerTzinfo(tup[0].tzinfo)
if not cls.forceUTC and tzid is not None:
obj.tzid_param = tzid
obj.value = ','.join(transformed)
return obj
class FreeBusy(PeriodBehavior):
"""Free or busy period of time, must be specified in UTC."""
name = 'FREEBUSY'
forceUTC = True
registerBehavior(FreeBusy)
class RRule(behavior.Behavior):
"""
Dummy behavior to avoid having RRULEs being treated as text lines (and thus
having semi-colons inaccurately escaped).
"""
registerBehavior(RRule, 'RRULE')
registerBehavior(RRule, 'EXRULE')
#------------------------ Registration of common classes -----------------------
utcDateTimeList = ['LAST-MODIFIED', 'CREATED', 'COMPLETED', 'DTSTAMP']
map(lambda x: registerBehavior(UTCDateTimeBehavior, x), utcDateTimeList)
dateTimeOrDateList = ['DTEND', 'DTSTART', 'DUE', 'RECURRENCE-ID']
map(lambda x: registerBehavior(DateOrDateTimeBehavior, x),
dateTimeOrDateList)
registerBehavior(MultiDateBehavior, 'RDATE')
registerBehavior(MultiDateBehavior, 'EXDATE')
textList = ['CALSCALE', 'METHOD', 'PRODID', 'CLASS', 'COMMENT', 'DESCRIPTION',
'LOCATION', 'STATUS', 'SUMMARY', 'TRANSP', 'CONTACT', 'RELATED-TO',
'UID', 'ACTION', 'BUSYTYPE']
map(lambda x: registerBehavior(TextBehavior, x), textList)
multiTextList = ['CATEGORIES', 'RESOURCES']
map(lambda x: registerBehavior(MultiTextBehavior, x), multiTextList)
registerBehavior(SemicolonMultiTextBehavior, 'REQUEST-STATUS')
#------------------------ Serializing helper functions -------------------------
def numToDigits(num, places):
"""Helper, for converting numbers to textual digits."""
s = str(num)
if len(s) < places:
return ("0" * (places - len(s))) + s
elif len(s) > places:
return s[len(s)-places: ]
else:
return s
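# Examples (illustrative, added for clarity):
#   numToDigits(7, 2)    -> '07'  (left-pad with zeros)
#   numToDigits(2015, 2) -> '15'  (keep only the trailing digits)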
def timedeltaToString(delta):
"""Convert timedelta to an rfc2445 DURATION."""
if delta.days == 0: sign = 1
else: sign = delta.days / abs(delta.days)
delta = abs(delta)
days = delta.days
hours = delta.seconds / 3600
minutes = (delta.seconds % 3600) / 60
seconds = delta.seconds % 60
out = ''
if sign == -1: out = '-'
out += 'P'
if days: out += str(days) + 'D'
if hours or minutes or seconds: out += 'T'
elif not days: #Deal with zero duration
out += 'T0S'
if hours: out += str(hours) + 'H'
if minutes: out += str(minutes) + 'M'
if seconds: out += str(seconds) + 'S'
return out
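# Examples (illustrative, added for clarity):
#   timedeltaToString(datetime.timedelta(hours=2)) -> 'PT2H'
#   timedeltaToString(datetime.timedelta(days=-1)) -> '-P1D'
#   timedeltaToString(datetime.timedelta(0))       -> 'PT0S'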
def timeToString(dateOrDateTime):
"""
Wraps dateToString and dateTimeToString, returning the results
of either based on the type of the argument
"""
    # Didn't use isinstance here as date and datetime sometimes evaluate as both
if (type(dateOrDateTime) == datetime.date):
return dateToString(dateOrDateTime)
elif(type(dateOrDateTime) == datetime.datetime):
return dateTimeToString(dateOrDateTime)
def dateToString(date):
year = numToDigits( date.year, 4 )
month = numToDigits( date.month, 2 )
day = numToDigits( date.day, 2 )
return year + month + day
def dateTimeToString(dateTime, convertToUTC=False):
"""Ignore tzinfo unless convertToUTC. Output string."""
if dateTime.tzinfo and convertToUTC:
dateTime = dateTime.astimezone(utc)
if tzinfo_eq(dateTime.tzinfo, utc): utcString = "Z"
else: utcString = ""
year = numToDigits( dateTime.year, 4 )
month = numToDigits( dateTime.month, 2 )
day = numToDigits( dateTime.day, 2 )
hour = numToDigits( dateTime.hour, 2 )
mins = numToDigits( dateTime.minute, 2 )
secs = numToDigits( dateTime.second, 2 )
return year + month + day + "T" + hour + mins + secs + utcString
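# Examples (illustrative, added for clarity):
#   dateToString(datetime.date(2006, 2, 16))                         -> '20060216'
#   dateTimeToString(datetime.datetime(2006, 2, 16, 10))             -> '20060216T100000'
#   dateTimeToString(datetime.datetime(2006, 2, 16, 10, tzinfo=utc)) -> '20060216T100000Z'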
def deltaToOffset(delta):
absDelta = abs(delta)
hours = absDelta.seconds / 3600
hoursString = numToDigits(hours, 2)
minutesString = '00'
if absDelta == delta:
signString = "+"
else:
signString = "-"
return signString + hoursString + minutesString
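# Examples (illustrative, added for clarity):
#   deltaToOffset(datetime.timedelta(hours=1))  -> '+0100'
#   deltaToOffset(datetime.timedelta(hours=-5)) -> '-0500'
# Note the minutes component is hard-coded to '00' above, so half-hour
# offsets such as +05:30 cannot be represented by this helper as written.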
def periodToString(period, convertToUTC=False):
txtstart = dateTimeToString(period[0], convertToUTC)
if isinstance(period[1], datetime.timedelta):
txtend = timedeltaToString(period[1])
else:
txtend = dateTimeToString(period[1], convertToUTC)
return txtstart + "/" + txtend
#----------------------- Parsing functions -------------------------------------
def isDuration(s):
s = string.upper(s)
return (string.find(s, "P") != -1) and (string.find(s, "P") < 2)
def stringToDate(s):
year = int( s[0:4] )
month = int( s[4:6] )
day = int( s[6:8] )
return datetime.date(year,month,day)
def stringToDateTime(s, tzinfo=None):
"""Returns datetime.datetime object."""
try:
year = int( s[0:4] )
month = int( s[4:6] )
day = int( s[6:8] )
hour = int( s[9:11] )
minute = int( s[11:13] )
second = int( s[13:15] )
if len(s) > 15:
if s[15] == 'Z':
tzinfo = utc
except:
raise ParseError("'%s' is not a valid DATE-TIME" % s)
return datetime.datetime(year, month, day, hour, minute, second, 0, tzinfo)
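# Examples (illustrative, added for clarity):
#   stringToDate('20060216')             -> datetime.date(2006, 2, 16)
#   stringToDateTime('20060216T100000')  -> datetime.datetime(2006, 2, 16, 10, 0)
#   stringToDateTime('20060216T100000Z') -> the same instant with tzinfo=utc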
# DQUOTE included to work around iCal's penchant for backslash escaping it,
# although it isn't actually supposed to be escaped according to rfc2445 TEXT
escapableCharList = '\\;,Nn"'
def stringToTextValues(s, listSeparator=',', charList=None, strict=False):
"""Returns list of strings."""
if charList is None:
charList = escapableCharList
def escapableChar (c):
return c in charList
def error(msg):
if strict:
raise ParseError(msg)
else:
#logger.error(msg)
print msg
#vars which control state machine
charIterator = enumerate(s)
state = "read normal"
current = []
results = []
while True:
try:
charIndex, char = charIterator.next()
except:
char = "eof"
if state == "read normal":
if char == '\\':
state = "read escaped char"
elif char == listSeparator:
state = "read normal"
current = "".join(current)
results.append(current)
current = []
elif char == "eof":
state = "end"
else:
state = "read normal"
current.append(char)
elif state == "read escaped char":
if escapableChar(char):
state = "read normal"
if char in 'nN':
current.append('\n')
else:
current.append(char)
else:
state = "read normal"
# leave unrecognized escaped characters for later passes
current.append('\\' + char)
elif state == "end": #an end state
if len(current) or len(results) == 0:
current = "".join(current)
results.append(current)
return results
elif state == "error": #an end state
return results
else:
state = "error"
error("error: unknown state: '%s' reached in %s" % (state, s))
def stringToDurations(s, strict=False):
"""Returns list of timedelta objects."""
def makeTimedelta(sign, week, day, hour, minute, sec):
if sign == "-": sign = -1
else: sign = 1
week = int(week)
day = int(day)
hour = int(hour)
minute = int(minute)
sec = int(sec)
return sign * datetime.timedelta(weeks=week, days=day, hours=hour, minutes=minute, seconds=sec)
def error(msg):
if strict:
raise ParseError(msg)
else:
raise ParseError(msg)
#logger.error(msg)
#vars which control state machine
    charIterator = enumerate(s)
    charIndex = -1  # initialized so the "eof" bookkeeping works for empty input
state = "start"
durations = []
current = ""
sign = None
week = 0
day = 0
hour = 0
minute = 0
sec = 0
while True:
try:
charIndex, char = charIterator.next()
except:
charIndex += 1
char = "eof"
if state == "start":
if char == '+':
state = "start"
sign = char
elif char == '-':
state = "start"
sign = char
elif char.upper() == 'P':
state = "read field"
elif char == "eof":
state = "error"
error("got end-of-line while reading in duration: " + s)
elif char in string.digits:
state = "read field"
current = current + char #update this part when updating "read field"
else:
state = "error"
print "got unexpected character %s reading in duration: %s" % (char, s)
error("got unexpected character %s reading in duration: %s" % (char, s))
elif state == "read field":
if (char in string.digits):
state = "read field"
current = current + char #update part above when updating "read field"
elif char.upper() == 'T':
state = "read field"
elif char.upper() == 'W':
state = "read field"
week = current
current = ""
elif char.upper() == 'D':
state = "read field"
day = current
current = ""
elif char.upper() == 'H':
state = "read field"
hour = current
current = ""
elif char.upper() == 'M':
state = "read field"
minute = current
current = ""
elif char.upper() == 'S':
state = "read field"
sec = current
current = ""
elif char == ",":
state = "start"
durations.append( makeTimedelta(sign, week, day, hour, minute, sec) )
current = ""
            sign = None
            # reset the fields to 0 (not None) so int() inside makeTimedelta
            # does not fail when a second duration follows the comma
            week = 0
            day = 0
            hour = 0
            minute = 0
            sec = 0
elif char == "eof":
state = "end"
else:
state = "error"
error("got unexpected character reading in duration: " + s)
elif state == "end": #an end state
#print "stuff: %s, durations: %s" % ([current, sign, week, day, hour, minute, sec], durations)
if (sign or week or day or hour or minute or sec):
durations.append( makeTimedelta(sign, week, day, hour, minute, sec) )
return durations
elif state == "error": #an end state
error("in error state")
return durations
else:
state = "error"
error("error: unknown state: '%s' reached in %s" % (state, s))
def parseDtstart(contentline, allowSignatureMismatch=False):
"""Convert a contentline's value into a date or date-time.
A variety of clients don't serialize dates with the appropriate VALUE
parameter, so rather than failing on these (technically invalid) lines,
if allowSignatureMismatch is True, try to parse both varieties.
"""
tzinfo = getTzid(getattr(contentline, 'tzid_param', None))
valueParam = getattr(contentline, 'value_param', 'DATE-TIME').upper()
if valueParam == "DATE":
return stringToDate(contentline.value)
elif valueParam == "DATE-TIME":
try:
return stringToDateTime(contentline.value, tzinfo)
except:
if allowSignatureMismatch:
return stringToDate(contentline.value)
else:
raise
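# Usage sketch (illustrative, added for clarity):
#   line = ContentLine('DTSTART', [], '20060216T100000')
#   parseDtstart(line)  -> datetime.datetime(2006, 2, 16, 10, 0)
# With VALUE=DATE (or, under allowSignatureMismatch, a bare 'YYYYMMDD' value)
# a datetime.date is returned instead.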
def stringToPeriod(s, tzinfo=None):
values = string.split(s, "/")
start = stringToDateTime(values[0], tzinfo)
valEnd = values[1]
if isDuration(valEnd): #period-start = date-time "/" dur-value
delta = stringToDurations(valEnd)[0]
return (start, delta)
else:
return (start, stringToDateTime(valEnd, tzinfo))
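# Examples (illustrative, added for clarity):
#   stringToPeriod('20060216T100000Z/PT2H')
#       -> (datetime.datetime(2006, 2, 16, 10, tzinfo=utc), datetime.timedelta(hours=2))
#   stringToPeriod('20060216T100000Z/20060216T120000Z')
#       -> (start datetime, end datetime)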
def getTransition(transitionTo, year, tzinfo):
"""Return the datetime of the transition to/from DST, or None."""
def firstTransition(iterDates, test):
"""
Return the last date not matching test, or None if all tests matched.
"""
success = None
for dt in iterDates:
if not test(dt):
success = dt
else:
if success is not None:
return success
return success # may be None
def generateDates(year, month=None, day=None):
"""Iterate over possible dates with unspecified values."""
months = range(1, 13)
days = range(1, 32)
hours = range(0, 24)
if month is None:
for month in months:
yield datetime.datetime(year, month, 1)
elif day is None:
for day in days:
try:
yield datetime.datetime(year, month, day)
except ValueError:
pass
else:
for hour in hours:
yield datetime.datetime(year, month, day, hour)
assert transitionTo in ('daylight', 'standard')
if transitionTo == 'daylight':
def test(dt): return tzinfo.dst(dt) != zeroDelta
elif transitionTo == 'standard':
def test(dt): return tzinfo.dst(dt) == zeroDelta
newyear = datetime.datetime(year, 1, 1)
monthDt = firstTransition(generateDates(year), test)
if monthDt is None:
return newyear
elif monthDt.month == 12:
return None
else:
# there was a good transition somewhere in a non-December month
month = monthDt.month
day = firstTransition(generateDates(year, month), test).day
uncorrected = firstTransition(generateDates(year, month, day), test)
if transitionTo == 'standard':
# assuming tzinfo.dst returns a new offset for the first
# possible hour, we need to add one hour for the offset change
# and another hour because firstTransition returns the hour
# before the transition
return uncorrected + datetime.timedelta(hours=2)
else:
return uncorrected + datetime.timedelta(hours=1)
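# Usage sketch (illustrative; the exact result depends on the local tz database):
#   eastern = dateutil.tz.gettz('US/Eastern')
#   getTransition('daylight', 2005, eastern)
#       -> roughly datetime.datetime(2005, 4, 3, 2, 0), the first wall-clock
#          hour of the daylight regime that year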
def tzinfo_eq(tzinfo1, tzinfo2, startYear = 2000, endYear=2020):
"""Compare offsets and DST transitions from startYear to endYear."""
if tzinfo1 == tzinfo2:
return True
elif tzinfo1 is None or tzinfo2 is None:
return False
def dt_test(dt):
if dt is None:
return True
return tzinfo1.utcoffset(dt) == tzinfo2.utcoffset(dt)
if not dt_test(datetime.datetime(startYear, 1, 1)):
return False
for year in xrange(startYear, endYear):
for transitionTo in 'daylight', 'standard':
t1=getTransition(transitionTo, year, tzinfo1)
t2=getTransition(transitionTo, year, tzinfo2)
if t1 != t2 or not dt_test(t1):
return False
return True
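# Examples (illustrative, added for clarity):
#   tzinfo_eq(utc, dateutil.tz.tzutc()) -> True  (identical offsets everywhere)
#   tzinfo_eq(utc, None)                -> False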
#------------------- Testing and running functions -----------------------------
if __name__ == '__main__':
import tests
tests._test()
| 39.673448 | 106 | 0.517369 |
4a1da7f53bffa9dfd6d061b5ca60bed559e2c638 | 11,213 | py | Python | data/prepare_retailrocket_dataset.py | btwardow/dml4rec | 3bc2ca4d85ef9f8d94f0ee0a07d1d67cec671e66 | ["MIT"] | 6 | 2021-03-29T14:47:43.000Z | 2021-11-19T08:19:25.000Z | data/prepare_retailrocket_dataset.py | btwardow/dml4rec | 3bc2ca4d85ef9f8d94f0ee0a07d1d67cec671e66 | ["MIT"] | null | null | null | data/prepare_retailrocket_dataset.py | btwardow/dml4rec | 3bc2ca4d85ef9f8d94f0ee0a07d1d67cec671e66 | ["MIT"] | 2 | 2021-01-24T17:16:36.000Z | 2021-10-19T07:50:06.000Z |
import csv
import os
import json
import gzip
import math
from collections import deque
from datetime import datetime
import numpy as np
import pandas as pd
from rec.dataset.dataset import Dataset
import rec.model as m
directory = 'data/retailrocket/'
directory_input = directory + 'raw/'
input_path_events = directory_input + 'sorted_events.csv'
input_path_items = [
directory_input + 'sorted_item_properties_part1.csv', directory_input + 'sorted_item_properties_part2.csv'
]
input_category_tree = directory_input + 'category_tree.csv'
items_jsonl_path = directory + 'items'
events_jsonl_path = directory + 'sessions'
delimiter = ','
datasets = 5
datasets_dir_prefix = 'data/dataset/RR'
datasets_dirs = []
timestamp_first_event = 1430622004384
timestamp_last_event = 1442545187788
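# The two timestamps above are epoch milliseconds bounding the raw event log
# (roughly 2015-05-03 .. 2015-09-18); calculate_file_no (defined further down
# in this module) presumably buckets each event into one of the `datasets`
# consecutive time slices of that interval.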
class RetailRocket:
def __init__(self):
self.items = dict()
self.category_tree = dict()
self.users_sessions = dict()
self.next_session_id = 0
self.items_in_datasets = dict()
self.items_all_properties = set()
self.items_mutable_properties = set()
for i in range(datasets):
self.items_in_datasets[i] = set()
def prepare_items(self):
self._read_category_tree()
for input_path in input_path_items:
self._add_items_properties(input_path)
self._find_immutable_properties()
def generate_events_file(self):
rows = self._prepare_events()
data = self._filter_events(rows)
self._save_events_to_file(data)
def save_items_to_file(self):
print('Saving all items...')
with gzip.open(f'{datasets_dir_prefix}/items.jsonl.gz', 'wt') as f:
for item in self.items.values():
f.write(item.transform_into_jsonl_format())
f.write('\n')
print('Saving splited items...')
for i in range(datasets):
items_set = self.items_in_datasets[i]
with gzip.open(f'{datasets_dir_prefix}-{i+1}/items.jsonl.gz', 'wt') as f:
for item_id in items_set:
item_jsonl = self.items[item_id].transform_into_jsonl_format()
f.write(item_jsonl)
f.write('\n')
def _prepare_events(self):
rows = []
with open(input_path_events) as input_file:
csv_reader = csv.reader(input_file, delimiter=delimiter)
next(csv_reader, None)
for line in csv_reader:
event_jsonl = self._prepare_event_in_jsonl(line)
if event_jsonl is not None:
ev_dict = json.loads(event_jsonl)
file_no = self.calculate_file_no(ev_dict['timestamp'])
row = [ev_dict['sessionId'], ev_dict['clickedItem'], ev_dict['timestamp'], event_jsonl, file_no]
rows.append(row)
return rows
def _filter_events(self, rows):
columns = ['session_id', 'item_id', 'timestamp', 'event_jsonl', 'file_no']
return self._filter_data(pd.DataFrame(rows, columns=columns))
def _save_events_to_file(self, data):
for i in range(datasets):
d = f'{datasets_dir_prefix}-{i+1}'
os.makedirs(d, exist_ok=True)
datasets_dirs.append(d)
os.makedirs(datasets_dir_prefix, exist_ok=True)
datasets_dirs.append(datasets_dir_prefix)
print('Saving all events dataset...')
with gzip.open(f'{datasets_dir_prefix}/sessions.jsonl.gz', 'wt') as f:
for _, row in data.iterrows():
f.write(row['event_jsonl'] + '\n')
print('Saving splited events datasets...')
outputs = [gzip.open(f'{datasets_dir_prefix}-{i+1}/sessions.jsonl.gz', 'wt') for i in range(datasets)]
for _, row in data.iterrows():
if row['file_no'] < datasets:
if row['item_id'] in self.items:
outputs[row['file_no']].write(row['event_jsonl'] + '\n')
self.items_in_datasets[row['file_no']].add(row['item_id'])
else:
print(f'Item id: {row.item_id} is clicked but not in items dataset')
        # map() is lazy on Python 3, so close the files explicitly
        for f in outputs:
            f.close()
def _add_items_properties(self, path):
with open(path) as input_file:
csv_reader = csv.reader(input_file, delimiter=delimiter)
next(csv_reader, None)
for line in csv_reader:
self._add_item_property(line)
def _add_item_property(self, line):
assert len(line) == 4
timestamp = int(line[0])
item_id = line[1]
property_name = line[2]
value = line[3].strip().split(' ')
        if len(value) == 1:  # single value, no array is necessary
value = value[0]
if item_id not in self.items.keys():
self.items[item_id] = Item(item_id)
self.items[item_id].add_property(property_name, timestamp, value)
if property_name == "categoryid" and value in self.category_tree:
category_path_ids = self._read_path_to_root(value)
self.items[item_id].add_property("category_path_ids", timestamp, category_path_ids)
def _read_path_to_root(self, leaf):
current_node = leaf
result = deque([current_node])
while self.category_tree[current_node] != current_node:
current_node = self.category_tree[current_node]
result.appendleft(current_node)
return result
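    # Worked example for _read_path_to_root above (illustrative): with
    # category_tree = {1: 1, 5: 1, 9: 5}, a leaf of 9 walks 9 -> 5 -> 1 and
    # returns deque([1, 5, 9]), the path from the root category to the leaf.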
def _read_category_tree(self):
with open(input_category_tree) as input_file:
csv_reader = csv.reader(input_file, delimiter=delimiter)
next(csv_reader, None)
for line in csv_reader:
if line[1] != "":
self.category_tree[int(line[0])] = int(line[1])
else: # when line describes root category
self.category_tree[int(line[0])] = int(line[0])
def _find_immutable_properties(self):
for item_id, item in self.items.items():
for k, v in item.properties.items(): # k = property name, v = list of tuples (timestamp, value)
self.items_all_properties.add(k)
if len(v) > 1: # if for all timestamps there is the same value => not muttable
for el in v:
if el[1] != v[0][1]:
self.items_mutable_properties.add(k)
break
print(
f'All items properties number: {len(self.items_all_properties)}, mutable: {len(self.items_mutable_properties)}'
)
for item_id, item in self.items.items():
for k, v in item.properties.items():
if k in self.items_mutable_properties:
item.mutable_properties[k] = v
else:
item.immutable_properties[k] = v[0][1] # take first value
@staticmethod
def normalize_context(r):
d = dict()
attribs = []
for k, values in r.items():
if not isinstance(values, list):
values = [values]
for v in values:
if v.startswith('n'): # number
f = float(v[1:])
if math.isinf(f):
print(f'Infinity! Bad value for {k} : {v}. Skipping...')
continue
d[k] = f
else:
attribs.append(f'{k}|{v}')
d['properties'] = attribs
return d
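    # Worked example for normalize_context above (illustrative): values
    # prefixed with 'n' are parsed as numbers, everything else becomes a
    # 'key|value' property tag, e.g.
    #   normalize_context({'790': 'n5.5', 'categoryid': '123'})
    #   -> {'790': 5.5, 'properties': ['categoryid|123']}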
def _prepare_event_in_jsonl(self, line):
def converter(o):
if isinstance(o, datetime):
return o.__str__()
timestamp = int(line[0])
user_id = int(line[1])
item_id = line[3]
if user_id not in self.users_sessions:
self.users_sessions[user_id] = [timestamp, self.next_session_id]
self.next_session_id += 1
else:
if timestamp - self.users_sessions[user_id][0] > 30 * 60 * 1000: # 30 min * 60s * 1000ms
self.users_sessions[user_id] = [timestamp, self.next_session_id]
self.next_session_id += 1
else:
self.users_sessions[user_id][0] = timestamp # update last activity in session
if item_id in self.items:
data = {
m.TIMESTAMP: timestamp,
m.EVENT_USER_ID: user_id,
m.EVENT_TYPE: line[2],
m.EVENT_ITEM: item_id,
m.EVENT_SESSION_ID: self.users_sessions[user_id][1]
}
context = self._prepare_context(item_id, timestamp)
if len(context) > 0:
data[m.EVENT_CONTEXT] = RetailRocket.normalize_context(context)
return json.dumps(data, default=converter, separators=(',', ':'))
def _prepare_context(self, item_id, timestamp):
context = {}
for property, values in self.items[item_id].mutable_properties.items():
ts, val = 0, 0
for time, value in values:
if timestamp >= time > ts:
ts = time
val = value
if ts > 0:
context[property] = val
return context
@staticmethod
def _filter_data(data): # based on 130L session-rec/preprocessing/preprocess_retailrocket.py
session_lengths = data.groupby('session_id').size()
data = data[np.in1d(data.session_id, session_lengths[session_lengths > 1].index)]
item_supports = data.groupby('item_id').size()
data = data[np.in1d(data.item_id, item_supports[item_supports >= 5].index)]
session_lengths = data.groupby('session_id').size()
data = data[np.in1d(data.session_id, session_lengths[session_lengths >= 2].index)]
return data
@staticmethod
def calculate_file_no(ts):
return int((ts - timestamp_first_event) / (1000 * 60 * 60 * 24 * 27)) # 1000ms * 60s * 60min * 24h * 27d
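    # Worked example for calculate_file_no above (illustrative): the raw
    # events span 1442545187788 - 1430622004384 ms ~= 138 days, so 27-day
    # windows yield file numbers 0..5, of which the first `datasets` (5)
    # are written out as separate datasets.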
class Item:
def __init__(self, id):
self.id = str(id)
self.properties = dict() # all properties
self.immutable_properties = dict() # add to items.jsonl
self.mutable_properties = dict() # add to sessions.jsonl in context field
def add_property(self, property, timestamp, value):
if property not in self.properties.keys():
self.properties[property] = list()
self.properties[property].append((timestamp, value))
def transform_into_jsonl_format(self):
dt = {m.ITEM_ID: self.id}
dt.update(RetailRocket.normalize_context(self.immutable_properties))
return json.dumps(dt, separators=(',', ':'))
if __name__ == '__main__':
items = RetailRocket()
items.prepare_items()
items.generate_events_file()
items.save_items_to_file()
for ds_dir in datasets_dirs:
ds_name = ds_dir.split('/')[-1]
ds = Dataset(ds_name).load_from_file(f'{ds_dir}/sessions.jsonl.gz', f'{ds_dir}/items.jsonl.gz')
ds.write_to_file(f'{ds_dir}/dataset.pkl.gz')
ds = Dataset(ds_name).load_with_no_context(f'{ds_dir}/sessions.jsonl.gz')
ds.write_to_file(f'{ds_dir}/dataset_no_context.pkl.gz')
| 38.010169 | 123 | 0.595915 |
4a1da855698e8928b8bf01d8e594a7b036de6893 | 1,159 | py | Python | main.py | quervernetzt/active-directory-search-user-in-group | 80cadad6dc210095ac88b18b0eee6b0ade246f2d | [
"MIT"
] | null | null | null | main.py | quervernetzt/active-directory-search-user-in-group | 80cadad6dc210095ac88b18b0eee6b0ade246f2d | [
"MIT"
] | null | null | null | main.py | quervernetzt/active-directory-search-user-in-group | 80cadad6dc210095ac88b18b0eee6b0ade246f2d | [
"MIT"
] | null | null | null | from tests.tests_search import TestCasesSearch
from solution.group import Group
from solution.search import Search
if __name__ == "__main__":
###################################
# Tests
###################################
tests: TestCasesSearch = TestCasesSearch()
tests.execute_tests_is_user_in_group_none_input()
tests.execute_tests_is_user_in_group_user_in_root_group()
tests.execute_tests_is_user_in_group_user_in_sub_group()
tests.execute_tests_is_user_in_group_user_in_nested_sub_group()
###################################
# Demo
###################################
search_instance: Search = Search()
root_group: Group = Group("root_group")
root_group.add_user("user1")
root_group.add_user("user2")
root_group.add_user("user3")
sub_group: Group = Group("sub_group")
sub_group.add_user("user11")
sub_group.add_user("user12")
root_group.add_group(sub_group)
result: bool = search_instance.is_user_in_group("user12", root_group)
result_1: bool = search_instance.is_user_in_group("user5", root_group)
print(result) # -> True
print(result_1) # -> False
| 31.324324 | 74 | 0.647972 |
4a1daa471b61c17b61d5ac7fd5f6a1f8d79e31ef | 103 | py | Python | Miniature Hospital Management System/hospital_app/apps.py | Abdullah0297445/Django-Projects | 4864f48357cea4355bb618f46ef4930ac6e41126 | [
"MIT"
] | null | null | null | Miniature Hospital Management System/hospital_app/apps.py | Abdullah0297445/Django-Projects | 4864f48357cea4355bb618f46ef4930ac6e41126 | [
"MIT"
] | null | null | null | Miniature Hospital Management System/hospital_app/apps.py | Abdullah0297445/Django-Projects | 4864f48357cea4355bb618f46ef4930ac6e41126 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class hospitalAppConfig(AppConfig):
name = 'hospital_app'
| 17.166667 | 36 | 0.737864 |
4a1daa728e552abcb559c054f1880fa9356b861f | 666 | py | Python | abupy/AlphaBu/__init__.py | luqin/firefly | 2e5ab17f2d20deb3c68c927f6208ea89db7c639d | [
"MIT"
] | 1 | 2019-05-28T05:54:42.000Z | 2019-05-28T05:54:42.000Z | abupy/AlphaBu/__init__.py | momantang/cobrass | f11435d4836aa29078a3cd4beb4ca88967300c84 | [
"Apache-2.0"
] | 9 | 2020-03-24T16:45:25.000Z | 2022-03-11T23:40:51.000Z | abupy/AlphaBu/__init__.py | luqin/firefly | 2e5ab17f2d20deb3c68c927f6208ea89db7c639d | [
"MIT"
] | 1 | 2021-09-08T17:39:58.000Z | 2021-09-08T17:39:58.000Z | from __future__ import absolute_import
from .ABuPickBase import AbuPickTimeWorkBase, AbuPickStockWorkBase
from .ABuPickStockMaster import AbuPickStockMaster
from .ABuPickStockWorker import AbuPickStockWorker
from .ABuPickTimeWorker import AbuPickTimeWorker
from .ABuPickTimeMaster import AbuPickTimeMaster
from . import ABuPickStockExecute
from . import ABuPickTimeExecute
# noinspection all
from . import ABuAlpha as alpha
__all__ = [
'AbuPickTimeWorkBase',
'AbuPickStockWorkBase',
'AbuPickStockMaster',
'AbuPickStockWorker',
'AbuPickTimeWorker',
'AbuPickTimeMaster',
'ABuPickStockExecute',
'ABuPickTimeExecute',
'alpha'
]
| 23.785714 | 66 | 0.795796 |
4a1dab25a34c6d2b76df54050bab0ede4c738591 | 6,119 | py | Python | cm/app/api_v1/my_calculation_module_directory/CM/CM_TUW23/f2_investment.py | HotMaps/dh_economic_assessment | 28393250b8f7a68552b90f7d8612fef216cc69e0 | [
"Apache-2.0"
] | null | null | null | cm/app/api_v1/my_calculation_module_directory/CM/CM_TUW23/f2_investment.py | HotMaps/dh_economic_assessment | 28393250b8f7a68552b90f7d8612fef216cc69e0 | [
"Apache-2.0"
] | 1 | 2022-03-08T21:10:00.000Z | 2022-03-08T21:10:00.000Z | cm/app/api_v1/my_calculation_module_directory/CM/CM_TUW23/f2_investment.py | HotMaps/dh_economic_assessment | 28393250b8f7a68552b90f7d8612fef216cc69e0 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import numpy as np
from osgeo import gdal
path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.
abspath(__file__))))
if path not in sys.path:
sys.path.append(path)
from CM.CM_TUW0.rem_mk_dir import rm_file
from CM.CM_TUW19 import run_cm as CM19
def annuity(r, period):
period = int(period)
r = float(r)
if r == 0:
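        # NOTE: mathematically the r -> 0 limit of the annuity factor is
        # `period`; this module only calls annuity() with interest > 0 and
        # handles the zero-interest case separately at the call site.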
return 1
return ((1+r)**period - 1) / (r*(1+r)**period)
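# Worked example for annuity above (illustrative): annuity(0.05, 30)
#   = (1.05**30 - 1) / (0.05 * 1.05**30) ~= 15.372,
# i.e. the present-value factor of a 30-year annuity at 5% interest.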
def dh_demand(c1, c2, raster_plotratio, raster_hdm, start_year, last_year,
accumulated_energy_saving, dh_connection_rate_1st_year,
dh_connection_rate_last_year, depr_period, interest,
output_layers, dA_slope=0.0486, dA_intercept=0.0007,
dataType='float32'):
'''
    Important notes:
    1) For the calculation of the plot ratio, a gross floor area raster at
    one-hectare resolution (unit: m2) is used. It must be divided by 1e4 to
    obtain the plot ratio.
    2) If you already have a plot ratio raster, remove the 1e4 factor.
    3) The distribution cost is calculated only for those pixels whose
    corresponding pipe diameter is greater than or equal to 0.
    The input heat density map should be in GWh/km2.
'''
horizon = int(last_year) - int(start_year) + 1
horizon = int(horizon)
if horizon > int(depr_period):
horizon = depr_period
remaining_years = 0
else:
remaining_years = int(depr_period) - int(horizon)
energy_reduction_factor = (1-float(accumulated_energy_saving))**(1/(horizon-1))
hdm_ds = gdal.Open(raster_hdm)
hdm_band = hdm_ds.GetRasterBand(1)
hdm = hdm_band.ReadAsArray().astype(float)
geo_transform = hdm_ds.GetGeoTransform()
plotRatio_ds = gdal.Open(raster_plotratio)
plotRatio_band = plotRatio_ds.GetRasterBand(1)
    # the gross floor area raster (m2 per hectare) has to be divided by
    # 10000 to obtain the plot ratio (m2/m2).
plotRatio = plotRatio_band.ReadAsArray().astype(float)/10000.0
hdm_ds = plotRatio_ds = None
row, col = np.nonzero((hdm > 0).astype(int) * (plotRatio > 0.0).astype(int))
# unit conversion from MWh/ha to GJ/m2
sparseDemand = 0.00036*hdm[row, col]
PR_sparse = plotRatio[row, col]
    # L is the effective pipe length per unit land area (m per m2); to get
    # the value per pixel (1 ha) multiply by 10000, since one pixel covers
    # 10000 m2.
# The following formulation of L comes from Persson et al. 2019 paper with
# the title "Heat Roadmap Europe: Heat distribution costs"
L = 1 / ((PR_sparse <= 0.4).astype(int) * (137.5*PR_sparse + 5) + (PR_sparse > 0.4).astype(int) * 60)
# initialize the variables
q = 0
# q_new = dh_connection_rate_1st_year * sparseDemand
q_tot = np.copy(sparseDemand)
q_max = np.zeros_like(q_tot)
for i in range(horizon):
q_tot = sparseDemand * energy_reduction_factor**i
q_new = q_tot * (float(dh_connection_rate_1st_year) + i * (float(dh_connection_rate_last_year) - float(dh_connection_rate_1st_year))/(horizon-1))
# q_new is a non-zero sparse matrix. The development of demand can be
# checked by comparing just one element of q_new with q_max.
if q_new[0] > q_max[0]:
q_max = np.copy(q_new)
q += q_new / (1 + float(interest))**i
if remaining_years > 0:
if interest > 0:
alpha_horizon = annuity(interest, horizon-1)
alpha_depr = annuity(interest, depr_period)
rest_annuity_factor = alpha_depr - alpha_horizon
q = q + q_new * rest_annuity_factor
else:
q = q + q_new * remaining_years
linearHeatDensity = q_max / L
# this step is performed to avoid negative average pipe diameter
LHD_THRESHOLD = -dA_intercept/dA_slope
filtered_LHD = (np.log(linearHeatDensity) < LHD_THRESHOLD).astype(int)
elements = np.nonzero(filtered_LHD)[0]
    # average pipe diameter: dA = slope * ln(LHD) + intercept
    dA = dA_slope * np.log(linearHeatDensity) + dA_intercept
dA[elements] = 0
# lower limit of linear heat densities at 1.5 GJ/m was set. Below this
# threshold, pipe diameters of 0.02m were applied uniformly for all hectare
# grid cells with present heat density values above zero.
# Note: linearHeatDensity is calculated for cells with heat demand above zero
dA[((linearHeatDensity < 1.5).astype(int) * (dA > 0).astype(int)).astype(bool)] = 0.02
q_max[elements] = 0
denominator = q / L
""" # old code
cf1, cf2 = cost_factors(c1, c2, PR_sparse)
divisor = cf1[1] + cf2[1]*dA
"""
divisor = c1 + c2*dA
divisor[elements] = 0
investment = divisor/denominator
finalInvestment = np.zeros_like(hdm, dtype=dataType)
# from Euro/GJ/m2 to Euro/MWh/m2
finalInvestment[row, col] = investment*3.6
maxDHdem_arr = np.zeros_like(finalInvestment, dtype=dataType)
# max DH demand density in MWh within the study horizon
maxDHdem_arr[row, col] = q_max*10000/3.6
invest_Euro_arr = maxDHdem_arr * finalInvestment
hdm_last_year = np.zeros_like(finalInvestment, dtype=dataType)
# total demand in the last year of study horizon in MWh/ha
hdm_last_year[row, col] = q_tot*10000/3.6
# Length of pipes (L)
length = np.zeros_like(finalInvestment, dtype=dataType)
length[row, col] = L
length[row, col][elements] = 0
maxDHdem, invest_Euro, hdm_cut_last_year, total_pipe_length = output_layers
rm_file(maxDHdem, invest_Euro, hdm_cut_last_year, total_pipe_length)
CM19.main(maxDHdem, geo_transform, dataType, maxDHdem_arr)
CM19.main(invest_Euro, geo_transform, dataType, invest_Euro_arr)
CM19.main(hdm_cut_last_year, geo_transform, dataType, hdm_last_year)
CM19.main(total_pipe_length, geo_transform, dataType, length)
"""
# sum(MWh/ha) and convert to GWh
first_year_dem_all = np.sum(hdm)/1000
# demand in GWh for pixels with Plot Ratio > 0
first_year_dem = np.sum(sparseDemand)*10000/3600
    # total demand in last year in GWh for pixels with Plot Ratio > 0
last_year_dem = np.sum(hdm_last_year)/1000
return first_year_dem_all, first_year_dem, last_year_dem
"""
| 44.664234 | 153 | 0.680994 |
4a1dac3a1e7b43e1a13aa25af08d5de86c5d4b5a | 361 | py | Python | psatlib/contingency.py | nie93/psatlib | 19947658fb4162f325e729eab086121947d8306a | [
"MIT"
] | null | null | null | psatlib/contingency.py | nie93/psatlib | 19947658fb4162f325e729eab086121947d8306a | [
"MIT"
] | 2 | 2018-06-19T20:53:13.000Z | 2018-11-01T20:11:45.000Z | psatlib/contingency.py | nie93/psatlib | 19947658fb4162f325e729eab086121947d8306a | [
"MIT"
] | null | null | null | """ Contingency functions management. (To be dropped if not necessary)"""
__author__ = "Zhijie Nie"
import sys
if sys.version_info[0] == 2:
if sys.version_info[1] == 5:
from psat_python25 import *
elif sys.version_info[1] == 7:
from psat_python27 import *
elif sys.version_info[0] == 3:
from psat_python34 import *
error = psat_error() | 24.066667 | 73 | 0.684211 |
4a1dac6ee14f83bfb763df7a5be3ad344de97294 | 2,197 | py | Python | unical_accounts/models.py | alranel/uniAuth | 1d9dd044b7c3722d40162fc116d536fe3dfd5c7b | [
"Apache-2.0"
] | null | null | null | unical_accounts/models.py | alranel/uniAuth | 1d9dd044b7c3722d40162fc116d536fe3dfd5c7b | [
"Apache-2.0"
] | null | null | null | unical_accounts/models.py | alranel/uniAuth | 1d9dd044b7c3722d40162fc116d536fe3dfd5c7b | [
"Apache-2.0"
] | 1 | 2020-01-09T08:57:28.000Z | 2020-01-09T08:57:28.000Z | import pycountry
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import AbstractUser
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
class User(AbstractUser):
    GENDER = (
        ('male', _('Male')),
        ('female', _('Female')),
        ('other', _('Other')),
    )
first_name = models.CharField(_('Name'), max_length=30,
blank=True, null=True)
last_name = models.CharField(_('Surname'), max_length=30,
blank=True, null=True)
is_active = models.BooleanField(_('active'), default=True)
email = models.EmailField('email address', blank=True, null=True)
taxpayer_id = models.CharField(_('Taxpayer\'s identification number'),
max_length=32,
blank=True, null=True)
    gender = models.CharField(_('Gender'), choices=GENDER,
max_length=12, blank=True, null=True)
    place_of_birth = models.CharField('Place of birth', max_length=30,
blank=True, null=True,
choices=[(i.name, i.name) for i in pycountry.countries])
    birth_date = models.DateField('Date of birth',
null=True, blank=True)
persistent_id = models.CharField(_('SAML Persistent Stored ID'),
max_length=30,
blank=True, null=True)
    # short_description = models.CharField(_('Short description'), max_length=33, blank=True, null=True)
    # bio = models.TextField('Biography, notes', max_length=2048, blank=True, null=True)
    # avatar = models.ImageField('Avatar, photo', upload_to='avatars/', null=True, blank=True)
    # webpage_url = models.CharField(_('Web page'), max_length=512, blank=True, null=True)
class Meta:
ordering = ['username']
verbose_name_plural = _("Users")
def __str__(self):
return '{} {}'.format(self.first_name, self.last_name)
| 45.770833 | 104 | 0.577151 |
4a1dacaacc72e64dff6c38cd7602e3a9155640fd | 8,000 | py | Python | custom/icds/management/commands/sanitize_phone_numbers.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2020-05-05T13:10:01.000Z | 2020-05-05T13:10:01.000Z | custom/icds/management/commands/sanitize_phone_numbers.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2019-12-09T14:00:14.000Z | 2019-12-09T14:00:14.000Z | custom/icds/management/commands/sanitize_phone_numbers.py | MaciejChoromanski/commcare-hq | fd7f65362d56d73b75a2c20d2afeabbc70876867 | [
"BSD-3-Clause"
] | 5 | 2015-11-30T13:12:45.000Z | 2019-07-01T19:27:07.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
import csv342 as csv
import six
import sys
import time
from datetime import (
datetime,
date,
timedelta,
)
from io import open
from xml.etree import cElementTree as ElementTree
from django.core.management.base import BaseCommand
from corehq.apps.users.util import SYSTEM_USER_ID
from corehq.form_processor.backends.sql.dbaccessors import CaseReindexAccessor
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.util.log import with_progress_bar
from corehq.apps.locations.models import SQLLocation
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.form_processor.backends.sql.dbaccessors import iter_all_rows
from dimagi.utils.chunked import chunked
from casexml.apps.case.mock import CaseBlock
from six.moves import range
DOMAIN = "icds-cas"
CASE_TYPE = "person"
DATE_OF_REGISTRATION_PROPERTY = "date_of_registration"
PHONE_NUMBER_PROPERTY = "contact_phone_number"
HAS_MOBILE_PROPERTY = "has_mobile"
HAS_MOBILE_PROPERTY_NO_VALUE = "no"
CASE_ITERATION_COUNT = 10000
MAX_RESCUE_EXCEPTIONS_ON_UPDATE = 5
CSV_HEADERS = ['Case Id']
TEST_STATES = [
'Test State',
'Test State 2',
'VL State',
'Trial State',
'Uttar Pradesh_GZB',
'AWW Test State',
]
class Command(BaseCommand):
help = """
    Iterate over person cases updated in the last 100 days in a single
    partition and find the ones that are
    - not deleted,
    - not belonging to test locations,
    - with the has_mobile case property set to "no".
    If the contact_phone_number is "91", set the phone to blank;
    if it is present but anything other than "91", note it down.
    Returns two lists of case ids: the ones updated and the ones with
    unexpected phone numbers.
"""
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.db_alias = None
self.log_progress = False
self.case_ids_with_unexpected_phone_number = []
self.test_locations = None
self.case_accessor = CaseAccessors(DOMAIN)
def add_arguments(self, parser):
parser.add_argument('db_alias')
parser.add_argument(
'--log',
action='store_true',
dest='log_progress',
default=False,
help="log progress"
)
def _store_case_ids_with_unexpected_phone_number(self):
if self.case_ids_with_unexpected_phone_number:
filename = 'unexpected_phone_numbers_with_91_part_%s_%s.csv' % (self.db_alias, datetime.utcnow())
with open(filename, 'w+b') as output:
writer = csv.writer(output)
for case_id in self.case_ids_with_unexpected_phone_number:
writer.writerow([case_id])
def _case_needs_to_be_updated(self, case):
if case.deleted:
return False
assert case.type == CASE_TYPE
if bool(case.owner_id) and case.owner_id in self.test_locations:
return False
if case.get_case_property(HAS_MOBILE_PROPERTY) == HAS_MOBILE_PROPERTY_NO_VALUE:
phone_number = case.get_case_property(PHONE_NUMBER_PROPERTY)
if not phone_number:
return False
if not phone_number == '91':
self.case_ids_with_unexpected_phone_number.append(case.case_id)
return False
return True
return False
def _find_case_ids_with_invalid_phone_number(self):
case_ids_with_invalid_phone_number = []
start_date = date.today() - timedelta(days=100)
reindex_accessor = CaseReindexAccessor(
case_type='person', limit_db_aliases=[self.db_alias],
start_date=start_date
)
filename = 'invalid_phone_numbers_with_91_part_%s_%s.csv' % (self.db_alias, datetime.utcnow())
with open(filename, 'w+b') as output:
cases_iterated = 0
writer = csv.writer(output)
writer.writerow(CSV_HEADERS)
if self.log_progress:
self.stdout.write('iterating now')
for case in iter_all_rows(reindex_accessor):
if self.log_progress and cases_iterated % CASE_ITERATION_COUNT == 0:
self.stdout.write("cases iterated: %s" % cases_iterated)
cases_iterated += 1
if self._case_needs_to_be_updated(case):
case_ids_with_invalid_phone_number.append(case.case_id)
writer.writerow([case.case_id])
return case_ids_with_invalid_phone_number
def _reassured_case_ids_to_update(self, chunk):
# reconfirm the cases before updating to avoid removing updates in between
# fetching case ids and updating
invalid_cases = self.case_accessor.get_cases(list(chunk))
case_ids_list = []
for invalid_case in invalid_cases:
if (invalid_case.get_case_property(HAS_MOBILE_PROPERTY) == HAS_MOBILE_PROPERTY_NO_VALUE and
invalid_case.get_case_property(PHONE_NUMBER_PROPERTY) == '91'):
case_ids_list.append(invalid_case.case_id)
return case_ids_list
def _submit_update_form(self, case_ids_list, exceptions_raised):
update_case_blocks = create_case_blocks(case_ids_list)
for attempt in range(MAX_RESCUE_EXCEPTIONS_ON_UPDATE):
try:
if update_case_blocks:
submit_case_blocks(update_case_blocks, DOMAIN, user_id=SYSTEM_USER_ID)
except Exception as e:
exc = sys.exc_info()
exceptions_raised += 1
if self.log_progress:
self.stdout.write("rescuing exception %s %s" % (exceptions_raised, six.text_type(e)))
if exceptions_raised > MAX_RESCUE_EXCEPTIONS_ON_UPDATE:
six.reraise(*exc)
else:
time.sleep(60) # wait for 1 min before trying again
else:
break
return exceptions_raised
def _update_cases(self, case_ids_with_invalid_phone_number):
exceptions_raised = 0
with open('invalid_phone_numbers_with_91_part_%s_updated.csv' % self.db_alias, 'w+b') as output:
writer = csv.writer(output)
writer.writerow(['Case Id'])
case_ids_to_update_chunk = list(chunked(case_ids_with_invalid_phone_number, 100))
for chunk in with_progress_bar(case_ids_to_update_chunk):
case_ids_list = self._reassured_case_ids_to_update(chunk)
[writer.writerow([case_id]) for case_id in case_ids_list]
exceptions_raised = self._submit_update_form(case_ids_list, exceptions_raised)
def handle(self, db_alias, log_progress, **options):
self.db_alias = db_alias
self.log_progress = log_progress
self.test_locations = find_test_locations()
case_ids_with_invalid_phone_number = self._find_case_ids_with_invalid_phone_number()
self._store_case_ids_with_unexpected_phone_number()
if self.log_progress:
            self.stdout.write('starting update now for %s cases' % len(case_ids_with_invalid_phone_number))
self._update_cases(case_ids_with_invalid_phone_number)
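# Invocation sketch (illustrative; the partition alias below is an
# assumption): this is a Django management command, typically run once per
# partition, e.g.
#   python manage.py sanitize_phone_numbers p1 --log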
def create_case_blocks(case_ids):
case_blocks = []
for case_id in case_ids:
case_block = CaseBlock(case_id,
update={PHONE_NUMBER_PROPERTY: ''},
user_id=SYSTEM_USER_ID)
case_block = ElementTree.tostring(case_block.as_xml())
case_blocks.append(case_block)
return case_blocks
def find_test_locations():
test_locations = set()
for location in SQLLocation.active_objects.filter(name__in=TEST_STATES, domain=DOMAIN):
test_locations.update(location.get_descendants(include_self=True).values_list('location_id', flat=True))
return test_locations
| 40 | 112 | 0.67425 |
4a1daee1dad218c964895258b051f1446cf59455 | 14,871 | py | Python | keras/initializers.py | pokey/keras | 3e81b668ea0997d136f43baf32a57246ed6410a2 | [
"MIT"
] | 2 | 2018-01-14T04:05:14.000Z | 2021-07-11T04:41:54.000Z | keras/initializers.py | pokey/keras | 3e81b668ea0997d136f43baf32a57246ed6410a2 | [
"MIT"
] | null | null | null | keras/initializers.py | pokey/keras | 3e81b668ea0997d136f43baf32a57246ed6410a2 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import numpy as np
import six
from . import backend as K
from .utils.generic_utils import serialize_keras_object
from .utils.generic_utils import deserialize_keras_object
class Initializer(object):
"""Initializer base class: all initializers inherit from this class.
"""
def __call__(self, shape, dtype=None):
raise NotImplementedError
def get_config(self):
return {}
@classmethod
def from_config(cls, config):
return cls(**config)
class Zeros(Initializer):
"""Initializer that generates tensors initialized to 0."""
def __call__(self, shape, dtype=None):
return K.constant(0, shape=shape, dtype=dtype)
class Ones(Initializer):
"""Initializer that generates tensors initialized to 1."""
def __call__(self, shape, dtype=None):
return K.constant(1, shape=shape, dtype=dtype)
class Constant(Initializer):
"""Initializer that generates tensors initialized to a constant value.
# Arguments
value: float; the value of the generator tensors.
"""
def __init__(self, value=0):
self.value = value
def __call__(self, shape, dtype=None):
return K.constant(self.value, shape=shape, dtype=dtype)
def get_config(self):
return {'value': self.value}
class RandomNormal(Initializer):
"""Initializer that generates tensors with a normal distribution.
# Arguments
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to seed the random generator.
"""
def __init__(self, mean=0., stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
def __call__(self, shape, dtype=None):
return K.random_normal(shape, self.mean, self.stddev,
dtype=dtype, seed=self.seed)
def get_config(self):
return {
'mean': self.mean,
'stddev': self.stddev,
'seed': self.seed
}
class RandomUniform(Initializer):
"""Initializer that generates tensors with a uniform distribution.
# Arguments
minval: A python scalar or a scalar tensor. Lower bound of the range
of random values to generate.
maxval: A python scalar or a scalar tensor. Upper bound of the range
of random values to generate. Defaults to 1 for float types.
seed: A Python integer. Used to seed the random generator.
"""
def __init__(self, minval=-0.05, maxval=0.05, seed=None):
self.minval = minval
self.maxval = maxval
self.seed = seed
def __call__(self, shape, dtype=None):
return K.random_uniform(shape, self.minval, self.maxval,
dtype=dtype, seed=self.seed)
def get_config(self):
return {
'minval': self.minval,
'maxval': self.maxval,
'seed': self.seed,
}
class TruncatedNormal(Initializer):
"""Initializer that generates a truncated normal distribution.
These values are similar to values from a `random_normal_initializer`
except that values more than two standard deviations from the mean
are discarded and re-drawn. This is the recommended initializer for
neural network weights and filters.
# Arguments
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to seed the random generator.
"""
def __init__(self, mean=0., stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
def __call__(self, shape, dtype=None):
return K.truncated_normal(shape, self.mean, self.stddev,
dtype=dtype, seed=self.seed)
def get_config(self):
return {
'mean': self.mean,
'stddev': self.stddev,
'seed': self.seed
}
class VarianceScaling(Initializer):
"""Initializer capable of adapting its scale to the shape of weights.
With `distribution="normal"`, samples are drawn from a truncated normal
distribution centered on zero, with `stddev = sqrt(scale / n)` where n is:
- number of input units in the weight tensor, if mode = "fan_in"
- number of output units, if mode = "fan_out"
- average of the numbers of input and output units, if mode = "fan_avg"
With `distribution="uniform"`,
samples are drawn from a uniform distribution
within [-limit, limit], with `limit = sqrt(3 * scale / n)`.
# Arguments
scale: Scaling factor (positive float).
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "normal", "uniform".
seed: A Python integer. Used to seed the random generator.
# Raises
ValueError: In case of an invalid value for the "scale", mode" or
"distribution" arguments.
"""
def __init__(self, scale=1.0,
mode='fan_in',
distribution='normal',
seed=None):
if scale <= 0.:
raise ValueError('`scale` must be a positive float. Got:', scale)
mode = mode.lower()
if mode not in {'fan_in', 'fan_out', 'fan_avg'}:
raise ValueError('Invalid `mode` argument: '
'expected on of {"fan_in", "fan_out", "fan_avg"} '
'but got', mode)
distribution = distribution.lower()
if distribution not in {'normal', 'uniform'}:
raise ValueError('Invalid `distribution` argument: '
'expected one of {"normal", "uniform"} '
'but got', distribution)
self.scale = scale
self.mode = mode
self.distribution = distribution
self.seed = seed
def __call__(self, shape, dtype=None):
fan_in, fan_out = _compute_fans(shape)
scale = self.scale
if self.mode == 'fan_in':
scale /= max(1., fan_in)
elif self.mode == 'fan_out':
scale /= max(1., fan_out)
else:
scale /= max(1., float(fan_in + fan_out) / 2)
if self.distribution == 'normal':
stddev = np.sqrt(scale)
return K.truncated_normal(shape, 0., stddev,
dtype=dtype, seed=self.seed)
else:
limit = np.sqrt(3. * scale)
return K.random_uniform(shape, -limit, limit,
dtype=dtype, seed=self.seed)
def get_config(self):
return {
'scale': self.scale,
'mode': self.mode,
'distribution': self.distribution,
'seed': self.seed
}
class Orthogonal(Initializer):
"""Initializer that generates a random orthogonal matrix.
# Arguments
gain: Multiplicative factor to apply to the orthogonal matrix.
seed: A Python integer. Used to seed the random generator.
# References
Saxe et al., http://arxiv.org/abs/1312.6120
"""
def __init__(self, gain=1., seed=None):
self.gain = gain
self.seed = seed
def __call__(self, shape, dtype=None):
num_rows = 1
for dim in shape[:-1]:
num_rows *= dim
num_cols = shape[-1]
flat_shape = (num_rows, num_cols)
if self.seed is not None:
np.random.seed(self.seed)
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
# Pick the one with the correct shape.
q = u if u.shape == flat_shape else v
q = q.reshape(shape)
return self.gain * q[:shape[0], :shape[1]]
def get_config(self):
return {
'gain': self.gain,
'seed': self.seed
}
class Identity(Initializer):
"""Initializer that generates the identity matrix.
Only use for square 2D matrices.
# Arguments
gain: Multiplicative factor to apply to the identity matrix.
"""
def __init__(self, gain=1.):
self.gain = gain
def __call__(self, shape, dtype=None):
if len(shape) != 2 or shape[0] != shape[1]:
raise ValueError('Identity matrix initializer can only be used '
'for 2D square matrices.')
else:
return self.gain * np.identity(shape[0])
def get_config(self):
return {
'gain': self.gain
}
def lecun_uniform(seed=None):
"""LeCun uniform initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(3 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
# Arguments
seed: A Python integer. Used to seed the random generator.
# Returns
An initializer.
# References
LeCun 98, Efficient Backprop,
http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
"""
return VarianceScaling(scale=1.,
mode='fan_in',
distribution='uniform',
seed=seed)
def glorot_normal(seed=None):
"""Glorot normal initializer, also called Xavier normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(2 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
# Arguments
seed: A Python integer. Used to seed the random generator.
# Returns
An initializer.
# References
Glorot & Bengio, AISTATS 2010
http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
"""
return VarianceScaling(scale=1.,
mode='fan_avg',
distribution='normal',
seed=seed)
def glorot_uniform(seed=None):
"""Glorot uniform initializer, also called Xavier uniform initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(6 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
# Arguments
seed: A Python integer. Used to seed the random generator.
# Returns
An initializer.
# References
Glorot & Bengio, AISTATS 2010
http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
"""
return VarianceScaling(scale=1.,
mode='fan_avg',
distribution='uniform',
seed=seed)
def he_normal(seed=None):
"""He normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(2 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
# Arguments
seed: A Python integer. Used to seed the random generator.
# Returns
An initializer.
# References
He et al., http://arxiv.org/abs/1502.01852
"""
return VarianceScaling(scale=2.,
mode='fan_in',
distribution='normal',
seed=seed)
def he_uniform(seed=None):
"""He uniform variance scaling initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(6 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
# Arguments
seed: A Python integer. Used to seed the random generator.
# Returns
An initializer.
# References
He et al., http://arxiv.org/abs/1502.01852
"""
return VarianceScaling(scale=2.,
mode='fan_in',
distribution='uniform',
seed=seed)
# Compatibility aliases
zero = zeros = Zeros
one = ones = Ones
constant = Constant
uniform = random_uniform = RandomUniform
normal = random_normal = RandomNormal
truncated_normal = TruncatedNormal
identity = Identity
orthogonal = Orthogonal
# Utility functions
def _compute_fans(shape, data_format='channels_last'):
"""Computes the number of input and output units for a weight shape.
# Arguments
shape: Integer shape tuple.
data_format: Image data format to use for convolution kernels.
Note that all kernels in Keras are standardized on the
`channels_last` ordering (even when inputs are set
to `channels_first`).
# Returns
A tuple of scalars, `(fan_in, fan_out)`.
# Raises
ValueError: in case of invalid `data_format` argument.
"""
if len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
elif len(shape) in {3, 4, 5}:
# Assuming convolution kernels (1D, 2D or 3D).
# TH kernel shape: (depth, input_depth, ...)
# TF kernel shape: (..., input_depth, depth)
if data_format == 'channels_first':
receptive_field_size = np.prod(shape[2:])
fan_in = shape[1] * receptive_field_size
fan_out = shape[0] * receptive_field_size
elif data_format == 'channels_last':
receptive_field_size = np.prod(shape[:2])
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
else:
raise ValueError('Invalid data_format: ' + data_format)
else:
# No specific assumptions.
fan_in = np.sqrt(np.prod(shape))
fan_out = np.sqrt(np.prod(shape))
return fan_in, fan_out
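# Worked example for _compute_fans above (illustrative): a 3x3 convolution
# kernel with 64 input and 128 output channels has shape (3, 3, 64, 128) in
# the standardized `channels_last` ordering, so receptive_field_size = 9,
# fan_in = 64 * 9 = 576 and fan_out = 128 * 9 = 1152.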
def serialize(initializer):
return serialize_keras_object(initializer)
def deserialize(config, custom_objects=None):
return deserialize_keras_object(config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='initializer')
def get(identifier):
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret initializer identifier:',
identifier)
| 31.707889 | 79 | 0.601574 |
4a1dafaf1931f61c7a6dddb9ec38637ccb75d528 | 2,230 | py | Python | Authda/models.py | kryptn/Authda | 4716a34c0758c3e38efa0254f29d8ca68f01d6a4 | [
"MIT"
] | null | null | null | Authda/models.py | kryptn/Authda | 4716a34c0758c3e38efa0254f29d8ca68f01d6a4 | [
"MIT"
] | null | null | null | Authda/models.py | kryptn/Authda | 4716a34c0758c3e38efa0254f29d8ca68f01d6a4 | [
"MIT"
] | null | null | null | import bcrypt
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Status:
invited = 'invited'
rejected = 'rejected'
submitted = 'submitted'
resubmitted = 'resubmitted'
@property
def enum(self):
return (self.invited,
self.rejected,
self.submitted,
self.resubmitted)
class Invite(db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String)
referrer = db.Column(db.String)
status = db.Column(db.String(15), default=Status.submitted)
notes = db.Column(db.Text)
created = db.Column(db.DateTime, server_default=db.func.now())
last_modified = db.Column(db.DateTime, onupdate=db.func.now())
def __init__(self, email, referrer):
self.email = email
self.referrer = referrer
def invite(self):
self.status = Status.invited
db.session.commit()
def reject(self):
self.status = Status.rejected
db.session.commit()
@staticmethod
def get_pending():
query = Invite.status.in_((Status.submitted, Status.resubmitted))
return Invite.query.filter(query).all()
@staticmethod
def get_or_create(email, referrer):
result = Invite.query.filter_by(email=email).first()
if not result:
result = Invite(email, referrer)
db.session.add(result)
else:
result.status = Status.resubmitted
db.session.commit()
return result
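    # Usage sketch (illustrative; assumes a Flask app context with an
    # initialized database):
    #   inv = Invite.get_or_create('user@example.com', 'a-referrer')
    #   inv.invite()   # status -> 'invited'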
def to_json(self):
return {'id': self.id,
'email': self.email,
'referrer': self.referrer,
'status': self.status,
'notes': self.notes,
'created': self.created,
'last_modified': self.last_modified,}
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(30))
password = db.Column(db.String(72))
email = db.Column(db.String(64))
def __init__(self, username, password, email):
self.username = username
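        # NOTE: bcrypt.hashpw expects bytes; on Python 3 callers must pass
        # the password as bytes (e.g. password.encode('utf-8')).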
self.password = bcrypt.hashpw(password, bcrypt.gensalt())
self.email = email
| 26.235294 | 73 | 0.601794 |
4a1dafbba0a29ac2b5b5823dc945dff04bc7146c | 912 | py | Python | personal_utilities/chebyshev.py | dbstein/personal_utilities | 3a4c7d2416b13a87f88fc0e400b299d648e1e541 | [
"Apache-2.0"
] | null | null | null | personal_utilities/chebyshev.py | dbstein/personal_utilities | 3a4c7d2416b13a87f88fc0e400b299d648e1e541 | [
"Apache-2.0"
] | null | null | null | personal_utilities/chebyshev.py | dbstein/personal_utilities | 3a4c7d2416b13a87f88fc0e400b299d648e1e541 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from .transformations import affine_transformation
def get_chebyshev_nodes(lb, ub, order):
"""
    Provides Chebyshev quadrature nodes
    scaled to live on the interval [lb, ub], of the specified order.
    The nodes are reversed from traditional Chebyshev nodes
    (so that the lowest-valued node comes first).
Returns:
unscaled nodes
scaled nodes
scaling ratio
"""
xc, _ = np.polynomial.chebyshev.chebgauss(order)
x, rat = affine_transformation(xc[::-1], -1, 1, lb, ub, return_ratio=True)
return xc[::-1], x, rat
class ChebyshevInterpolater(object):
def __init__(self, x, data):
self.x = x
self.n = self.x.shape[0]
self.data = data
self.coefs = np.polynomial.chebyshev.chebfit(self.x, self.data, self.n-1)
def __call__(self, x_out):
return np.polynomial.chebyshev.chebval(x_out, self.coefs)
| 32.571429 | 81 | 0.66557 |
4a1db0117b5f1f8dca3c5ba77a8b9c55a9201f34 | 91 | py | Python | finances/apps.py | ericmuh/recruitment-system | d9964e7c48ac8af74995e28f489135c1d8f940be | [
"MIT"
] | 2 | 2021-02-07T12:08:47.000Z | 2021-02-22T07:12:53.000Z | finances/apps.py | Paphra/recruitment-cms | 2894d5285a6da4ff47c114377968c8fda95c24b6 | [
"MIT"
] | null | null | null | finances/apps.py | Paphra/recruitment-cms | 2894d5285a6da4ff47c114377968c8fda95c24b6 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class FinancesConfig(AppConfig):
name = 'finances'
| 15.166667 | 33 | 0.758242 |
4a1db1607f8dad83b44cb0dbba143a9d435220a0 | 3,102 | py | Python | Kaggle_FinalProject.py | iSriBalaji/MachineLearning | 6c1c5ac954485a0a385a9409cdbf00f7692cd4a1 | [
"MIT"
] | null | null | null | Kaggle_FinalProject.py | iSriBalaji/MachineLearning | 6c1c5ac954485a0a385a9409cdbf00f7692cd4a1 | [
"MIT"
] | null | null | null | Kaggle_FinalProject.py | iSriBalaji/MachineLearning | 6c1c5ac954485a0a385a9409cdbf00f7692cd4a1 | [
"MIT"
] | null | null | null | # Code you have previously used to load data
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
# Set up code checking
import os
# if not os.path.exists("../input/train.csv"):
# os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv")
# os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv")
# from learntools.core import binder
# binder.bind(globals())
# from learntools.machine_learning.ex7 import *
# Path of the file to read. We changed the directory structure to simplify submitting to a competition
iowa_file_path = '../input/train.csv'
home_data = pd.read_csv(iowa_file_path)
# Create target object and call it y
y = home_data.SalePrice
# Create X
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = home_data[features]
# Split into validation and training data
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
# Specify Model
iowa_model = DecisionTreeRegressor(random_state=1)
# Fit Model
iowa_model.fit(train_X, train_y)
# Make validation predictions and calculate mean absolute error
val_predictions = iowa_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
print("Validation MAE when not specifying max_leaf_nodes: {:,.0f}".format(val_mae))
# Using best value for max_leaf_nodes
iowa_model = DecisionTreeRegressor(max_leaf_nodes=100, random_state=1)
iowa_model.fit(train_X, train_y)
val_predictions = iowa_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
print("Validation MAE for best value of max_leaf_nodes: {:,.0f}".format(val_mae))
# Define the model. Set random_state to 1
rf_model = RandomForestRegressor(random_state=1)
rf_model.fit(train_X, train_y)
rf_val_predictions = rf_model.predict(val_X)
rf_val_mae = mean_absolute_error(rf_val_predictions, val_y)
print("Validation MAE for Random Forest Model: {:,.0f}".format(rf_val_mae))
# To improve accuracy, create a new Random Forest model which you will train on all training data
rf_model_on_full_data = RandomForestRegressor(random_state=0)
# fit rf_model_on_full_data on all data from the training data
rf_model_on_full_data.fit(X,y)
# path to file you will use for predictions
test_data_path = '../input/test.csv'
# read test data file using pandas
test_data = pd.read_csv(test_data_path)
# create test_X which comes from test_data but includes only the columns you used for prediction.
# The list of columns is stored in a variable called features
test_X = test_data[features]
# make predictions which we will submit.
test_preds = rf_model_on_full_data.predict(test_X)
# The lines below shows how to save predictions in format used for competition scoring
# Just uncomment them.
output = pd.DataFrame({'Id': test_data.Id,
'SalePrice': test_preds})
output.to_csv('submission.csv', index=False)
| 38.775 | 103 | 0.779819 |
4a1db16677cd6bb753faa4af41f3f7666d60e789 | 14,728 | py | Python | sympy/functions/elementary/miscellaneous.py | lidavidm/sympy | 971aa94ee6d0774eacfb4aed6965195c4a59e104 | [
"BSD-3-Clause"
] | null | null | null | sympy/functions/elementary/miscellaneous.py | lidavidm/sympy | 971aa94ee6d0774eacfb4aed6965195c4a59e104 | [
"BSD-3-Clause"
] | null | null | null | sympy/functions/elementary/miscellaneous.py | lidavidm/sympy | 971aa94ee6d0774eacfb4aed6965195c4a59e104 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function, division
from sympy.core import S, C, sympify
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.numbers import Rational
from sympy.core.operations import LatticeOp, ShortCircuit
from sympy.core.function import Application, Lambda, ArgumentIndexError, Function
from sympy.core.expr import Expr
from sympy.core.singleton import Singleton
from sympy.core.rules import Transform
from sympy.core.compatibility import as_int, with_metaclass, xrange
from sympy.core.logic import fuzzy_and
class IdentityFunction(with_metaclass(Singleton, Lambda)):
"""
The identity function
Examples
========
>>> from sympy import Id, Symbol
>>> x = Symbol('x')
>>> Id(x)
x
"""
__slots__ = []
nargs = 1
def __new__(cls):
x = C.Dummy('x')
#construct "by hand" to avoid infinite loop
return Expr.__new__(cls, Tuple(x), x)
Id = S.IdentityFunction
###############################################################################
############################# ROOT and SQUARE ROOT FUNCTION ###################
###############################################################################
def sqrt(arg):
"""The square root function
sqrt(x) -> Returns the principal square root of x.
Examples
========
>>> from sympy import sqrt, Symbol
>>> x = Symbol('x')
>>> sqrt(x)
sqrt(x)
>>> sqrt(x)**2
x
Note that sqrt(x**2) does not simplify to x.
>>> sqrt(x**2)
sqrt(x**2)
This is because the two are not equal to each other in general.
For example, consider x == -1:
>>> sqrt(x**2).subs(x, -1)
1
>>> x.subs(x, -1)
-1
This is because sqrt computes the principal square root, so the square may
put the argument in a different branch. This identity does hold if x is
positive:
>>> y = Symbol('y', positive=True)
>>> sqrt(y**2)
y
You can force this simplification by using the powdenest() function with
the force option set to True:
>>> from sympy import powdenest
>>> sqrt(x**2)
sqrt(x**2)
>>> powdenest(sqrt(x**2), force=True)
x
To get both branches of the square root you can use the RootOf function:
>>> from sympy import RootOf
>>> [ RootOf(x**2-3,i) for i in (0,1) ]
[-sqrt(3), sqrt(3)]
See Also
========
sympy.polys.rootoftools.RootOf, root
References
==========
* http://en.wikipedia.org/wiki/Square_root
* http://en.wikipedia.org/wiki/Principal_value
"""
# arg = sympify(arg) is handled by Pow
return C.Pow(arg, S.Half)
def root(arg, n):
"""The n-th root function (a shortcut for ``arg**(1/n)``)
root(x, n) -> Returns the principal n-th root of x.
Examples
========
>>> from sympy import root, Rational
>>> from sympy.abc import x, n
>>> root(x, 2)
sqrt(x)
>>> root(x, 3)
x**(1/3)
>>> root(x, n)
x**(1/n)
>>> root(x, -Rational(2, 3))
x**(-3/2)
To get all n n-th roots you can use the RootOf function.
The following examples show the roots of unity for n
equal 2, 3 and 4:
>>> from sympy import RootOf, I
>>> [ RootOf(x**2-1,i) for i in (0,1) ]
[-1, 1]
>>> [ RootOf(x**3-1,i) for i in (0,1,2) ]
[1, -1/2 - sqrt(3)*I/2, -1/2 + sqrt(3)*I/2]
>>> [ RootOf(x**4-1,i) for i in (0,1,2,3) ]
[-1, 1, -I, I]
SymPy, like other symbolic algebra systems, returns the
complex root of negative numbers. This is the principal
root and differs from the text-book result that one might
be expecting. For example, the cube root of -8 does not
come back as -2:
>>> root(-8, 3)
2*(-1)**(1/3)
The real_root function can be used to either make such a result
real or simply return the real root in the first place:
>>> from sympy import real_root
>>> real_root(_)
-2
>>> real_root(-32, 5)
-2
See Also
========
sympy.polys.rootoftools.RootOf
sympy.core.power.integer_nthroot
sqrt, real_root
References
==========
* http://en.wikipedia.org/wiki/Square_root
* http://en.wikipedia.org/wiki/real_root
* http://en.wikipedia.org/wiki/Root_of_unity
* http://en.wikipedia.org/wiki/Principal_value
* http://mathworld.wolfram.com/CubeRoot.html
"""
n = sympify(n)
return C.Pow(arg, 1/n)
def real_root(arg, n=None):
"""Return the real nth-root of arg if possible. If n is omitted then
all instances of -1**(1/odd) will be changed to -1.
Examples
========
>>> from sympy import root, real_root, Rational
>>> from sympy.abc import x, n
>>> real_root(-8, 3)
-2
>>> root(-8, 3)
2*(-1)**(1/3)
>>> real_root(_)
-2
See Also
========
sympy.polys.rootoftools.RootOf
sympy.core.power.integer_nthroot
root, sqrt
"""
if n is not None:
n = as_int(n)
rv = C.Pow(arg, Rational(1, n))
if n % 2 == 0:
return rv
else:
rv = sympify(arg)
n1pow = Transform(lambda x: S.NegativeOne,
lambda x:
x.is_Pow and
x.base is S.NegativeOne and
x.exp.is_Rational and
x.exp.p == 1 and x.exp.q % 2)
return rv.xreplace(n1pow)
###############################################################################
############################# MINIMUM and MAXIMUM #############################
###############################################################################
class MinMaxBase(Expr, LatticeOp):
def __new__(cls, *args, **assumptions):
if not args:
raise ValueError("The Max/Min functions must have arguments.")
args = (sympify(arg) for arg in args)
# first standard filter, for cls.zero and cls.identity
# also reshape Max(a, Max(b, c)) to Max(a, b, c)
try:
_args = frozenset(cls._new_args_filter(args))
except ShortCircuit:
return cls.zero
# second filter
# variant I: remove ones which can be removed
# args = cls._collapse_arguments(set(_args), **assumptions)
# variant II: find local zeros
args = cls._find_localzeros(set(_args), **assumptions)
_args = frozenset(args)
if not _args:
return cls.identity
elif len(_args) == 1:
return set(_args).pop()
else:
# base creation
obj = Expr.__new__(cls, _args, **assumptions)
obj._argset = _args
return obj
@classmethod
def _new_args_filter(cls, arg_sequence):
"""
Generator filtering args.
first standard filter, for cls.zero and cls.identity.
Also reshape Max(a, Max(b, c)) to Max(a, b, c),
and check arguments for comparability
"""
for arg in arg_sequence:
# pre-filter, checking comparability of arguments
if (arg.is_real is False) or (arg is S.ComplexInfinity):
raise ValueError("The argument '%s' is not comparable." % arg)
if arg == cls.zero:
raise ShortCircuit(arg)
elif arg == cls.identity:
continue
elif arg.func == cls:
for x in arg.iter_basic_args():
yield x
else:
yield arg
@classmethod
def _find_localzeros(cls, values, **options):
"""
Sequentially allocate values to localzeros.
When a value is identified as being more extreme than another member it
replaces that member; if this is never true, then the value is simply
appended to the localzeros.
"""
localzeros = set()
for v in values:
is_newzero = True
localzeros_ = list(localzeros)
for z in localzeros_:
if id(v) == id(z):
is_newzero = False
elif cls._is_connected(v, z):
is_newzero = False
if cls._is_asneeded(v, z):
localzeros.remove(z)
localzeros.update([v])
if is_newzero:
localzeros.update([v])
return localzeros
@classmethod
def _is_connected(cls, x, y):
"""
Check if x and y are connected somehow.
"""
if (x == y) or isinstance(x > y, bool) or isinstance(x < y, bool):
return True
if x.is_Number and y.is_Number:
return True
return False
@classmethod
def _is_asneeded(cls, x, y):
"""
Check if x and y satisfy relation condition.
The relation condition for Max function is x > y,
for Min function is x < y. They are defined in children Max and Min
classes through the method _rel(cls, x, y)
"""
if (x == y):
return False
if x.is_Number and y.is_Number:
if cls._rel(x, y):
return True
xy = cls._rel(x, y)
if isinstance(xy, bool):
if xy:
return True
return False
yx = cls._rel_inversed(x, y)
if isinstance(yx, bool):
if yx:
return False # never occurs?
return True
return False
def _eval_derivative(self, s):
# f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)
i = 0
l = []
for a in self.args:
i += 1
da = a.diff(s)
if da is S.Zero:
continue
try:
df = self.fdiff(i)
except ArgumentIndexError:
df = Function.fdiff(self, i)
l.append(df * da)
return Add(*l)
@property
def is_real(self):
return fuzzy_and(arg.is_real for arg in self.args)
class Max(MinMaxBase, Application):
"""
Return, if possible, the maximum value of the list.
When number of arguments is equal one, then
return this argument.
When number of arguments is equal two, then
return, if possible, the value from (a, b) that is >= the other.
In common case, when the length of list greater than 2, the task
is more complicated. Return only the arguments, which are greater
than others, if it is possible to determine directional relation.
If is not possible to determine such a relation, return a partially
evaluated result.
Assumptions are used to make the decision too.
Also, only comparable arguments are permitted.
Examples
========
>>> from sympy import Max, Symbol, oo
>>> from sympy.abc import x, y
>>> p = Symbol('p', positive=True)
>>> n = Symbol('n', negative=True)
>>> Max(x, -2) #doctest: +SKIP
Max(x, -2)
>>> Max(x, -2).subs(x, 3)
3
>>> Max(p, -2)
p
>>> Max(x, y) #doctest: +SKIP
Max(x, y)
>>> Max(x, y) == Max(y, x)
True
>>> Max(x, Max(y, z)) #doctest: +SKIP
Max(x, y, z)
>>> Max(n, 8, p, 7, -oo) #doctest: +SKIP
Max(8, p)
>>> Max (1, x, oo)
oo
Algorithm
    The task can be considered as a search for suprema in
    directed complete partial orders [1]_.
    The source values are sequentially allocated into isolated subsets
    in which suprema are searched for and returned as Max arguments.
    If the resulting supremum is a single value, then it is returned.
    The isolated subsets are the sets of values which are only comparable
    with each other in the current set. E.g. natural numbers are comparable with
each other, but not comparable with the `x` symbol. Another example: the
symbol `x` with negative assumption is comparable with a natural number.
Also there are "least" elements, which are comparable with all others,
and have a zero property (maximum or minimum for all elements). E.g. `oo`.
    When such an element is met, the allocation operation is terminated and
    only this value is returned.
Assumption:
- if A > B > C then A > C
- if A==B then B can be removed
References
==========
.. [1] http://en.wikipedia.org/wiki/Directed_complete_partial_order
.. [2] http://en.wikipedia.org/wiki/Lattice_%28order%29
See Also
========
Min : find minimum values
"""
zero = S.Infinity
identity = S.NegativeInfinity
@classmethod
def _rel(cls, x, y):
"""
Check if x > y.
"""
return (x > y)
@classmethod
def _rel_inversed(cls, x, y):
"""
Check if x < y.
"""
return (x < y)
    def fdiff(self, argindex):
from sympy.functions.special.delta_functions import Heaviside
n = len(self.args)
if 0 < argindex and argindex <= n:
argindex -= 1
if n == 2:
return Heaviside( self.args[argindex] - self.args[1-argindex] )
            newargs = tuple([self.args[i] for i in range(n) if i != argindex])
return Heaviside( self.args[argindex] - Max(*newargs) )
else:
raise ArgumentIndexError(self, argindex)
class Min(MinMaxBase, Application):
"""
Return, if possible, the minimum value of the list.
Examples
========
>>> from sympy import Min, Symbol, oo
>>> from sympy.abc import x, y
>>> p = Symbol('p', positive=True)
>>> n = Symbol('n', negative=True)
>>> Min(x, -2) #doctest: +SKIP
Min(x, -2)
>>> Min(x, -2).subs(x, 3)
-2
>>> Min(p, -3)
-3
>>> Min(x, y) #doctest: +SKIP
Min(x, y)
>>> Min(n, 8, p, -7, p, oo) #doctest: +SKIP
Min(n, -7)
See Also
========
Max : find maximum values
"""
zero = S.NegativeInfinity
identity = S.Infinity
@classmethod
def _rel(cls, x, y):
"""
Check if x < y.
"""
return (x < y)
@classmethod
def _rel_inversed(cls, x, y):
"""
Check if x > y.
"""
return (x > y)
    def fdiff(self, argindex):
from sympy.functions.special.delta_functions import Heaviside
n = len(self.args)
if 0 < argindex and argindex <= n:
argindex -= 1
if n == 2:
return Heaviside( self.args[1-argindex] - self.args[argindex] )
            newargs = tuple([self.args[i] for i in range(n) if i != argindex])
return Heaviside( Min(*newargs) - self.args[argindex] )
else:
raise ArgumentIndexError(self, argindex)
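# Illustrative sketch (added for exposition; not part of the original module):
# the allocation that _find_localzeros performs above, restated for plain
# Python values with a pluggable relation. The helper name and example values
# are assumptions made for this demo only.
def _demo_find_localzeros(values, rel):
    localzeros = set()
    for v in values:
        is_newzero = True
        for z in list(localzeros):
            if v == z:
                is_newzero = False
            elif rel(v, z) or rel(z, v):
                # v and z are comparable: keep only the more extreme of the two
                is_newzero = False
                if rel(v, z):
                    localzeros.discard(z)
                    localzeros.add(v)
        if is_newzero:
            localzeros.add(v)
    return localzeros
# For totally ordered inputs this collapses to a single supremum:
# _demo_find_localzeros([3, 1, 7], rel=lambda a, b: a > b) == {7}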
| 26.536937 | 80 | 0.544541 |
4a1db1da613d143233f5561646a1828a90110f45 | 3,133 | py | Python | sample_code_mixed_words_model_meta.py | onai/code-mixing-toolbox | b5b53ca12f8f356fb8b38cceb82105c8c2f7f03c | [
"Apache-2.0"
] | null | null | null | sample_code_mixed_words_model_meta.py | onai/code-mixing-toolbox | b5b53ca12f8f356fb8b38cceb82105c8c2f7f03c | [
"Apache-2.0"
] | null | null | null | sample_code_mixed_words_model_meta.py | onai/code-mixing-toolbox | b5b53ca12f8f356fb8b38cceb82105c8c2f7f03c | [
"Apache-2.0"
] | null | null | null | from fastText import load_model
import joblib
import numpy as np
import sys
def isEnglish(s):
try:
s.encode(encoding='utf-8').decode('ascii')
except UnicodeDecodeError:
return False
else:
return True
def is_fence_word(w_embed, center1, center2):
    # use the centres passed in as parameters; the original body read the
    # module-level globals instead, silently ignoring its arguments
    distance1 = np.linalg.norm(w_embed - center1)
    distance2 = np.linalg.norm(w_embed - center2)
    the_dist = abs(distance1 - distance2) / np.linalg.norm(center1 - center2)
return the_dist < 0.1
if __name__ == '__main__':
language_model_f = sys.argv[1]
cluster_model_f = sys.argv[2]
sentences = sys.argv[3]
cluster1 = int(sys.argv[4])
cluster2 = int(sys.argv[5])
model = load_model(language_model_f)
kmeans = joblib.load(cluster_model_f)
centers = kmeans.cluster_centers_
sentence_dist = {}
with open(sentences) as handle:
for new_line in handle:
if len(new_line.split()) < 10:
continue
new_line = new_line.strip()
#if not isEnglish(new_line):
# continue
if len(set(new_line.split())) < 10:
continue
words = new_line.split()
v = model.get_sentence_vector(new_line)
if kmeans.predict([v])[0] != cluster1 and kmeans.predict([v])[0] != cluster2:
continue
word_embeds = [model.get_word_vector(w) for w in words]
word_embeds_normed = [w / np.linalg.norm(w) for w in word_embeds]
fence_words = []
non_fence_words = []
nf_embeds = []
for i, w in enumerate(words):
embed = word_embeds[i]
if is_fence_word(embed, kmeans.cluster_centers_[cluster1], kmeans.cluster_centers_[cluster2]):
fence_words.append(w)
else:
non_fence_words.append(w)
nf_embeds.append(word_embeds[i])
u = len(fence_words)
n_f = len(non_fence_words)
if len(nf_embeds) == 0:
print('ZERO_DENOM', new_line.strip())
continue
word_preds = kmeans.predict(nf_embeds)
lang_freqs = {}
for pred in word_preds:
if pred in lang_freqs:
lang_freqs[pred] += 1
else:
lang_freqs[pred] = 1
max_lang = 0
for k, v in lang_freqs.items():
if v > max_lang:
max_lang = v
max_wi = max_lang
n = len(words)
if n - u == 0:
print('ZERO_DENOM', new_line.strip())
continue
cmi = (n_f - max_wi) / float(n - u)
# distance1 = np.linalg.norm(v - centers[cluster1])
# distance2 = np.linalg.norm(v - centers[cluster2])
# the_dist = abs(distance1 - distance2) / (np.linalg.norm(centers[cluster1] - centers[cluster2]))
print(cmi, new_line)
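# Note on the metric above (added for exposition): `cmi` is a Code-Mixing
# Index. With n words in total, u "fence" words, n_f non-fence words, and
# max_wi occurrences of the dominant language cluster among the non-fence
# words, CMI = (n_f - max_wi) / (n - u). A worked example: a 10-word sentence
# with u = 2, n_f = 8 and max_wi = 5 scores (8 - 5) / (10 - 2) = 0.375, while
# a fully monolingual sentence (max_wi == n_f) scores 0.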
| 28.743119 | 110 | 0.532397 |
4a1db20289da0e284fc2ec5b7c6baf51bc7fe618 | 184 | py | Python | basic_grammar/tuple_usecase.py | OnoYuta/python_programing | 5d191bef5666c0a826f6daa0bd45bc9dd6603d59 | [
"MIT"
] | null | null | null | basic_grammar/tuple_usecase.py | OnoYuta/python_programing | 5d191bef5666c0a826f6daa0bd45bc9dd6603d59 | [
"MIT"
] | null | null | null | basic_grammar/tuple_usecase.py | OnoYuta/python_programing | 5d191bef5666c0a826f6daa0bd45bc9dd6603d59 | [
"MIT"
] | null | null | null | # Use a tuple instead of a list so that the choices cannot be overwritten
chose_from_two = ('A', 'B', 'C')
answer = []
answer.append('A')
answer.append('C')
print(chose_from_two)
# ('A', 'B', 'C')
print(answer)
# ['A', 'C'] | 18.4 | 33 | 0.625 |
4a1db24c1cc44f6ced025c7dd4965261762a90d1 | 13,019 | py | Python | rubymarshal/reader.py | Inejka/rvdata2_parser | 2f99c84b552b634012f13b2259945622cf5a85e3 | [
"WTFPL"
] | null | null | null | rubymarshal/reader.py | Inejka/rvdata2_parser | 2f99c84b552b634012f13b2259945622cf5a85e3 | [
"WTFPL"
] | null | null | null | rubymarshal/reader.py | Inejka/rvdata2_parser | 2f99c84b552b634012f13b2259945622cf5a85e3 | [
"WTFPL"
] | null | null | null | import io
import re
import zlib
link_error_str = "LINK_ERROR_IND_"
class MyList(list):
def __hash__(self):
temp_hash = 0
for i in self:
temp_hash += hash(i)
return temp_hash
class MyDict(dict):
def __hash__(self):
temp_hash = 0
for i in self:
temp_hash += hash(i)
return temp_hash
from rubymarshal.classes import (
UsrMarshal,
Symbol,
UserDef,
Extended,
Module,
RubyString,
RubyObject,
registry as global_registry,
)
from rubymarshal.constants import (
TYPE_NIL,
TYPE_TRUE,
TYPE_FALSE,
TYPE_FIXNUM,
TYPE_IVAR,
TYPE_STRING,
TYPE_SYMBOL,
TYPE_ARRAY,
TYPE_HASH,
TYPE_FLOAT,
TYPE_BIGNUM,
TYPE_REGEXP,
TYPE_USRMARSHAL,
TYPE_SYMLINK,
TYPE_LINK,
TYPE_DATA,
TYPE_OBJECT,
TYPE_STRUCT,
TYPE_MODULE,
TYPE_CLASS,
TYPE_USERDEF,
TYPE_EXTENDED,
)
from rubymarshal.utils import read_ushort, read_sbyte, read_ubyte
__author__ = "Matthieu Gallet"
class Reader:
def __init__(self, fd, registry=None):
self.symbols = []
self.objects = []
self.fd = fd
self.registry = registry or global_registry
self.link_errors = False
self.link_errors_massive = []
def read(self, token=None):
if token is None:
token = self.fd.read(1)
# From https://docs.ruby-lang.org/en/2.1.0/marshal_rdoc.html:
# The stream contains only one copy of each object for all objects except
# true, false, nil, Fixnums and Symbols.
object_index = None
if token in (
TYPE_IVAR,
# TYPE_EXTENDED, TYPE_UCLASS, ????
TYPE_CLASS,
TYPE_MODULE,
TYPE_FLOAT,
TYPE_BIGNUM,
TYPE_REGEXP,
TYPE_ARRAY,
TYPE_HASH,
TYPE_STRUCT,
TYPE_OBJECT,
TYPE_DATA,
TYPE_USRMARSHAL,
):
self.objects.append(None)
object_index = len(self.objects)
result = None
if token == TYPE_NIL:
pass
elif token == TYPE_TRUE:
result = True
elif token == TYPE_FALSE:
result = False
elif token == TYPE_IVAR:
sub_token = self.fd.read(1)
result = self.read(sub_token)
flags = None
if sub_token == TYPE_REGEXP:
options = ord(self.fd.read(1))
flags = 0
if options & 1:
flags |= re.IGNORECASE
if options & 4:
flags |= re.MULTILINE
attributes = self.read_attributes()
KOSTYA = False
if sub_token in (TYPE_STRING, TYPE_REGEXP):
encoding = self._get_encoding(attributes)
try:
result = result.decode(encoding)
except UnicodeDecodeError as u:
try:
# result = str(result)
# result = result.encode("latin1")
result = zlib.decompress(result)
result = result.decode("utf-8")
KOSTYA = True
except:
result = result.decode("unicode-escape")
# -------------------------------------------------------------------------------------------------------------------------------
pass
# string instance attributes are discarded
if attributes and sub_token == TYPE_STRING:
result = RubyString(result, attributes)
if sub_token == TYPE_REGEXP:
result = re.compile(str(result), flags)
elif attributes:
# if hasattr(result, "set_attributes"):
try:
result.set_attributes(attributes)
except:
result.attributes = attributes
# -------------------------------------------------------------------------------------------------------------------------------
if KOSTYA:
result.IS_COMPRESSED = True
elif token == TYPE_STRING:
size = self.read_long()
result = self.fd.read(size)
elif token == TYPE_SYMBOL:
result = self.read_symreal()
elif token == TYPE_FIXNUM:
result = self.read_long()
elif token == TYPE_ARRAY:
num_elements = self.read_long()
# noinspection PyUnusedLocal
result = MyList()
for x in range(num_elements):
result.append(self.read())
elif token == TYPE_HASH:
num_elements = self.read_long()
result = MyDict()
for x in range(num_elements):
key = self.read()
value = self.read()
result[key] = value
result = result
elif token == TYPE_FLOAT:
size = self.read_long()
floatn = self.fd.read(size)
floatn = floatn.split(b"\0")
result = float(floatn[0].decode("utf-8"))
elif token == TYPE_BIGNUM:
sign = 1 if self.fd.read(1) == b"+" else -1
num_elements = self.read_long()
result = 0
factor = 1
for x in range(num_elements):
result += self.read_short() * factor
factor *= 2 ** 16
result *= sign
elif token == TYPE_REGEXP:
size = self.read_long()
result = self.fd.read(size)
elif token == TYPE_USRMARSHAL:
class_symbol = self.read()
if not isinstance(class_symbol, Symbol):
raise ValueError("invalid class name: %r" % class_symbol)
class_name = class_symbol.name
attr_list = self.read()
python_class = self.registry.get(class_name, UsrMarshal)
if not issubclass(python_class, UsrMarshal):
raise ValueError(
"invalid class mapping for %r: %r should be a subclass of %r."
% (class_name, python_class, UsrMarshal)
)
result = python_class(class_name)
result.marshal_load(attr_list)
elif token == TYPE_SYMLINK:
result = self.read_symlink()
elif token == TYPE_LINK:
link_id = self.read_long()
if object_index and link_id >= object_index:
raise ValueError(
"invalid link destination: %d should be lower than %d."
% (link_id, object_index)
)
try:
result = self.objects[link_id]
except:
result = link_error_str + str(len(self.link_errors_massive))
self.link_errors = True
self.link_errors_massive.append(link_id)
elif token == TYPE_USERDEF:
class_symbol = self.read()
private_data = self.read(TYPE_STRING)
if not isinstance(class_symbol, Symbol):
raise ValueError("invalid class name: %r" % class_symbol)
class_name = class_symbol.name
python_class = self.registry.get(class_name, UserDef)
if not issubclass(python_class, UserDef):
raise ValueError(
"invalid class mapping for %r: %r should be a subclass of %r."
% (class_name, python_class, UserDef)
)
result = python_class(class_name)
# noinspection PyProtectedMember
result._load(private_data)
elif token == TYPE_MODULE:
data = self.read(TYPE_STRING)
module_name = data.decode()
result = Module(module_name, None)
elif token == TYPE_OBJECT:
class_symbol = self.read()
assert isinstance(class_symbol, Symbol)
class_name = class_symbol.name
python_class = self.registry.get(class_name, RubyObject)
if not issubclass(python_class, RubyObject):
raise ValueError(
"invalid class mapping for %r: %r should be a subclass of %r."
% (class_name, python_class, RubyObject)
)
attributes = self.read_attributes()
result = python_class(class_name, attributes)
elif token == TYPE_EXTENDED:
class_name = self.read(TYPE_STRING)
result = Extended(class_name, None)
elif token == TYPE_CLASS:
data = self.read(TYPE_STRING)
class_name = data.decode()
if class_name in self.registry:
result = self.registry[class_name]
else:
result = type(
class_name.rpartition(":")[2],
(RubyObject,),
{"ruby_class_name": class_name},
)
else:
raise ValueError("token %s is not recognized" % token)
if object_index is not None:
self.objects[object_index - 1] = result
return result
@staticmethod
def _get_encoding(attrs):
encoding = "latin1"
if attrs.get("E") is True:
encoding = "utf-8"
elif "encoding" in attrs:
encoding = attrs["encoding"].decode()
return encoding
def read_attributes(self):
attr_count = self.read_long()
attrs = {}
for x in range(attr_count):
attr_name = self.read()
attr_value = self.read()
attrs[attr_name.name] = attr_value
return attrs
def read_short(self):
return read_ushort(self.fd)
def read_long(self):
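        # Ruby Marshal packed integer: a single signed byte either encodes the
        # value itself offset by +/-5, or (for small magnitudes) the number of
        # little-endian payload bytes that follow; a negative byte count marks
        # a negative number, reconstructed below as `result - factor`.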
length = read_sbyte(self.fd)
if length == 0:
return 0
if 5 < length < 128:
return length - 5
elif -129 < length < -5:
return length + 5
result = 0
factor = 1
for s in range(abs(length)):
result += read_ubyte(self.fd) * factor
factor *= 256
if length < 0:
result = result - factor
return result
def read_symbol(self):
ivar = 0
while True:
token = self.fd.read(1)
if token == TYPE_IVAR:
ivar = 1
continue
elif token == TYPE_SYMBOL:
return self.read_symreal()
elif token == TYPE_SYMLINK:
if ivar:
raise ValueError("dump format error (symlink with encoding)")
return self.read_symlink()
raise ValueError("error while reading symbol with token %r" % token)
def read_symlink(self):
symlink_id = self.read_long()
return self.symbols[symlink_id]
def read_symreal(self):
size = self.read_long()
result = self.fd.read(size)
result = Symbol(result.decode("utf-8"))
self.symbols.append(result)
return result
def fix_link_errors(self, obj):
if isinstance(obj, list):
self.fix_link_errors_iterate_throught_list(obj)
if isinstance(obj, RubyObject):
self.fix_link_errors_iterate_throught_ruby(obj)
pass
def fix_link_errors_iterate_throught_list(self, obj):
for i in range(len(obj)):
if isinstance(obj[i], list):
self.fix_link_errors_iterate_throught_list(obj[i])
if isinstance(obj[i], RubyObject):
self.fix_link_errors_iterate_throught_ruby(obj[i])
if isinstance(obj[i], str):
if link_error_str in obj[i]:
obj[i] = self.objects[self.link_errors_massive[self.get_error_index(obj[i])]]
def fix_link_errors_iterate_throught_ruby(self, obj):
attrs = obj.attributes
for i in attrs:
if isinstance(obj.attributes[i], list):
self.fix_link_errors_iterate_throught_list(obj.attributes[i])
if isinstance(obj.attributes[i], RubyObject):
self.fix_link_errors_iterate_throught_ruby(obj.attributes[i])
if isinstance(obj.attributes[i], str):
if link_error_str in obj.attributes[i]:
obj.attributes[i] = self.objects[self.link_errors_massive[self.get_error_index(obj.attributes[i])]]
def get_error_index(self, string):
return int((str.split(string, link_error_str))[1])
def load(fd, registry=None):
assert fd.read(1) == b"\x04"
assert fd.read(1) == b"\x08"
loader = Reader(fd, registry=registry)
to_return = loader.read()
if loader.link_errors:
loader.fix_link_errors(to_return)
pass
return to_return
def loads(byte_text, registry=None):
return load(io.BytesIO(byte_text), registry=registry)
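# Usage sketch (illustrative; not part of the original module). Assuming
# TYPE_FIXNUM is b"i" as in Ruby's Marshal format, Ruby's Marshal.dump(1)
# yields b"\x04\x08i\x06": the 4.8 version header checked by load(), a Fixnum
# token, and 1 packed as 0x06 (1 + 5, per Reader.read_long).
# >>> loads(b"\x04\x08i\x06")
# 1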
| 34.81016 | 153 | 0.529764 |
4a1db27406b1279987d4e01fd3ca6777f37ac5b3 | 588 | py | Python | leopy/algorithm/recursion.py | pobingxiaoxiao/leopy | c6fc291cfcd5a8fa58614873edac9b09f20f2e0a | [
"MIT"
] | null | null | null | leopy/algorithm/recursion.py | pobingxiaoxiao/leopy | c6fc291cfcd5a8fa58614873edac9b09f20f2e0a | [
"MIT"
] | null | null | null | leopy/algorithm/recursion.py | pobingxiaoxiao/leopy | c6fc291cfcd5a8fa58614873edac9b09f20f2e0a | [
"MIT"
] | 1 | 2019-06-10T02:09:46.000Z | 2019-06-10T02:09:46.000Z | def recursion_sum(data_list):
if data_list == []:
return 0
else:
sum_1 = data_list.pop(0)
return sum_1+recursion_sum(data_list)
def recursion_listCount(data_list):
if data_list == []:
return 0
else:
data_list.pop(0)
return 1+recursion_listCount(data_list)
def recursion_max(data_list):
maximum = data_list[0]
if len(data_list)==1:
return maximum
else:
data_list.pop(0)
find_max = recursion_max(data_list)
if maximum < find_max:
maximum = find_max
return maximum
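# Quick demo (added for exposition). Note that all three helpers consume their
# input with pop(0), so pass a copy if the original list is still needed.
if __name__ == "__main__":
    print(recursion_sum([1, 2, 3]))        # 6
    print(recursion_listCount([1, 2, 3]))  # 3
    print(recursion_max([4, 9, 2]))        # 9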
| 23.52 | 47 | 0.613946 |
4a1db306666b3dfb3d27309790117993457e4cf7 | 2,562 | py | Python | topo_processor/metadata/csv_loader/tests/csv_loader_test.py | linz/processor-aerial-imagery | cf6425560cea381278fe7857865e3d9158b28d7e | [
"MIT"
] | null | null | null | topo_processor/metadata/csv_loader/tests/csv_loader_test.py | linz/processor-aerial-imagery | cf6425560cea381278fe7857865e3d9158b28d7e | [
"MIT"
] | 15 | 2020-07-21T04:56:31.000Z | 2020-09-21T06:28:57.000Z | topo_processor/metadata/csv_loader/tests/csv_loader_test.py | linz/processor-aerial-imagery | cf6425560cea381278fe7857865e3d9158b28d7e | [
"MIT"
] | null | null | null | import csv
import os
import tempfile
import pytest
from topo_processor.metadata.csv_loader.csv_loader import read_csv
def test_read_csv() -> None:
metadata_path = os.path.join(os.getcwd(), "test_data", "historical_aerial_photos_metadata.csv")
metadata = read_csv(metadata_path, "raw_filename", "sufi")
assert len(metadata) == 5
assert list(metadata.keys()) == ["WRONG_PHOTO_TYPE", "MULTIPLE_ASSET", "CONTROL", "WRONG_SURVEY", "CONTROL_2"]
def test_error_on_wrong_file_name() -> None:
metadata_path = "./data/historical_aerial_photos_metadata.csv"
with pytest.raises(Exception, match=r"^Cannot find "):
read_csv(metadata_path, "raw_filename", "sufi")
def test_error_on_duplicate_file() -> None:
temp_file = tempfile.NamedTemporaryFile()
header = [
"WKT",
"sufi",
"survey",
"run",
"photo_no",
"alternate_survey_name",
"camera",
"camera_sequence_no",
"nominal_focal_length",
"altitude",
"scale",
"photocentre_lat",
"photocentre_lon",
"date",
"film",
"film_sequence_no",
"photo_type",
"format",
"source",
"physical_film_condition",
"image_anomalies",
"scanned",
"raw_filename",
"released_filename",
"when_scanned",
"photo_version",
]
row = [
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"WRONG_PHOTO_TYPE",
"",
"",
"",
]
with open(temp_file.name, "a", encoding="utf-8") as csv_file:
writer = csv.writer(csv_file)
writer.writerow(header)
writer.writerow(row)
writer.writerow(row)
with pytest.raises(Exception, match=r'Duplicate "WRONG_PHOTO_TYPE" found in "' + temp_file.name + '"'):
read_csv(temp_file.name, "raw_filename", "sufi")
def test_read_csv_column_filter() -> None:
metadata_path = os.path.join(os.getcwd(), "test_data", "historical_survey_footprint_metadata.csv")
metadata = read_csv(metadata_path, "SURVEY", columns=["NAME"])
assert len(metadata) == 4
assert list(metadata.keys()) == ["SURVEY_1", "SURVEY_3", "SURVEY_2", "SURVEY_NO_NAME"]
assert list(metadata.values()) == [{"NAME": "TE KUITI 1"}, {"NAME": "AUCKLAND 1"}, {"NAME": "WELLINGTON 2"}, {"NAME": ""}]
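# Sketch of the contract these tests pin down (illustrative; the real read_csv
# lives in topo_processor.metadata.csv_loader and this body is an assumption,
# not its actual code):
#
# def read_csv(path, key, alt_key=None, columns=None):
#     if not os.path.exists(path):
#         raise Exception(f"Cannot find {path}")
#     rows = {}
#     with open(path, encoding="utf-8") as f:
#         for row in csv.DictReader(f):
#             if row[key] in rows:
#                 raise Exception(f'Duplicate "{row[key]}" found in "{path}"')
#             rows[row[key]] = {c: row[c] for c in columns} if columns else row
#     return rows
#
# The third positional argument ("sufi" above) is not exercised closely enough
# here to pin down its role.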
| 25.62 | 126 | 0.554254 |
4a1db3c071ddb26e3c870ed3b1cb393528c49529 | 2,623 | py | Python | improviser-client/improviser_client/api/exercises_to_tags/post_exercises_to_tags_resource_list.py | acidjunk/improviser-api-client | f4c9ad69f2cbb0ac677cf49115f0f541035160da | [
"MIT"
] | null | null | null | improviser-client/improviser_client/api/exercises_to_tags/post_exercises_to_tags_resource_list.py | acidjunk/improviser-api-client | f4c9ad69f2cbb0ac677cf49115f0f541035160da | [
"MIT"
] | null | null | null | improviser-client/improviser_client/api/exercises_to_tags/post_exercises_to_tags_resource_list.py | acidjunk/improviser-api-client | f4c9ad69f2cbb0ac677cf49115f0f541035160da | [
"MIT"
] | null | null | null | from typing import Any, Dict, Optional, Union
import httpx
from ...client import Client
from ...models.exercise_to_tag import ExerciseToTag
from ...types import UNSET, Response, Unset
def _get_kwargs(
*,
client: Client,
json_body: ExerciseToTag,
x_fields: Union[Unset, str] = UNSET,
) -> Dict[str, Any]:
url = "{}/v1/exercises-to-tags/".format(client.base_url)
headers: Dict[str, Any] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
if x_fields is not UNSET:
headers["x-fields"] = x_fields
json_json_body = json_body.to_dict()
return {
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
"json": json_json_body,
}
def _parse_response(*, response: httpx.Response) -> Optional[ExerciseToTag]:
if response.status_code == 200:
response_200 = ExerciseToTag.from_dict(response.json())
return response_200
return None
def _build_response(*, response: httpx.Response) -> Response[ExerciseToTag]:
return Response(
status_code=response.status_code,
content=response.content,
headers=response.headers,
parsed=_parse_response(response=response),
)
def sync_detailed(
*,
client: Client,
json_body: ExerciseToTag,
x_fields: Union[Unset, str] = UNSET,
) -> Response[ExerciseToTag]:
kwargs = _get_kwargs(
client=client,
json_body=json_body,
x_fields=x_fields,
)
response = httpx.post(
**kwargs,
)
return _build_response(response=response)
def sync(
*,
client: Client,
json_body: ExerciseToTag,
x_fields: Union[Unset, str] = UNSET,
) -> Optional[ExerciseToTag]:
""" """
return sync_detailed(
client=client,
json_body=json_body,
x_fields=x_fields,
).parsed
async def asyncio_detailed(
*,
client: Client,
json_body: ExerciseToTag,
x_fields: Union[Unset, str] = UNSET,
) -> Response[ExerciseToTag]:
kwargs = _get_kwargs(
client=client,
json_body=json_body,
x_fields=x_fields,
)
async with httpx.AsyncClient() as _client:
response = await _client.post(**kwargs)
return _build_response(response=response)
async def asyncio(
*,
client: Client,
json_body: ExerciseToTag,
x_fields: Union[Unset, str] = UNSET,
) -> Optional[ExerciseToTag]:
""" """
return (
await asyncio_detailed(
client=client,
json_body=json_body,
x_fields=x_fields,
)
).parsed
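# Usage sketch (illustrative; how Client is constructed and the payload fields
# are assumptions, not shown in this module):
# client = Client(base_url="https://improviser.example.org")
# created = sync(client=client, json_body=ExerciseToTag.from_dict(payload))
# or, from async code:
# created = await asyncio(client=client, json_body=ExerciseToTag.from_dict(payload))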
| 22.042017 | 76 | 0.632101 |
4a1db642ce67f01fbf13e5e1e9b4ac1d315b4015 | 3,497 | py | Python | DepreciatedPlayers/Player_Nega.py | karlflores/WatchYourBackProject | 00a7c32e46ea0b75580d17ea6a22372e4a005627 | [
"Unlicense"
] | null | null | null | DepreciatedPlayers/Player_Nega.py | karlflores/WatchYourBackProject | 00a7c32e46ea0b75580d17ea6a22372e4a005627 | [
"Unlicense"
] | null | null | null | DepreciatedPlayers/Player_Nega.py | karlflores/WatchYourBackProject | 00a7c32e46ea0b75580d17ea6a22372e4a005627 | [
"Unlicense"
] | null | null | null | from Constants import constant
from DepreciatedBoard.Board import Board
from Agents.Negamax import Negamax
# from Agents.GreedyAlphaBeta import GreedyAlphaBetaMinimax
class Player:
def __init__(self, colour):
if colour == 'white':
self.colour = constant.WHITE_PIECE
elif colour == 'black':
self.colour = constant.BLACK_PIECE
self.available_moves = []
# each players internal board representation
self.board = Board()
# TODO -- need to see if this works correctly
self.minimax = Negamax(self.board, self.colour)
self.opponent = self.board.get_opp_piece_type(self.colour)
# self.search_algorithm = Minimax(self.board,self.available_moves,self.colour)
# print(self.opponent)
self.depth_eval = 0
self.minimax_val = 0
self.policy_vector = 0
def update(self, action):
# update the board based on the action of the opponent
if self.board.phase == constant.PLACEMENT_PHASE:
# update board also returns the pieces of the board that will be eliminated
self.board.update_board(action, self.opponent)
# self.board.eliminated_pieces[self.opponent]
self.minimax.update_board(self.board)
elif self.board.phase == constant.MOVING_PHASE:
if isinstance(action[0], tuple) is False:
print("ERROR: action is not a tuple")
return
move_type = self.board.convert_coord_to_move_type(action[0], action[1])
# update the player board representation with the action
self.board.update_board((action[0], move_type), self.opponent)
self.minimax.update_board(self.board)
def action(self, turns):
self.minimax.update_board(self.board)
# print(self.board.piece_pos)
# if action is called first the board representation move counter will be zero
# this indicates that this player is the first one to move
# if update is called before action the board representation counter will be 1,
# this indicates that the player is the second to move
if turns == 0 and self.board.phase == constant.MOVING_PHASE:
self.board.move_counter = 0
self.board.phase = constant.MOVING_PHASE
# create the node to search on
# update the board representation and the available moves
# print(self.minimax.available_actions)
# best_move = self.minimax.alpha_beta_minimax(3)
best_move = self.minimax.itr_negamax()
# best_move = self.minimax.alpha_beta(3)
self.depth_eval = self.minimax.eval_depth
self.minimax_val = self.minimax.minimax_val
# do an alpha beta search on this node
# once we have found the best move we must apply it to the board representation
if self.board.phase == constant.PLACEMENT_PHASE:
# print(best_move)
self.board.update_board(best_move, self.colour)
self.minimax.update_board(self.board)
return best_move
else:
if best_move is None:
return None
# (best_move is None)
# print(best_move[0],best_move[1])
new_pos = Board.convert_move_type_to_coord(best_move[0], best_move[1])
self.board.update_board(best_move, self.colour)
self.minimax.update_board(self.board)
return best_move[0], new_pos | 39.292135 | 87 | 0.648556 |
4a1db7bfbe0a19db3510ab41a966d2a31fe0e487 | 21,292 | py | Python | esrally/mechanic/team.py | akhil-rane/rally | bc5513ca3a5a9faaebabf952af196335467a289a | [
"Apache-2.0"
] | null | null | null | esrally/mechanic/team.py | akhil-rane/rally | bc5513ca3a5a9faaebabf952af196335467a289a | [
"Apache-2.0"
] | null | null | null | esrally/mechanic/team.py | akhil-rane/rally | bc5513ca3a5a9faaebabf952af196335467a289a | [
"Apache-2.0"
] | null | null | null | import os
import logging
import configparser
from enum import Enum
import tabulate
from esrally import exceptions, PROGRAM_NAME
from esrally.utils import console, repo, io, modules
TEAM_FORMAT_VERSION = 1
def _path_for(team_root_path, team_member_type):
root_path = os.path.join(team_root_path, team_member_type, "v{}".format(TEAM_FORMAT_VERSION))
if not os.path.exists(root_path):
raise exceptions.SystemSetupError("Path {} for {} does not exist.".format(root_path, team_member_type))
return root_path
def list_cars(cfg):
loader = CarLoader(team_path(cfg))
cars = []
for name in loader.car_names():
cars.append(loader.load_car(name))
# first by type, then by name (we need to run the sort in reverse for that)
# idiomatic way according to https://docs.python.org/3/howto/sorting.html#sort-stability-and-complex-sorts
cars = sorted(sorted(cars, key=lambda c: c.name), key=lambda c: c.type)
console.println("Available cars:\n")
console.println(tabulate.tabulate([[c.name, c.type, c.description] for c in cars], headers=["Name", "Type", "Description"]))
def load_car(repo, name, car_params=None):
class Component:
def __init__(self, root_path, entry_point):
self.root_path = root_path
self.entry_point = entry_point
root_path = None
# preserve order as we append to existing config files later during provisioning.
all_config_paths = []
all_config_base_vars = {}
all_car_vars = {}
all_env = {}
for n in name:
descriptor = CarLoader(repo).load_car(n, car_params)
for p in descriptor.config_paths:
if p not in all_config_paths:
all_config_paths.append(p)
for p in descriptor.root_paths:
# probe whether we have a root path
if BootstrapHookHandler(Component(root_path=p, entry_point=Car.entry_point)).can_load():
if not root_path:
root_path = p
# multiple cars are based on the same hook
elif root_path != p:
raise exceptions.SystemSetupError("Invalid car: {}. Multiple bootstrap hooks are forbidden.".format(name))
all_config_base_vars.update(descriptor.config_base_variables)
all_car_vars.update(descriptor.variables)
# env needs to be merged individually, consider ES_JAVA_OPTS="-Xms1G" and ES_JAVA_OPTS="-ea".
# We want it to be ES_JAVA_OPTS="-Xms1G -ea" in the end.
for k, v in descriptor.env.items():
# merge
if k not in all_env:
all_env[k] = v
else: # merge
# assume we need to separate with a space
all_env[k] = all_env[k] + " " + v
if len(all_config_paths) == 0:
raise exceptions.SystemSetupError("At least one config base is required for car {}".format(name))
variables = {}
# car variables *always* take precedence over config base variables
variables.update(all_config_base_vars)
variables.update(all_car_vars)
return Car(name, root_path, all_config_paths, variables, all_env)
def list_plugins(cfg):
plugins = PluginLoader(team_path(cfg)).plugins()
if plugins:
console.println("Available Elasticsearch plugins:\n")
console.println(tabulate.tabulate([[p.name, p.config] for p in plugins], headers=["Name", "Configuration"]))
else:
console.println("No Elasticsearch plugins are available.\n")
def load_plugin(repo, name, config, plugin_params=None):
return PluginLoader(repo).load_plugin(name, config, plugin_params)
def load_plugins(repo, plugin_names, plugin_params=None):
def name_and_config(p):
plugin_spec = p.split(":")
if len(plugin_spec) == 1:
return plugin_spec[0], None
elif len(plugin_spec) == 2:
return plugin_spec[0], plugin_spec[1].split("+")
else:
raise ValueError("Unrecognized plugin specification [%s]. Use either 'PLUGIN_NAME' or 'PLUGIN_NAME:PLUGIN_CONFIG'." % plugin)
plugins = []
for plugin in plugin_names:
plugin_name, plugin_config = name_and_config(plugin)
plugins.append(load_plugin(repo, plugin_name, plugin_config, plugin_params))
return plugins
def team_path(cfg):
root_path = cfg.opts("mechanic", "team.path", mandatory=False)
if root_path:
return root_path
else:
distribution_version = cfg.opts("mechanic", "distribution.version", mandatory=False)
repo_name = cfg.opts("mechanic", "repository.name")
offline = cfg.opts("system", "offline.mode")
remote_url = cfg.opts("teams", "%s.url" % repo_name, mandatory=False)
root = cfg.opts("node", "root.dir")
team_repositories = cfg.opts("mechanic", "team.repository.dir")
teams_dir = os.path.join(root, team_repositories)
current_team_repo = repo.RallyRepository(remote_url, teams_dir, repo_name, "teams", offline)
current_team_repo.update(distribution_version)
return current_team_repo.repo_dir
class CarLoader:
def __init__(self, team_root_path):
self.cars_dir = _path_for(team_root_path, "cars")
self.logger = logging.getLogger(__name__)
def car_names(self):
def __car_name(path):
p, _ = io.splitext(path)
return io.basename(p)
def __is_car(path):
_, extension = io.splitext(path)
return extension == ".ini"
return map(__car_name, filter(__is_car, os.listdir(self.cars_dir)))
def _car_file(self, name):
return os.path.join(self.cars_dir, "{}.ini".format(name))
def load_car(self, name, car_params=None):
car_config_file = self._car_file(name)
if not io.exists(car_config_file):
raise exceptions.SystemSetupError("Unknown car [{}]. List the available cars with {} list cars.".format(name, PROGRAM_NAME))
config = self._config_loader(car_config_file)
root_paths = []
config_paths = []
config_base_vars = {}
description = self._value(config, ["meta", "description"], default="")
car_type = self._value(config, ["meta", "type"], default="car")
config_bases = self._value(config, ["config", "base"], default="").split(",")
for base in config_bases:
if base:
root_path = os.path.join(self.cars_dir, base)
root_paths.append(root_path)
config_paths.append(os.path.join(root_path, "templates"))
config_file = os.path.join(root_path, "config.ini")
if io.exists(config_file):
base_config = self._config_loader(config_file)
self._copy_section(base_config, "variables", config_base_vars)
# it's possible that some cars don't have a config base, e.g. mixins which only override variables
if len(config_paths) == 0:
self.logger.info("Car [%s] does not define any config paths. Assuming that it is used as a mixin.", name)
variables = self._copy_section(config, "variables", {})
# add all car params here to override any defaults
if car_params:
variables.update(car_params)
env = self._copy_section(config, "env", {})
return CarDescriptor(name, description, car_type, root_paths, config_paths, config_base_vars, variables, env)
def _config_loader(self, file_name):
config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
# Do not modify the case of option keys but read them as is
config.optionxform = lambda option: option
config.read(file_name)
return config
def _value(self, cfg, section_path, default=None):
path = [section_path] if (isinstance(section_path, str)) else section_path
current_cfg = cfg
for k in path:
if k in current_cfg:
current_cfg = current_cfg[k]
else:
return default
return current_cfg
def _copy_section(self, cfg, section, target):
if section in cfg.sections():
for k, v in cfg[section].items():
target[k] = v
return target
class CarDescriptor:
def __init__(self, name, description, type, root_paths, config_paths, config_base_variables, variables, env):
self.name = name
self.description = description
self.type = type
self.root_paths = root_paths
self.config_paths = config_paths
self.config_base_variables = config_base_variables
self.variables = variables
self.env = env
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return isinstance(other, type(self)) and self.name == other.name
class Car:
# name of the initial Python file to load for cars.
entry_point = "config"
def __init__(self, names, root_path, config_paths, variables=None, env=None):
"""
Creates new settings for a benchmark candidate.
:param names: Descriptive name(s) for this car.
:param root_path: The root path from which bootstrap hooks should be loaded if any. May be ``None``.
:param config_paths: A non-empty list of paths where the raw config can be found.
:param variables: A dict containing variable definitions that need to be replaced.
:param env: Environment variables that should be set when launching the benchmark candidate.
"""
if env is None:
env = {}
if variables is None:
variables = {}
if isinstance(names, str):
self.names = [names]
else:
self.names = names
self.root_path = root_path
self.config_paths = config_paths
self.variables = variables
self.env = env
def mandatory_var(self, name):
try:
return self.variables[name]
except KeyError:
raise exceptions.SystemSetupError("Car \"{}\" requires config key \"{}\"".format(self.name, name))
@property
def name(self):
return "+".join(self.names)
# Adapter method for BootstrapHookHandler
@property
def config(self):
return self.name
@property
def safe_name(self):
return "_".join(self.names)
def __str__(self):
return self.name
class PluginLoader:
def __init__(self, team_root_path):
self.plugins_root_path = _path_for(team_root_path, "plugins")
self.logger = logging.getLogger(__name__)
def plugins(self):
known_plugins = self._core_plugins() + self._configured_plugins()
        known_plugins.sort(key=lambda p: p.name)
return known_plugins
def _core_plugins(self):
core_plugins = []
core_plugins_path = os.path.join(self.plugins_root_path, "core-plugins.txt")
if os.path.exists(core_plugins_path):
with open(core_plugins_path, mode="rt", encoding="utf-8") as f:
for line in f:
if not line.startswith("#"):
# be forward compatible and allow additional values (comma-separated). At the moment, we only use the plugin name.
values = line.strip().split(",")
core_plugins.append(PluginDescriptor(name=values[0], core_plugin=True))
return core_plugins
def _configured_plugins(self):
configured_plugins = []
# each directory is a plugin, each .ini is a config (just go one level deep)
for entry in os.listdir(self.plugins_root_path):
plugin_path = os.path.join(self.plugins_root_path, entry)
if os.path.isdir(plugin_path):
for child_entry in os.listdir(plugin_path):
if os.path.isfile(os.path.join(plugin_path, child_entry)) and io.has_extension(child_entry, ".ini"):
f, _ = io.splitext(child_entry)
plugin_name = self._file_to_plugin_name(entry)
config = io.basename(f)
configured_plugins.append(PluginDescriptor(name=plugin_name, config=config))
return configured_plugins
def _plugin_file(self, name, config):
return os.path.join(self._plugin_root_path(name), "%s.ini" % config)
def _plugin_root_path(self, name):
return os.path.join(self.plugins_root_path, self._plugin_name_to_file(name))
# As we allow to store Python files in the plugin directory and the plugin directory also serves as the root path of the corresponding
# module, we need to adhere to the Python restrictions here. For us, this is that hyphens in module names are not allowed. Hence, we
# need to switch from underscores to hyphens and vice versa.
#
# We are implicitly assuming that plugin names stick to the convention of hyphen separation to simplify implementation and usage a bit.
def _file_to_plugin_name(self, file_name):
return file_name.replace("_", "-")
def _plugin_name_to_file(self, plugin_name):
return plugin_name.replace("-", "_")
def _core_plugin(self, name):
return next((p for p in self._core_plugins() if p.name == name and p.config is None), None)
def load_plugin(self, name, config_names, plugin_params=None):
if config_names is not None:
self.logger.info("Loading plugin [%s] with configuration(s) [%s].", name, config_names)
else:
self.logger.info("Loading plugin [%s] with default configuration.", name)
root_path = self._plugin_root_path(name)
if not config_names:
# maybe we only have a config folder but nothing else (e.g. if there is only an install hook)
if io.exists(root_path):
return PluginDescriptor(name=name, config=config_names, root_path=root_path)
else:
core_plugin = self._core_plugin(name)
if core_plugin:
return core_plugin
# If we just have a plugin name then we assume that this is a community plugin and the user has specified a download URL
else:
self.logger.info("The plugin [%s] is neither a configured nor an official plugin. Assuming that this is a community "
"plugin not requiring any configuration and you have set a proper download URL.", name)
return PluginDescriptor(name)
else:
variables = {}
config_paths = []
# used for deduplication
known_config_bases = set()
# used to determine whether this is a core plugin
core_plugin = self._core_plugin(name)
for config_name in config_names:
config_file = self._plugin_file(name, config_name)
# Do we have an explicit configuration for this plugin?
if not io.exists(config_file):
if core_plugin:
raise exceptions.SystemSetupError("Plugin [%s] does not provide configuration [%s]. List the available plugins "
"and configurations with %s list elasticsearch-plugins "
"--distribution-version=VERSION." % (name, config_name, PROGRAM_NAME))
else:
raise exceptions.SystemSetupError("Unknown plugin [%s]. List the available plugins with %s list "
"elasticsearch-plugins --distribution-version=VERSION." % (name, PROGRAM_NAME))
config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
# Do not modify the case of option keys but read them as is
config.optionxform = lambda option: option
config.read(config_file)
if "config" in config and "base" in config["config"]:
config_bases = config["config"]["base"].split(",")
for base in config_bases:
if base and base not in known_config_bases:
config_paths.append(os.path.join(root_path, base, "templates"))
known_config_bases.add(base)
if "variables" in config.sections():
for k, v in config["variables"].items():
variables[k] = v
# add all plugin params here to override any defaults
if plugin_params:
variables.update(plugin_params)
# maybe one of the configs is really just for providing variables. However, we still require one config base overall.
if len(config_paths) == 0:
raise exceptions.SystemSetupError("At least one config base is required for plugin [%s]" % name)
return PluginDescriptor(name=name, core_plugin=core_plugin is not None, config=config_names, root_path=root_path,
config_paths=config_paths, variables=variables)
class PluginDescriptor:
# name of the initial Python file to load for plugins.
entry_point = "plugin"
def __init__(self, name, core_plugin=False, config=None, root_path=None, config_paths=None, variables=None):
if config_paths is None:
config_paths = []
if variables is None:
variables = {}
self.name = name
self.core_plugin = core_plugin
self.config = config
self.root_path = root_path
self.config_paths = config_paths
self.variables = variables
def __str__(self):
return "Plugin descriptor for [%s]" % self.name
def __repr__(self):
r = []
for prop, value in vars(self).items():
r.append("%s = [%s]" % (prop, repr(value)))
return ", ".join(r)
def __hash__(self):
return hash(self.name) ^ hash(self.config) ^ hash(self.core_plugin)
def __eq__(self, other):
return isinstance(other, type(self)) and (self.name, self.config, self.core_plugin) == (other.name, other.config, other.core_plugin)
class BootstrapPhase(Enum):
post_install = 10
@classmethod
def valid(cls, name):
for n in BootstrapPhase.names():
if n == name:
return True
return False
@classmethod
def names(cls):
return [p.name for p in list(BootstrapPhase)]
class BootstrapHookHandler:
"""
    Responsible for loading and executing component-specific initialization code.
"""
def __init__(self, component, loader_class=modules.ComponentLoader):
"""
Creates a new BootstrapHookHandler.
:param component: The component that should be loaded. In practice, this is a PluginDescriptor or a Car instance.
:param loader_class: The implementation that loads the provided component's code.
"""
self.component = component
# Don't allow the loader to recurse. The subdirectories may contain Elasticsearch specific files which we do not want to add to
# Rally's Python load path. We may need to define a more advanced strategy in the future.
self.loader = loader_class(root_path=self.component.root_path, component_entry_point=self.component.entry_point, recurse=False)
self.hooks = {}
self.logger = logging.getLogger(__name__)
def can_load(self):
return self.loader.can_load()
def load(self):
root_module = self.loader.load()
try:
# every module needs to have a register() method
root_module.register(self)
except exceptions.RallyError:
# just pass our own exceptions transparently.
raise
except BaseException:
msg = "Could not load bootstrap hooks in [{}]".format(self.loader.root_path)
self.logger.exception(msg)
raise exceptions.SystemSetupError(msg)
def register(self, phase, hook):
self.logger.info("Registering bootstrap hook [%s] for phase [%s] in component [%s]", hook.__name__, phase, self.component.name)
if not BootstrapPhase.valid(phase):
raise exceptions.SystemSetupError("Unknown bootstrap phase [{}]. Valid phases are: {}.".format(phase, BootstrapPhase.names()))
if phase not in self.hooks:
self.hooks[phase] = []
self.hooks[phase].append(hook)
def invoke(self, phase, **kwargs):
if phase in self.hooks:
self.logger.info("Invoking phase [%s] for component [%s] in config [%s]", phase, self.component.name, self.component.config)
for hook in self.hooks[phase]:
self.logger.info("Invoking bootstrap hook [%s].", hook.__name__)
# hooks should only take keyword arguments to be forwards compatible with Rally!
hook(config_names=self.component.config, **kwargs)
else:
self.logger.debug("Component [%s] in config [%s] has no hook registered for phase [%s].",
self.component.name, self.component.config, phase)
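# Sketch of a component entry-point module as consumed by BootstrapHookHandler
# above (illustrative; the function and hook names are assumptions). load()
# imports the component's entry point and calls its register() with the handler:
#
# def register(handler):
#     handler.register("post_install", after_install)
#
# def after_install(config_names, **kwargs):
#     # called via BootstrapHookHandler.invoke("post_install", ...), which
#     # passes the component's config names plus any extra keyword arguments
#     ...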
| 43.364562 | 140 | 0.630472 |
4a1dba9fe73f65c511d86263fdbaeda7b751a440 | 1,513 | py | Python | PixivRanking.py | hsheric0210/PixivUtil2 | 95ca633ec869169d091be55695cf863a0b582d6a | [
"BSD-2-Clause"
] | 1 | 2022-02-08T10:29:40.000Z | 2022-02-08T10:29:40.000Z | PixivRanking.py | hsheric0210/PixivUtil2 | 95ca633ec869169d091be55695cf863a0b582d6a | [
"BSD-2-Clause"
] | null | null | null | PixivRanking.py | hsheric0210/PixivUtil2 | 95ca633ec869169d091be55695cf863a0b582d6a | [
"BSD-2-Clause"
] | null | null | null | import json
from PixivException import PixivException
class PixivRanking:
mode = ""
curr_page = 0
next_page = None
prev_page = None
curr_date = ""
next_date = None
prev_date = None
rank_total = 0
contents = list()
filters = None
def __init__(self, js_str, filters):
js_data = json.loads(js_str)
self.mode = js_data["mode"]
self.curr_date = js_data["date"]
self.next_date = js_data["next_date"]
self.prev_date = js_data["prev_date"]
self.curr_page = js_data["page"]
self.next_page = js_data["next"]
self.prev_page = js_data["prev"]
self.rank_total = js_data["rank_total"]
self.contents = js_data["contents"]
self.filters = filters
if self.filters is not None:
self.filter_contents()
def filter_contents(self):
        # iterate over a copy: removing items from the list being iterated
        # would otherwise skip the element after each removal
        for content in list(self.contents):
for filter_str in self.filters:
if content["illust_content_type"][filter_str]:
self.contents.remove(content)
break
class PixivNewIllust:
last_id = 0
images = None
type_mode = None
def __init__(self, js_str, type_mode):
js_data = json.loads(js_str)
if bool(js_data["error"]):
raise PixivException(js_data["message"], errorCode=PixivException.OTHER_ERROR)
self.last_id = js_data["body"]["lastId"]
self.images = js_data["body"]["illusts"]
self.type_mode = type_mode
| 27.017857 | 90 | 0.607403 |
4a1dbaaee7965d093e4b82b5af5b8c8c77a3e4b3 | 1,055 | py | Python | python/pycylon/examples/table_math_operators.py | chathurawidanage/cylon | ac61b7a50880138fe67de21adee208016a94979a | [
"Apache-2.0"
] | null | null | null | python/pycylon/examples/table_math_operators.py | chathurawidanage/cylon | ac61b7a50880138fe67de21adee208016a94979a | [
"Apache-2.0"
] | null | null | null | python/pycylon/examples/table_math_operators.py | chathurawidanage/cylon | ac61b7a50880138fe67de21adee208016a94979a | [
"Apache-2.0"
] | null | null | null | ##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from pycylon import Table
from pycylon import CylonContext
ctx: CylonContext = CylonContext(config=None, distributed=False)
data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
columns = ['col-1', 'col-2', 'col-3']
tb1: Table = Table.from_list(ctx, columns, data)
print(tb1)
scalar_value = 2
tb2 = -tb1
print("Negate")
print(tb2)
tb2 = tb1 + 2
print("Add")
print(tb2)
tb2 = tb1 - 2
print("Subtract")
print(tb2)
tb2 = tb1 * 2
print("Multiply")
print(tb2)
tb2 = tb1 / 2
print("Division")
print(tb2)
| 21.530612 | 74 | 0.708057 |
4a1dbd5e4608108903210d6751da9c63a297c61a | 2,941 | py | Python | spark-1.3.0/python/build/py4j/tests/java_map_test.py | iflink/spark | 3b6b0c2cbdc1f939fb60ef5717ffbe232ebceee5 | [
"Apache-2.0"
] | null | null | null | spark-1.3.0/python/build/py4j/tests/java_map_test.py | iflink/spark | 3b6b0c2cbdc1f939fb60ef5717ffbe232ebceee5 | [
"Apache-2.0"
] | null | null | null | spark-1.3.0/python/build/py4j/tests/java_map_test.py | iflink/spark | 3b6b0c2cbdc1f939fb60ef5717ffbe232ebceee5 | [
"Apache-2.0"
] | 1 | 2020-07-23T22:14:41.000Z | 2020-07-23T22:14:41.000Z | '''
Created on Feb 5, 2010
@author: barthelemy
'''
from __future__ import unicode_literals, absolute_import
from multiprocessing import Process
import subprocess
import time
import unittest
from py4j.java_gateway import JavaGateway
from py4j.tests.java_gateway_test import PY4J_JAVA_PATH, safe_shutdown
def start_example_server():
subprocess.call(["java", "-cp", PY4J_JAVA_PATH,
"py4j.examples.ExampleApplication"])
def start_example_app_process():
# XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
p = Process(target=start_example_server)
p.start()
return p
def get_map():
return {"a": 1, "b": 2.0, "c": "z"}
class AutoConvertTest(unittest.TestCase):
def setUp(self):
# logger = logging.getLogger("py4j")
# logger.setLevel(logging.DEBUG)
# logger.addHandler(logging.StreamHandler())
self.p = start_example_app_process()
time.sleep(0.5)
self.gateway = JavaGateway(auto_convert=True)
def tearDown(self):
safe_shutdown(self)
self.p.join()
time.sleep(0.5)
def testAutoConvert(self):
dj = self.gateway.jvm.java.util.HashMap()
dj['b'] = 2
dj['a'] = 1
dp = {'a': 1, 'b': 2}
self.assertTrue(dj.equals(dp))
class Test(unittest.TestCase):
def setUp(self):
# logger = logging.getLogger("py4j")
# logger.setLevel(logging.DEBUG)
# logger.addHandler(logging.StreamHandler())
self.p = start_example_app_process()
time.sleep(0.5)
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
time.sleep(0.5)
def equal_maps(self, m1, m2):
if len(m1) == len(m2):
equal = True
for k in m1:
equal = m1[k] == m2[k]
if not equal:
break
return equal
else:
return False
def testMap(self):
dp0 = {}
dp = get_map()
dj = self.gateway.jvm.java.util.HashMap()
        self.assertTrue(self.equal_maps(dj, dp0))
dj["a"] = 1
dj["b"] = 2.0
dj["c"] = "z"
        self.assertTrue(self.equal_maps(dj, dp))
del(dj["a"])
del(dp["a"])
dj2 = self.gateway.jvm.java.util.HashMap()
dj2["b"] = 2.0
dj2["c"] = "z"
dj3 = self.gateway.jvm.java.util.HashMap()
dj3["a"] = 1
dj3["b"] = 2.0
dj3["c"] = "z"
        self.assertTrue(self.equal_maps(dj, dp))
self.assertTrue(dj == dj)
self.assertTrue(dj == dj2)
# Does not always work for some reason...
# Probably not worth supporting for now...
# self.assertTrue(dj < dj3)
self.assertTrue(dj != dp)
dps = {1: 1, 2: 2}
djs = self.gateway.jvm.java.util.HashMap()
djs[1] = 1
djs[2] = 2
self.assertEqual(str(djs), str(dps))
if __name__ == "__main__":
unittest.main()
| 24.714286 | 72 | 0.565794 |
4a1dbe69f4769cccdcc705d2c7843d9f46a58011 | 212 | py | Python | configs/_base_/schedules/imagenet_bs256.py | anthracene/mmclassification | 4b46fd6dc75d26b5604fdec75f6cc49e1d96d2a7 | [
"Apache-2.0"
] | 31 | 2020-11-14T02:47:54.000Z | 2021-12-14T06:26:10.000Z | configs/_base_/schedules/imagenet_bs256.py | anthracene/mmclassification | 4b46fd6dc75d26b5604fdec75f6cc49e1d96d2a7 | [
"Apache-2.0"
] | null | null | null | configs/_base_/schedules/imagenet_bs256.py | anthracene/mmclassification | 4b46fd6dc75d26b5604fdec75f6cc49e1d96d2a7 | [
"Apache-2.0"
] | 4 | 2021-01-14T18:12:38.000Z | 2021-11-11T11:46:50.000Z | # optimizer
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[30, 60, 90])
total_epochs = 100
| 30.285714 | 71 | 0.731132 |
4a1dbe78a0b3edbd7171df2545b2f3e259980540 | 339 | py | Python | 8_Tran_Vu_Ngoc_Thach/bai 2.4.py | lpython2006e/exercies | 84343eae57d86708a7984aa02f77183a4688a508 | [
"MIT"
] | null | null | null | 8_Tran_Vu_Ngoc_Thach/bai 2.4.py | lpython2006e/exercies | 84343eae57d86708a7984aa02f77183a4688a508 | [
"MIT"
] | null | null | null | 8_Tran_Vu_Ngoc_Thach/bai 2.4.py | lpython2006e/exercies | 84343eae57d86708a7984aa02f77183a4688a508 | [
"MIT"
] | 8 | 2020-07-10T14:13:54.000Z | 2020-08-03T08:17:50.000Z | """Write a function that returns the elements on odd positions in a list."""
def odd_elements_list(a):
odd_list = []
    # the original "else: i += 1" branch had no effect (range reassigns i on
    # every iteration), so it is dropped; a[1::2] is an equivalent one-liner
    for i in range(len(a)):
        if i % 2 != 0:
            odd_list.append(a[i])
return odd_list
mylist = [1, 4, 5, 100, 2, 1, -1, -7]
odd = odd_elements_list(mylist)
print(odd)
| 19.941176 | 76 | 0.569322 |
4a1dbe89ce8a5f1ad91bb7865aff8ba48addfce9 | 246 | py | Python | encoder/data_objects/__init__.py | fujiaxiang/Real-Time-Voice-Cloning | 3b182258724c7d2cda94d418a3ad0c03dd29b302 | [
"MIT"
] | null | null | null | encoder/data_objects/__init__.py | fujiaxiang/Real-Time-Voice-Cloning | 3b182258724c7d2cda94d418a3ad0c03dd29b302 | [
"MIT"
] | null | null | null | encoder/data_objects/__init__.py | fujiaxiang/Real-Time-Voice-Cloning | 3b182258724c7d2cda94d418a3ad0c03dd29b302 | [
"MIT"
] | null | null | null | from encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset
from encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataLoader
from encoder.data_objects.iemocap_dataset import IemocapDataset
| 49.2 | 91 | 0.922764 |
4a1dbe92044620b666a68392c4f361306a2e6260 | 2,632 | py | Python | zulip_bots/zulip_bots/bots/merels/libraries/game_data.py | benjaoming/python-zulip-api | d46935218022d82fed262fb485e112caa1aefd11 | [
"Apache-2.0"
] | 1 | 2020-06-17T06:47:15.000Z | 2020-06-17T06:47:15.000Z | zulip_bots/zulip_bots/bots/merels/libraries/game_data.py | benjaoming/python-zulip-api | d46935218022d82fed262fb485e112caa1aefd11 | [
"Apache-2.0"
] | 7 | 2017-10-05T07:43:32.000Z | 2017-10-14T06:56:47.000Z | zulip_bots/zulip_bots/bots/merels/libraries/game_data.py | benjaoming/python-zulip-api | d46935218022d82fed262fb485e112caa1aefd11 | [
"Apache-2.0"
] | 1 | 2020-08-25T19:25:25.000Z | 2020-08-25T19:25:25.000Z | """This serves as a bridge between the database and the other modules.
In a nutshell, this module parses a tuple from the database, then translates it
into a more convenient naming for easier access. It also adds certain
functions that are useful for the functioning of the game.
"""
from . import mechanics
from .interface import construct_grid
class GameData():
def __init__(self, game_data=(
'merels', 'X', 0, 0, 'NNNNNNNNNNNNNNNNNNNNNNNN', '', 0)):
self.topic_name = game_data[0]
self.turn = game_data[1]
self.x_taken = game_data[2]
self.o_taken = game_data[3]
self.board = game_data[4]
self.hill_uid = game_data[5]
self.take_mode = game_data[6]
def __len__(self):
return len(self.construct())
def construct(self):
"""Constructs a tuple based on existing records
:return: A tuple containing all the game records
"""
res = (
self.topic_name, self.turn, self.x_taken, self.o_taken, self.board,
self.hill_uid, self.take_mode)
return res
def grid(self):
"""Returns the grid
:return: A 2-dimensional 7x7 list (the grid)
"""
return construct_grid(self.board)
def get_x_piece_possessed_not_on_grid(self):
"""Gets the amount of X pieces that the player X still have, but not
put yet on the grid
:return: Amount of pieces that X has, but not on grid
"""
return 9 - self.x_taken - mechanics.get_piece("X", self.grid())
def get_o_piece_possessed_not_on_grid(self):
"""Gets the amount of X pieces that the player O still have, but not
put yet on the grid
:return: Amount of pieces that O has, but not on grid
"""
return 9 - self.o_taken - mechanics.get_piece("O", self.grid())
def get_phase(self):
"""Gets the phase number for the current game
:return: A phase number (1, 2, or 3)
"""
return mechanics.get_phase_number(self.grid(), self.turn,
self.get_x_piece_possessed_not_on_grid(),
self.get_o_piece_possessed_not_on_grid())
def switch_turn(self):
"""Switches turn between X and O
:return: None
"""
if self.turn == "X":
self.turn = "O"
else:
self.turn = "X"
def toggle_take_mode(self):
"""Toggles take mode
:return: None
"""
if self.take_mode == 0:
self.take_mode = 1
else:
self.take_mode = 0
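# Usage sketch (added for exposition): a record round-trips through
# construct(), and switch_turn()/toggle_take_mode() flip their fields.
# record = ('merels', 'X', 0, 0, 'N' * 24, '', 0)
# game = GameData(record)
# assert game.construct() == record
# game.switch_turn()        # game.turn is now 'O'
# game.toggle_take_mode()   # game.take_mode is now 1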
| 29.573034 | 83 | 0.590046 |
4a1dbfdf52541aaf98007989f74a978492dc6486 | 3,550 | py | Python | data_analysis/heat_maps/create_heatmaps/individual_populations_performance.py | migbash/energy-intern-backend | 41b9d2acf5e8d30c8d8d55b006c33b97267dc497 | [
"MIT"
] | null | null | null | data_analysis/heat_maps/create_heatmaps/individual_populations_performance.py | migbash/energy-intern-backend | 41b9d2acf5e8d30c8d8d55b006c33b97267dc497 | [
"MIT"
] | null | null | null | data_analysis/heat_maps/create_heatmaps/individual_populations_performance.py | migbash/energy-intern-backend | 41b9d2acf5e8d30c8d8d55b006c33b97267dc497 | [
"MIT"
] | 3 | 2019-09-11T04:14:15.000Z | 2021-03-01T15:16:57.000Z | import ast
import sys
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from typing import List
""" Creates heat maps summarising the satisfaction levels for each agent type when they make up 100% of the population.
This is completed for each simulation version.
Parameters
----------
folderName : str
The output destination folder, used to organise output data.
exchangesArray : str
Array of the various number of exchanges per day that were simulated.
Note that this is immediately converted to type List[int].
daysOfInterest : str
Array containing the days to be analysed.
Note that this is immediately converted to type List[int].
"""
folderName: str = sys.argv[1]
exchangesArray: List[int] = ast.literal_eval(sys.argv[2])
daysOfInterest: List[int] = ast.literal_eval(sys.argv[3])
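# Example invocation (illustrative values; the quotes keep each list a single
# argv entry for ast.literal_eval):
# python individual_populations_performance.py results "[1, 50, 200]" "[1, 15, 30]"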
baseOutputDirectory: str = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))), folderName)
dataDir = baseOutputDirectory + "/comparativeHeatMaps/data/"
imageDir = baseOutputDirectory + "/comparativeHeatMaps/images/"
# Create the output directories if they do not already exist.
if not os.path.exists(imageDir):
os.makedirs(imageDir)
df = pd.read_csv(dataDir + "individual_populations_summary.csv")
selfish = df.pivot("Day", "Exchanges", "Selfish")
social = df.pivot("Day", "Exchanges", "SC_Social")
social_AE = df.pivot("Day", "Exchanges", "WSC_Social")
width: float = 0
height: float = 0
for d in range(len(daysOfInterest) + 1):
height += 0.5
for e in range(len(exchangesArray) + 2):
width += 0.5
width = (width * 3) + 0.5
height += 0.5
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, sharex='col', sharey='row', figsize=(width, height))
sns.heatmap(selfish, cmap="Reds", center= 0.5, vmin=0, vmax=1.0, ax=ax1, linewidths=0.1, linecolor="white", cbar=True, annot=True, annot_kws={"size": 10})
ax1.set_title(r"$\bf{Selfish}$", fontsize=12)
ax1.invert_yaxis()
ax1.set_ylabel('')
ax1.set_xlabel('')
ax1.set_xticklabels(ax1.get_xticklabels(), rotation = 0)
sns.heatmap(social_AE, cmap="Reds", center= 0.5, vmin=0, vmax=1.0, ax=ax2, linewidths=0.1, linecolor="white", cbar=True, annot=True, annot_kws={"size": 10})
ax2.set_title(r"$\bf{Social}$" + " " + r"$\bf{without}$" + " " + r"$\bf{Social}$" + " " + r"$\bf{Capital}$", fontsize=12)
ax2.invert_yaxis()
ax2.set_ylabel('')
ax2.set_xlabel('')
ax2.set_xticklabels(ax1.get_xticklabels(), rotation = 0)
sns.heatmap(social, cmap="Reds", center= 0.5, vmin=0, vmax=1.0, ax=ax3, linewidths=0.1, linecolor="white", cbar=True, annot=True, annot_kws={"size": 10})
ax3.set_title(r"$\bf{Social}$" + " " + r"$\bf{with}$" + " " + r"$\bf{Social}$" + " " + r"$\bf{Capital}$", fontsize=12)
ax3.invert_yaxis()
ax3.set_ylabel('')
ax3.set_xlabel('')
ax3.set_xticklabels(ax1.get_xticklabels(), rotation = 0)
fig.text(0.5, 0.06, 'Exchanges', ha='center', fontsize=14)
fig.text(0.07, 0.5, 'Day', va='center', rotation='vertical', fontsize=14)
fig.suptitle('Average Satisfaction in Single Strategy Populations', fontsize=14)
plt.subplots_adjust(hspace = .2)
plt.subplots_adjust(wspace = .2)
plt.subplots_adjust(top = .8)
plt.subplots_adjust(bottom = .2)
plt.subplots_adjust(left = .12)
plt.subplots_adjust(right = .95)
fname = imageDir + "individual_populations"
plt.savefig(fname,
dpi=None,
facecolor='w',
edgecolor='w',
orientation='landscape',
format=None,
transparent=False,
bbox_inches=None,
pad_inches=0,
metadata=None)
| 36.22449 | 156 | 0.700282 |
4a1dc0ea536020f110b45b8cedeeed1704f811a6 | 4,312 | py | Python | examples/FasterRCNN/dataset/dataset.py | s36934512/tensorpack | 78a16615f8c9e6993c2a14961ca3fdd05f7c273c | [
"Apache-2.0"
] | 4,404 | 2018-05-30T23:38:42.000Z | 2022-03-31T22:30:11.000Z | examples/FasterRCNN/dataset/dataset.py | s36934512/tensorpack | 78a16615f8c9e6993c2a14961ca3fdd05f7c273c | [
"Apache-2.0"
] | 771 | 2018-06-01T09:54:00.000Z | 2022-03-31T23:12:29.000Z | examples/FasterRCNN/dataset/dataset.py | s36934512/tensorpack | 78a16615f8c9e6993c2a14961ca3fdd05f7c273c | [
"Apache-2.0"
] | 1,412 | 2018-06-01T00:29:43.000Z | 2022-03-26T17:37:39.000Z | # -*- coding: utf-8 -*-
from collections import defaultdict
__all__ = ['DatasetRegistry', 'DatasetSplit']
class DatasetSplit():
"""
    A class to load datasets and evaluate results for a dataset split (e.g., "coco_train_2017").
To use your own dataset that's not in COCO format, write a subclass that
implements the interfaces.
"""
def training_roidbs(self):
"""
Returns:
roidbs (list[dict]):
Produce "roidbs" as a list of dict, each dict corresponds to one image with k>=0 instances.
and the following keys are expected for training:
file_name: str, full path to the image
boxes: numpy array of kx4 floats, each row is [x1, y1, x2, y2]
class: numpy array of k integers, in the range of [1, #categories], NOT [0, #categories)
is_crowd: k booleans. Use k False if you don't know what it means.
segmentation: k lists of numpy arrays.
Each list of numpy arrays corresponds to the mask for one instance.
Each numpy array in the list is a polygon of shape Nx2,
because one mask can be represented by N polygons.
Each row in the Nx2 array is a (x, y) coordinate.
If your segmentation annotations are originally masks rather than polygons,
either convert it, or the augmentation will need to be changed or skipped accordingly.
Include this field only if training Mask R-CNN.
Coordinates in boxes & polygons are absolute coordinates in unit of pixels, unless
cfg.DATA.ABSOLUTE_COORD is False.
"""
raise NotImplementedError()
def inference_roidbs(self):
"""
Returns:
roidbs (list[dict]):
Each dict corresponds to one image to run inference on. The
following keys in the dict are expected:
file_name (str): full path to the image
image_id (str): an id for the image. The inference results will be stored with this id.
"""
raise NotImplementedError()
def eval_inference_results(self, results, output=None):
"""
Args:
results (list[dict]): the inference results as dicts.
Each dict corresponds to one __instance__. It contains the following keys:
image_id (str): the id that matches `inference_roidbs`.
category_id (int): the category prediction, in range [1, #category]
bbox (list[float]): x1, y1, x2, y2
score (float):
segmentation: the segmentation mask in COCO's rle format.
output (str): the output file or directory to optionally save the results to.
Returns:
dict: the evaluation results.
"""
raise NotImplementedError()
class DatasetRegistry():
_registry = {}
_metadata_registry = defaultdict(dict)
@staticmethod
def register(name, func):
"""
Args:
name (str): the name of the dataset split, e.g. "coco_train2017"
func: a function which returns an instance of `DatasetSplit`
"""
assert name not in DatasetRegistry._registry, "Dataset {} was registered already!".format(name)
DatasetRegistry._registry[name] = func
@staticmethod
def get(name):
"""
Args:
name (str): the name of the dataset split, e.g. "coco_train2017"
Returns:
DatasetSplit
"""
assert name in DatasetRegistry._registry, "Dataset {} was not registered!".format(name)
return DatasetRegistry._registry[name]()
@staticmethod
def register_metadata(name, key, value):
"""
Args:
name (str): the name of the dataset split, e.g. "coco_train2017"
key: the key of the metadata, e.g., "class_names"
value: the value of the metadata
"""
DatasetRegistry._metadata_registry[name][key] = value
@staticmethod
def get_metadata(name, key):
"""
Args:
name (str): the name of the dataset split, e.g. "coco_train2017"
key: the key of the metadata, e.g., "class_names"
Returns:
value
"""
return DatasetRegistry._metadata_registry[name][key]
| 35.344262 | 103 | 0.612477 |
4a1dc1222cf77efc73a21763cb4b30cf5f52c1cc | 3,520 | py | Python | ddpg.py | Kaixhin/spinning-up-basic | ac5f2219c3c6c657b460c3150572d8deb3abfba0 | [
"MIT"
] | 190 | 2019-01-18T16:20:05.000Z | 2022-01-20T05:28:03.000Z | ddpg.py | Kaixhin/spinning-up-basic | ac5f2219c3c6c657b460c3150572d8deb3abfba0 | [
"MIT"
] | 2 | 2019-01-26T18:16:33.000Z | 2019-03-03T23:05:06.000Z | ddpg.py | Kaixhin/spinning-up-basic | ac5f2219c3c6c657b460c3150572d8deb3abfba0 | [
"MIT"
] | 17 | 2019-01-24T11:17:59.000Z | 2021-06-27T10:08:44.000Z | from collections import deque
import random
import torch
from torch import optim
from tqdm import tqdm
from env import Env
from hyperparams import ACTION_NOISE, OFF_POLICY_BATCH_SIZE as BATCH_SIZE, DISCOUNT, HIDDEN_SIZE, LEARNING_RATE, MAX_STEPS, POLYAK_FACTOR, REPLAY_SIZE, TEST_INTERVAL, UPDATE_INTERVAL, UPDATE_START
from models import Actor, Critic, create_target_network, update_target_network
from utils import plot
env = Env()
actor = Actor(env.observation_space.shape[0], env.action_space.shape[0], HIDDEN_SIZE, stochastic=False, layer_norm=True)
critic = Critic(env.observation_space.shape[0], env.action_space.shape[0], HIDDEN_SIZE, state_action=True, layer_norm=True)
target_actor = create_target_network(actor)
target_critic = create_target_network(critic)
actor_optimiser = optim.Adam(actor.parameters(), lr=LEARNING_RATE)
critic_optimiser = optim.Adam(critic.parameters(), lr=LEARNING_RATE)
D = deque(maxlen=REPLAY_SIZE)
def test(actor):
with torch.no_grad():
env = Env()
state, done, total_reward = env.reset(), False, 0
while not done:
action = torch.clamp(actor(state), min=-1, max=1) # Use purely exploitative policy at test time
state, reward, done = env.step(action)
total_reward += reward
return total_reward
state, done = env.reset(), False
pbar = tqdm(range(1, MAX_STEPS + 1), unit_scale=1, smoothing=0)
for step in pbar:
with torch.no_grad():
if step < UPDATE_START:
# To improve exploration take actions sampled from a uniform random distribution over actions at the start of training
action = torch.tensor([[2 * random.random() - 1]])
else:
# Observe state s and select action a = clip(μ(s) + ε, a_low, a_high)
action = torch.clamp(actor(state) + ACTION_NOISE * torch.randn(1, 1), min=-1, max=1)
# Execute a in the environment and observe next state s', reward r, and done signal d to indicate whether s' is terminal
next_state, reward, done = env.step(action)
# Store (s, a, r, s', d) in replay buffer D
D.append({'state': state, 'action': action, 'reward': torch.tensor([reward]), 'next_state': next_state, 'done': torch.tensor([done], dtype=torch.float32)})
state = next_state
# If s' is terminal, reset environment state
if done:
state = env.reset()
if step > UPDATE_START and step % UPDATE_INTERVAL == 0:
# Randomly sample a batch of transitions B = {(s, a, r, s', d)} from D
batch = random.sample(D, BATCH_SIZE)
batch = {k: torch.cat([d[k] for d in batch], dim=0) for k in batch[0].keys()}
# Compute targets
y = batch['reward'] + DISCOUNT * (1 - batch['done']) * target_critic(batch['next_state'], target_actor(batch['next_state']))
# Update Q-function by one step of gradient descent
value_loss = (critic(batch['state'], batch['action']) - y).pow(2).mean()
critic_optimiser.zero_grad()
value_loss.backward()
critic_optimiser.step()
# Update policy by one step of gradient ascent
policy_loss = -critic(batch['state'], actor(batch['state'])).mean()
actor_optimiser.zero_grad()
policy_loss.backward()
actor_optimiser.step()
# Update target networks
update_target_network(critic, target_critic, POLYAK_FACTOR)
update_target_network(actor, target_actor, POLYAK_FACTOR)
if step > UPDATE_START and step % TEST_INTERVAL == 0:
actor.eval()
total_reward = test(actor)
pbar.set_description('Step: %i | Reward: %f' % (step, total_reward))
plot(step, total_reward, 'ddpg')
actor.train()
| 42.926829 | 196 | 0.710227 |
4a1dc14fd1c9f6fab7de6590537de88e041b8dc2 | 1,091 | py | Python | core/src/zeit/cms/repository/browser/unknown.py | rickdg/vivi | 16134ac954bf8425646d4ad47bdd1f372e089355 | [
"BSD-3-Clause"
] | 5 | 2019-05-16T09:51:29.000Z | 2021-05-31T09:30:03.000Z | core/src/zeit/cms/repository/browser/unknown.py | rickdg/vivi | 16134ac954bf8425646d4ad47bdd1f372e089355 | [
"BSD-3-Clause"
] | 107 | 2019-05-24T12:19:02.000Z | 2022-03-23T15:05:56.000Z | core/src/zeit/cms/repository/browser/unknown.py | rickdg/vivi | 16134ac954bf8425646d4ad47bdd1f372e089355 | [
"BSD-3-Clause"
] | 3 | 2020-08-14T11:01:17.000Z | 2022-01-08T17:32:19.000Z | # coding: utf8
import pprint
import zeit.cms.interfaces
import zeit.connector.interfaces
import zope.component
class View(object):
def get_excerpt(self):
data = self.context.data.strip()
if len(data) < 100:
return data
return data[:100] + u'…'
def get_properties(self):
properties = zeit.connector.interfaces.IWebDAVReadProperties(
self.context)
return pprint.pformat(dict(properties))
class Edit(object):
def __call__(self):
context_url = zope.component.getMultiAdapter(
(self.context, self.request),
name='absolute_url')()
self.request.response.redirect(
context_url + '/@@view.html')
return ''
class Metadata(object):
@property
def dav_resource_type(self):
return zeit.cms.interfaces.IWebDAVReadProperties(self.context).get(
('type', 'http://namespaces.zeit.de/CMS/meta'))
class DragPane(object):
@property
def uniqueId(self):
return self.context.uniqueId
@property
def name(self):
return self.context.__name__
| 22.265306 | 75 | 0.628781 |
4a1dc1e523b20881e39f77e5e24cd2b574de469e | 44,147 | py | Python | second/pytorch/train.py | panfengsu/train_point_pillars | 016a126e080044c7b5f3a19c7404a79c0689c650 | [
"Apache-2.0"
] | null | null | null | second/pytorch/train.py | panfengsu/train_point_pillars | 016a126e080044c7b5f3a19c7404a79c0689c650 | [
"Apache-2.0"
] | null | null | null | second/pytorch/train.py | panfengsu/train_point_pillars | 016a126e080044c7b5f3a19c7404a79c0689c650 | [
"Apache-2.0"
] | null | null | null | import os
import pathlib
import pickle
import shutil
import time
from functools import partial
import fire
import numpy as np
import torch
from google.protobuf import text_format
from tensorboardX import SummaryWriter
import torchplus
import second.data.kitti_common as kitti
from second.builder import target_assigner_builder, voxel_builder
from second.data.preprocess import merge_second_batch
from second.protos import pipeline_pb2
from second.pytorch.builder import (box_coder_builder, input_reader_builder,
lr_scheduler_builder, optimizer_builder,
second_builder)
from second.utils.eval import get_coco_eval_result, get_official_eval_result
from second.utils.progress_bar import ProgressBar
from second.pytorch.utils import get_paddings_indicator
def _get_pos_neg_loss(cls_loss, labels):
# cls_loss: [N, num_anchors, num_class]
# labels: [N, num_anchors]
batch_size = cls_loss.shape[0]
if cls_loss.shape[-1] == 1 or len(cls_loss.shape) == 2:
cls_pos_loss = (labels > 0).type_as(cls_loss) * cls_loss.view(
batch_size, -1)
cls_neg_loss = (labels == 0).type_as(cls_loss) * cls_loss.view(
batch_size, -1)
cls_pos_loss = cls_pos_loss.sum() / batch_size
cls_neg_loss = cls_neg_loss.sum() / batch_size
else:
cls_pos_loss = cls_loss[..., 1:].sum() / batch_size
cls_neg_loss = cls_loss[..., 0].sum() / batch_size
return cls_pos_loss, cls_neg_loss
def _flat_nested_json_dict(json_dict, flatted, sep=".", start=""):
for k, v in json_dict.items():
if isinstance(v, dict):
_flat_nested_json_dict(v, flatted, sep, start + sep + k)
else:
flatted[start + sep + k] = v
def flat_nested_json_dict(json_dict, sep=".") -> dict:
"""flat a nested json-like dict. this function make shadow copy.
"""
flatted = {}
for k, v in json_dict.items():
if isinstance(v, dict):
_flat_nested_json_dict(v, flatted, sep, k)
else:
flatted[k] = v
return flatted
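# Example (illustrative): flat_nested_json_dict({"loss": {"cls": 0.1}, "step": 3})
# returns {"loss.cls": 0.1, "step": 3}; nested keys are joined with the separator.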
def example_convert_to_torch(example, dtype=torch.float32,
device=None) -> dict:
device = device or torch.device("cuda:0")
example_torch = {}
float_names = [
"voxels", "anchors", "reg_targets", "reg_weights", "bev_map", "rect",
"Trv2c", "P2"
]
for k, v in example.items():
if k in float_names:
example_torch[k] = torch.as_tensor(v, dtype=dtype, device=device)
elif k in ["coordinates", "labels", "num_points"]:
example_torch[k] = torch.as_tensor(
v, dtype=torch.int32, device=device)
elif k in ["anchors_mask"]:
example_torch[k] = torch.as_tensor(
v, dtype=torch.uint8, device=device)
else:
example_torch[k] = v
return example_torch
def train(config_path,
model_dir,
result_path=None,
create_folder=False,
display_step=50,
summary_step=5,
pickle_result=True):
"""train a VoxelNet model specified by a config file.
"""
if create_folder:
if pathlib.Path(model_dir).exists():
model_dir = torchplus.train.create_folder(model_dir)
model_dir = pathlib.Path(model_dir)
model_dir.mkdir(parents=True, exist_ok=True)
eval_checkpoint_dir = model_dir / 'eval_checkpoints'
eval_checkpoint_dir.mkdir(parents=True, exist_ok=True)
if result_path is None:
result_path = model_dir / 'results'
config_file_bkp = "pipeline.config"
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, config)
shutil.copyfile(config_path, str(model_dir / config_file_bkp))
input_cfg = config.train_input_reader
eval_input_cfg = config.eval_input_reader
model_cfg = config.model.second
train_cfg = config.train_config
class_names = list(input_cfg.class_names)
######################
# BUILD VOXEL GENERATOR
######################
voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
######################
# BUILD TARGET ASSIGNER
######################
bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
box_coder = box_coder_builder.build(model_cfg.box_coder)
target_assigner_cfg = model_cfg.target_assigner
target_assigner = target_assigner_builder.build(target_assigner_cfg,
bv_range, box_coder)
######################
# BUILD NET
######################
center_limit_range = model_cfg.post_center_limit_range
# net = second_builder.build(model_cfg, voxel_generator, target_assigner)
net = second_builder.build(model_cfg, voxel_generator, target_assigner, input_cfg.batch_size)
net.cuda()
# net_train = torch.nn.DataParallel(net).cuda()
print("num_trainable parameters:", len(list(net.parameters())))
# for n, p in net.named_parameters():
# print(n, p.shape)
######################
# BUILD OPTIMIZER
######################
# we need global_step to create lr_scheduler, so restore net first.
torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
gstep = net.get_global_step() - 1
optimizer_cfg = train_cfg.optimizer
if train_cfg.enable_mixed_precision:
net.half()
net.metrics_to_float()
net.convert_norm_to_float(net)
optimizer = optimizer_builder.build(optimizer_cfg, net.parameters())
if train_cfg.enable_mixed_precision:
loss_scale = train_cfg.loss_scale_factor
mixed_optimizer = torchplus.train.MixedPrecisionWrapper(
optimizer, loss_scale)
else:
mixed_optimizer = optimizer
# must restore optimizer AFTER using MixedPrecisionWrapper
torchplus.train.try_restore_latest_checkpoints(model_dir,
[mixed_optimizer])
lr_scheduler = lr_scheduler_builder.build(optimizer_cfg, optimizer, gstep)
if train_cfg.enable_mixed_precision:
float_dtype = torch.float16
else:
float_dtype = torch.float32
######################
# PREPARE INPUT
######################
dataset = input_reader_builder.build(
input_cfg,
model_cfg,
training=True,
voxel_generator=voxel_generator,
target_assigner=target_assigner)
eval_dataset = input_reader_builder.build(
eval_input_cfg,
model_cfg,
training=False,
voxel_generator=voxel_generator,
target_assigner=target_assigner)
def _worker_init_fn(worker_id):
time_seed = np.array(time.time(), dtype=np.int32)
np.random.seed(time_seed + worker_id)
print(f"WORKER {worker_id} seed:", np.random.get_state()[1][0])
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=input_cfg.batch_size,
shuffle=True,
num_workers=input_cfg.num_workers,
pin_memory=False,
collate_fn=merge_second_batch,
worker_init_fn=_worker_init_fn)
eval_dataloader = torch.utils.data.DataLoader(
eval_dataset,
batch_size=eval_input_cfg.batch_size,
shuffle=False,
num_workers=eval_input_cfg.num_workers,
pin_memory=False,
collate_fn=merge_second_batch)
data_iter = iter(dataloader)
######################
# TRAINING
######################
log_path = model_dir / 'log.txt'
logf = open(log_path, 'a')
logf.write(proto_str)
logf.write("\n")
summary_dir = model_dir / 'summary'
summary_dir.mkdir(parents=True, exist_ok=True)
writer = SummaryWriter(str(summary_dir))
total_step_elapsed = 0
remain_steps = train_cfg.steps - net.get_global_step()
t = time.time()
ckpt_start_time = t
total_loop = train_cfg.steps // train_cfg.steps_per_eval + 1
# total_loop = remain_steps // train_cfg.steps_per_eval + 1
clear_metrics_every_epoch = train_cfg.clear_metrics_every_epoch
if train_cfg.steps % train_cfg.steps_per_eval == 0:
total_loop -= 1
mixed_optimizer.zero_grad()
try:
for _ in range(total_loop):
if total_step_elapsed + train_cfg.steps_per_eval > train_cfg.steps:
steps = train_cfg.steps % train_cfg.steps_per_eval
else:
steps = train_cfg.steps_per_eval
for step in range(steps):
lr_scheduler.step()
try:
example = next(data_iter)
except StopIteration:
print("end epoch")
if clear_metrics_every_epoch:
net.clear_metrics()
data_iter = iter(dataloader)
example = next(data_iter)
example_torch = example_convert_to_torch(example, float_dtype)
batch_size = example["anchors"].shape[0]
example_tuple = list(example_torch.values())
example_tuple[11] = torch.from_numpy(example_tuple[11])
example_tuple[12] = torch.from_numpy(example_tuple[12])
                assert len(example_tuple) == 13, "something wrong with the training input size!"
# training example:[0:'voxels', 1:'num_points', 2:'coordinates', 3:'rect',
# 4:'Trv2c', 5:'P2',
# 6:'anchors', 7:'anchors_mask', 8:'labels', 9:'reg_targets', 10:'reg_weights',
# 11:'image_idx', 12:'image_shape']
# ret_dict = net(example_torch)
# training input from example
# print("example[0] size", example_tuple[0].size())
pillar_x = example_tuple[0][:,:,0].unsqueeze(0).unsqueeze(0)
pillar_y = example_tuple[0][:,:,1].unsqueeze(0).unsqueeze(0)
pillar_z = example_tuple[0][:,:,2].unsqueeze(0).unsqueeze(0)
pillar_i = example_tuple[0][:,:,3].unsqueeze(0).unsqueeze(0)
num_points_per_pillar = example_tuple[1].float().unsqueeze(0)
# Find distance of x, y, and z from pillar center
# assuming xyres_16.proto
coors_x = example_tuple[2][:, 3].float()
coors_y = example_tuple[2][:, 2].float()
# self.x_offset = self.vx / 2 + pc_range[0]
# self.y_offset = self.vy / 2 + pc_range[1]
# this assumes xyres 20
# x_sub = coors_x.unsqueeze(1) * 0.16 + 0.1
# y_sub = coors_y.unsqueeze(1) * 0.16 + -39.9
# here assumes xyres 16
x_sub = coors_x.unsqueeze(1) * 0.16 + 0.08
y_sub = coors_y.unsqueeze(1) * 0.16 + -39.6
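                # Assuming voxel size vx = vy = 0.16 m and a point-cloud range starting at
                # (0.0, -39.68): x_offset = 0.16/2 + 0.0 = 0.08 and
                # y_offset = 0.16/2 - 39.68 = -39.6, which is where the constants above come from.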
ones = torch.ones([1, 100],dtype=torch.float32, device=pillar_x.device )
x_sub_shaped = torch.mm(x_sub, ones).unsqueeze(0).unsqueeze(0)
y_sub_shaped = torch.mm(y_sub, ones).unsqueeze(0).unsqueeze(0)
num_points_for_a_pillar = pillar_x.size()[3]
mask = get_paddings_indicator(num_points_per_pillar, num_points_for_a_pillar, axis=0)
mask = mask.permute(0, 2, 1)
mask = mask.unsqueeze(1)
mask = mask.type_as(pillar_x)
coors = example_tuple[2]
anchors = example_tuple[6]
labels = example_tuple[8]
reg_targets = example_tuple[9]
input = [pillar_x, pillar_y, pillar_z, pillar_i,
num_points_per_pillar, x_sub_shaped, y_sub_shaped, mask, coors,
anchors, labels, reg_targets]
ret_dict = net(input)
#print(ret_dict)
#print(len(ret_dict))
                assert len(ret_dict) == 10, "something wrong with the training output size!"
# return 0
# ret_dict {
# 0:"loss": loss,
# 1:"cls_loss": cls_loss,
# 2:"loc_loss": loc_loss,
# 3:"cls_pos_loss": cls_pos_loss,
# 4:"cls_neg_loss": cls_neg_loss,
# 5:"cls_preds": cls_preds,
# 6:"dir_loss_reduced": dir_loss_reduced,
# 7:"cls_loss_reduced": cls_loss_reduced,
# 8:"loc_loss_reduced": loc_loss_reduced,
# 9:"cared": cared,
# }
# cls_preds = ret_dict["cls_preds"]
cls_preds = ret_dict[5]
# loss = ret_dict["loss"].mean()
loss = ret_dict[0].mean()
# cls_loss_reduced = ret_dict["cls_loss_reduced"].mean()
cls_loss_reduced = ret_dict[7].mean()
# loc_loss_reduced = ret_dict["loc_loss_reduced"].mean()
loc_loss_reduced = ret_dict[8].mean()
# cls_pos_loss = ret_dict["cls_pos_loss"]
cls_pos_loss = ret_dict[3]
# cls_neg_loss = ret_dict["cls_neg_loss"]
cls_neg_loss = ret_dict[4]
# loc_loss = ret_dict["loc_loss"]
loc_loss = ret_dict[2]
# cls_loss = ret_dict["cls_loss"]
cls_loss = ret_dict[1]
# dir_loss_reduced = ret_dict["dir_loss_reduced"]
dir_loss_reduced = ret_dict[6]
# cared = ret_dict["cared"]
cared = ret_dict[9]
# labels = example_torch["labels"]
labels = example_tuple[8]
if train_cfg.enable_mixed_precision:
loss *= loss_scale
loss.backward()
torch.nn.utils.clip_grad_norm_(net.parameters(), 10.0)
mixed_optimizer.step()
mixed_optimizer.zero_grad()
net.update_global_step()
net_metrics = net.update_metrics(cls_loss_reduced,
loc_loss_reduced, cls_preds,
labels, cared)
step_time = (time.time() - t)
t = time.time()
metrics = {}
num_pos = int((labels > 0)[0].float().sum().cpu().numpy())
num_neg = int((labels == 0)[0].float().sum().cpu().numpy())
# if 'anchors_mask' not in example_torch:
# num_anchors = example_torch['anchors'].shape[1]
# else:
# num_anchors = int(example_torch['anchors_mask'][0].sum())
num_anchors = int(example_tuple[7][0].sum())
global_step = net.get_global_step()
if global_step % display_step == 0:
loc_loss_elem = [
float(loc_loss[:, :, i].sum().detach().cpu().numpy() /
batch_size) for i in range(loc_loss.shape[-1])
]
metrics["step"] = global_step
metrics["steptime"] = step_time
metrics.update(net_metrics)
metrics["loss"] = {}
metrics["loss"]["loc_elem"] = loc_loss_elem
metrics["loss"]["cls_pos_rt"] = float(
cls_pos_loss.detach().cpu().numpy())
metrics["loss"]["cls_neg_rt"] = float(
cls_neg_loss.detach().cpu().numpy())
# if unlabeled_training:
# metrics["loss"]["diff_rt"] = float(
# diff_loc_loss_reduced.detach().cpu().numpy())
if model_cfg.use_direction_classifier:
metrics["loss"]["dir_rt"] = float(
dir_loss_reduced.detach().cpu().numpy())
# metrics["num_vox"] = int(example_torch["voxels"].shape[0])
metrics["num_vox"] = int(example_tuple[0].shape[0])
metrics["num_pos"] = int(num_pos)
metrics["num_neg"] = int(num_neg)
metrics["num_anchors"] = int(num_anchors)
metrics["lr"] = float(
mixed_optimizer.param_groups[0]['lr'])
# metrics["image_idx"] = example['image_idx'][0]
metrics["image_idx"] = example_tuple[11][0]
flatted_metrics = flat_nested_json_dict(metrics)
flatted_summarys = flat_nested_json_dict(metrics, "/")
for k, v in flatted_summarys.items():
if isinstance(v, (list, tuple)):
v = {str(i): e for i, e in enumerate(v)}
writer.add_scalars(k, v, global_step)
else:
writer.add_scalar(k, v, global_step)
metrics_str_list = []
for k, v in flatted_metrics.items():
if isinstance(v, float):
metrics_str_list.append(f"{k}={v:.3}")
elif isinstance(v, (list, tuple)):
if v and isinstance(v[0], float):
v_str = ', '.join([f"{e:.3}" for e in v])
metrics_str_list.append(f"{k}=[{v_str}]")
else:
metrics_str_list.append(f"{k}={v}")
else:
metrics_str_list.append(f"{k}={v}")
log_str = ', '.join(metrics_str_list)
print(log_str, file=logf)
print(log_str)
                ckpt_elapsed_time = time.time() - ckpt_start_time
                if ckpt_elapsed_time > train_cfg.save_checkpoints_secs:
torchplus.train.save_models(model_dir, [net, optimizer],
net.get_global_step())
ckpt_start_time = time.time()
total_step_elapsed += steps
torchplus.train.save_models(model_dir, [net, optimizer],
net.get_global_step())
# Ensure that all evaluation points are saved forever
torchplus.train.save_models(eval_checkpoint_dir, [net, optimizer], net.get_global_step(), max_to_keep=100)
# net.eval()
# result_path_step = result_path / f"step_{net.get_global_step()}"
# result_path_step.mkdir(parents=True, exist_ok=True)
# print("#################################")
# print("#################################", file=logf)
# print("# EVAL")
# print("# EVAL", file=logf)
# print("#################################")
# print("#################################", file=logf)
# print("Generate output labels...")
# print("Generate output labels...", file=logf)
# t = time.time()
# dt_annos = []
# prog_bar = ProgressBar()
# prog_bar.start(len(eval_dataset) // eval_input_cfg.batch_size + 1)
# for example in iter(eval_dataloader):
# example = example_convert_to_torch(example, float_dtype)
# # evaluation example:[0:'voxels', 1:'num_points', 2:'coordinates', 3:'rect',
# # 4:'Trv2c', 5:'P2',
# # 6:'anchors', 7:'anchors_mask', 8:'image_idx', 9:'image_shape']
# example_tuple = list(example.values())
# example_tuple[8] = torch.from_numpy(example_tuple[8])
# example_tuple[9] = torch.from_numpy(example_tuple[9])
# if pickle_result:
# dt_annos += predict_kitti_to_anno(
# net, example_tuple, class_names, center_limit_range,
# model_cfg.lidar_input)
# else:
# _predict_kitti_to_file(net, example, result_path_step,
# class_names, center_limit_range,
# model_cfg.lidar_input)
#
# prog_bar.print_bar()
#
# sec_per_ex = len(eval_dataset) / (time.time() - t)
# print(f"avg forward time per example: {net.avg_forward_time:.3f}")
# print(
# f"avg postprocess time per example: {net.avg_postprocess_time:.3f}"
# )
#
# net.clear_time_metrics()
# print(f'generate label finished({sec_per_ex:.2f}/s). start eval:')
# print(
# f'generate label finished({sec_per_ex:.2f}/s). start eval:',
# file=logf)
# gt_annos = [
# info["annos"] for info in eval_dataset.dataset.kitti_infos
# ]
# if not pickle_result:
# dt_annos = kitti.get_label_annos(result_path_step)
# result, mAPbbox, mAPbev, mAP3d, mAPaos = get_official_eval_result(gt_annos, dt_annos, class_names,
# return_data=True)
# print(result, file=logf)
# print(result)
# writer.add_text('eval_result', result, global_step)
#
# for i, class_name in enumerate(class_names):
# writer.add_scalar('bev_ap:{}'.format(class_name), mAPbev[i, 1, 0], global_step)
# writer.add_scalar('3d_ap:{}'.format(class_name), mAP3d[i, 1, 0], global_step)
# writer.add_scalar('aos_ap:{}'.format(class_name), mAPaos[i, 1, 0], global_step)
# writer.add_scalar('bev_map', np.mean(mAPbev[:, 1, 0]), global_step)
# writer.add_scalar('3d_map', np.mean(mAP3d[:, 1, 0]), global_step)
# writer.add_scalar('aos_map', np.mean(mAPaos[:, 1, 0]), global_step)
#
# result = get_coco_eval_result(gt_annos, dt_annos, class_names)
# print(result, file=logf)
# print(result)
# if pickle_result:
# with open(result_path_step / "result.pkl", 'wb') as f:
# pickle.dump(dt_annos, f)
# writer.add_text('eval_result', result, global_step)
# net.train()
except Exception as e:
torchplus.train.save_models(model_dir, [net, optimizer],
net.get_global_step())
logf.close()
raise e
# save model before exit
torchplus.train.save_models(model_dir, [net, optimizer],
net.get_global_step())
logf.close()
def _predict_kitti_to_file(net,
example,
result_save_path,
class_names,
center_limit_range=None,
lidar_input=False):
batch_image_shape = example['image_shape']
batch_imgidx = example['image_idx']
predictions_dicts = net(example)
# t = time.time()
for i, preds_dict in enumerate(predictions_dicts):
image_shape = batch_image_shape[i]
img_idx = preds_dict["image_idx"]
if preds_dict["bbox"] is not None:
box_2d_preds = preds_dict["bbox"].data.cpu().numpy()
box_preds = preds_dict["box3d_camera"].data.cpu().numpy()
scores = preds_dict["scores"].data.cpu().numpy()
box_preds_lidar = preds_dict["box3d_lidar"].data.cpu().numpy()
# write pred to file
box_preds = box_preds[:, [0, 1, 2, 4, 5, 3,
6]] # lhw->hwl(label file format)
label_preds = preds_dict["label_preds"].data.cpu().numpy()
# label_preds = np.zeros([box_2d_preds.shape[0]], dtype=np.int32)
result_lines = []
for box, box_lidar, bbox, score, label in zip(
box_preds, box_preds_lidar, box_2d_preds, scores,
label_preds):
if not lidar_input:
if bbox[0] > image_shape[1] or bbox[1] > image_shape[0]:
continue
if bbox[2] < 0 or bbox[3] < 0:
continue
# print(img_shape)
if center_limit_range is not None:
limit_range = np.array(center_limit_range)
if (np.any(box_lidar[:3] < limit_range[:3])
or np.any(box_lidar[:3] > limit_range[3:])):
continue
bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])
bbox[:2] = np.maximum(bbox[:2], [0, 0])
result_dict = {
'name': class_names[int(label)],
'alpha': -np.arctan2(-box_lidar[1], box_lidar[0]) + box[6],
'bbox': bbox,
'location': box[:3],
'dimensions': box[3:6],
'rotation_y': box[6],
'score': score,
}
result_line = kitti.kitti_result_line(result_dict)
result_lines.append(result_line)
else:
result_lines = []
result_file = f"{result_save_path}/{kitti.get_image_index_str(img_idx)}.txt"
result_str = '\n'.join(result_lines)
with open(result_file, 'w') as f:
f.write(result_str)
def predict_kitti_to_anno(net,
example,
class_names,
center_limit_range=None,
lidar_input=False,
global_set=None):
# evaluation example:[0:'voxels', 1:'num_points', 2:'coordinates', 3:'rect',
# 4:'Trv2c', 5:'P2',
# 6:'anchors', 7:'anchors_mask', 8:'image_idx', 9:'image_shape']
# batch_image_shape = example['image_shape']
batch_image_shape = example[9]
batch_imgidx = example[8]
pillar_x = example[0][:,:,0].unsqueeze(0).unsqueeze(0)
pillar_y = example[0][:,:,1].unsqueeze(0).unsqueeze(0)
pillar_z = example[0][:,:,2].unsqueeze(0).unsqueeze(0)
pillar_i = example[0][:,:,3].unsqueeze(0).unsqueeze(0)
num_points_per_pillar = example[1].float().unsqueeze(0)
# Find distance of x, y, and z from pillar center
    # note: these offsets (0.1, -39.9) match xyres_20 rather than xyres_16 (0.08, -39.6)
coors_x = example[2][:, 3].float()
coors_y = example[2][:, 2].float()
x_sub = coors_x.unsqueeze(1) * 0.16 + 0.1
y_sub = coors_y.unsqueeze(1) * 0.16 + -39.9
ones = torch.ones([1, 100],dtype=torch.float32, device=pillar_x.device )
x_sub_shaped = torch.mm(x_sub, ones).unsqueeze(0).unsqueeze(0)
y_sub_shaped = torch.mm(y_sub, ones).unsqueeze(0).unsqueeze(0)
num_points_for_a_pillar = pillar_x.size()[3]
mask = get_paddings_indicator(num_points_per_pillar, num_points_for_a_pillar, axis=0)
mask = mask.permute(0, 2, 1)
mask = mask.unsqueeze(1)
mask = mask.type_as(pillar_x)
coors = example[2]
anchors = example[6]
anchors_mask = example[7]
anchors_mask = torch.as_tensor(anchors_mask, dtype=torch.uint8, device=pillar_x.device)
anchors_mask = anchors_mask.byte()
rect = example[3]
Trv2c = example[4]
P2 = example[5]
image_idx = example[8]
input = [pillar_x, pillar_y, pillar_z, pillar_i,
num_points_per_pillar, x_sub_shaped, y_sub_shaped, mask, coors,
anchors, anchors_mask, rect, Trv2c, P2, image_idx]
predictions_dicts = net(input)
# predictions_dict = {
# 0:"bbox": box_2d_preds,
# 1:"box3d_camera": final_box_preds_camera,
# 2:"box3d_lidar": final_box_preds,
# 3:"scores": final_scores,
# 4:"label_preds": label_preds,
# 5:"image_idx": img_idx,
# }
annos = []
for i, preds_dict in enumerate(predictions_dicts):
image_shape = batch_image_shape[i]
# img_idx = preds_dict["image_idx"]
img_idx = preds_dict[5]
# if preds_dict["bbox"] is not None:
if preds_dict[0] is not None:
# box_2d_preds = preds_dict["bbox"].detach().cpu().numpy()
box_2d_preds = preds_dict[0].detach().cpu().numpy()
# box_preds = preds_dict["box3d_camera"].detach().cpu().numpy()
box_preds = preds_dict[1].detach().cpu().numpy()
# scores = preds_dict["scores"].detach().cpu().numpy()
scores = preds_dict[3].detach().cpu().numpy()
# box_preds_lidar = preds_dict["box3d_lidar"].detach().cpu().numpy()
box_preds_lidar = preds_dict[2].detach().cpu().numpy()
# write pred to file
# label_preds = preds_dict["label_preds"].detach().cpu().numpy()
label_preds = preds_dict[4].detach().cpu().numpy()
# label_preds = np.zeros([box_2d_preds.shape[0]], dtype=np.int32)
anno = kitti.get_start_result_anno()
num_example = 0
for box, box_lidar, bbox, score, label in zip(
box_preds, box_preds_lidar, box_2d_preds, scores,
label_preds):
if not lidar_input:
if bbox[0] > image_shape[1] or bbox[1] > image_shape[0]:
continue
if bbox[2] < 0 or bbox[3] < 0:
continue
# print(img_shape)
if center_limit_range is not None:
limit_range = np.array(center_limit_range)
if (np.any(box_lidar[:3] < limit_range[:3])
or np.any(box_lidar[:3] > limit_range[3:])):
continue
image_shape = [image_shape[0], image_shape[1]]
bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])
bbox[:2] = np.maximum(bbox[:2], [0, 0])
anno["name"].append(class_names[int(label)])
anno["truncated"].append(0.0)
anno["occluded"].append(0)
anno["alpha"].append(-np.arctan2(-box_lidar[1], box_lidar[0]) +
box[6])
anno["bbox"].append(bbox)
anno["dimensions"].append(box[3:6])
anno["location"].append(box[:3])
anno["rotation_y"].append(box[6])
if global_set is not None:
for i in range(100000):
if score in global_set:
score -= 1 / 100000
else:
global_set.add(score)
break
anno["score"].append(score)
num_example += 1
if num_example != 0:
anno = {n: np.stack(v) for n, v in anno.items()}
annos.append(anno)
else:
annos.append(kitti.empty_result_anno())
else:
annos.append(kitti.empty_result_anno())
num_example = annos[-1]["name"].shape[0]
annos[-1]["image_idx"] = np.array(
[img_idx] * num_example, dtype=np.int64)
return annos
def evaluate(config_path,
model_dir,
result_path=None,
predict_test=False,
ckpt_path=None,
ref_detfile=None,
pickle_result=True):
model_dir = pathlib.Path(model_dir)
if predict_test:
result_name = 'predict_test'
else:
result_name = 'eval_results'
if result_path is None:
result_path = model_dir / result_name
else:
result_path = pathlib.Path(result_path)
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, config)
input_cfg = config.eval_input_reader
model_cfg = config.model.second
train_cfg = config.train_config
class_names = list(input_cfg.class_names)
center_limit_range = model_cfg.post_center_limit_range
######################
# BUILD VOXEL GENERATOR
######################
voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
box_coder = box_coder_builder.build(model_cfg.box_coder)
target_assigner_cfg = model_cfg.target_assigner
target_assigner = target_assigner_builder.build(target_assigner_cfg,
bv_range, box_coder)
# net = second_builder.build(model_cfg, voxel_generator, target_assigner)
net = second_builder.build(model_cfg, voxel_generator, target_assigner, input_cfg.batch_size)
net.cuda()
if train_cfg.enable_mixed_precision:
net.half()
net.metrics_to_float()
net.convert_norm_to_float(net)
if ckpt_path is None:
torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
else:
torchplus.train.restore(ckpt_path, net)
eval_dataset = input_reader_builder.build(
input_cfg,
model_cfg,
training=False,
voxel_generator=voxel_generator,
target_assigner=target_assigner)
eval_dataloader = torch.utils.data.DataLoader(
eval_dataset,
batch_size=input_cfg.batch_size,
shuffle=False,
num_workers=input_cfg.num_workers,
pin_memory=False,
collate_fn=merge_second_batch)
if train_cfg.enable_mixed_precision:
float_dtype = torch.float16
else:
float_dtype = torch.float32
net.eval()
result_path_step = result_path / f"step_{net.get_global_step()}"
result_path_step.mkdir(parents=True, exist_ok=True)
t = time.time()
dt_annos = []
global_set = None
print("Generate output labels...")
bar = ProgressBar()
bar.start(len(eval_dataset) // input_cfg.batch_size + 1)
for example in iter(eval_dataloader):
# evaluation example:[0:'voxels', 1:'num_points', 2:'coordinates', 3:'rect',
# 4:'Trv2c', 5:'P2',
# 6:'anchors', 7:'anchors_mask', 8:'image_idx', 9:'image_shape']
example = example_convert_to_torch(example, float_dtype)
example_tuple = list(example.values())
example_tuple[8] = torch.from_numpy(example_tuple[8])
example_tuple[9] = torch.from_numpy(example_tuple[9])
if(example_tuple[6].size()[0] != input_cfg.batch_size):
continue
if pickle_result:
dt_annos += predict_kitti_to_anno(
net, example_tuple, class_names, center_limit_range,
model_cfg.lidar_input, global_set)
# print("shut train/py L703")
# return 0
else:
_predict_kitti_to_file(net, example, result_path_step, class_names,
center_limit_range, model_cfg.lidar_input)
bar.print_bar()
sec_per_example = len(eval_dataset) / (time.time() - t)
print(f'generate label finished({sec_per_example:.2f}/s). start eval:')
print(f"avg forward time per example: {net.avg_forward_time:.3f}")
print(f"avg postprocess time per example: {net.avg_postprocess_time:.3f}")
if not predict_test:
gt_annos = [info["annos"] for info in eval_dataset.dataset.kitti_infos]
if(len(gt_annos)%2 != 0):
del gt_annos[-1]
if not pickle_result:
dt_annos = kitti.get_label_annos(result_path_step)
result = get_official_eval_result(gt_annos, dt_annos, class_names)
print(result)
result = get_coco_eval_result(gt_annos, dt_annos, class_names)
print(result)
if pickle_result:
with open(result_path_step / "result.pkl", 'wb') as f:
pickle.dump(dt_annos, f)
def prediction_once(net,
example,
class_names,
batch_image_shape,
center_limit_range=None,
lidar_input=False,
global_set=None):
# predictions_dicts = net(example)
# input_names = ['voxels', 'num_points', 'coordinates', 'rect', 'Trv2c', 'P2',
# 'anchors', 'anchors_mask', 'labels', 'image_idx', 'image_shape']
pillar_x = example[0][:,:,0].unsqueeze(0).unsqueeze(0)
pillar_y = example[0][:,:,1].unsqueeze(0).unsqueeze(0)
pillar_z = example[0][:,:,2].unsqueeze(0).unsqueeze(0)
pillar_i = example[0][:,:,3].unsqueeze(0).unsqueeze(0)
num_points_per_pillar = example[1].float().unsqueeze(0)
# Find distance of x, y, and z from pillar center
    # note: these offsets (0.1, -39.9) match xyres_20 rather than xyres_16 (0.08, -39.6)
coors_x = example[2][:, 3].float()
coors_y = example[2][:, 2].float()
x_sub = coors_x.unsqueeze(1) * 0.16 + 0.1
y_sub = coors_y.unsqueeze(1) * 0.16 + -39.9
ones = torch.ones([1, 100],dtype=torch.float32, device=pillar_x.device )
x_sub_shaped = torch.mm(x_sub, ones).unsqueeze(0).unsqueeze(0)
y_sub_shaped = torch.mm(y_sub, ones).unsqueeze(0).unsqueeze(0)
num_points_for_a_pillar = pillar_x.size()[3]
mask = get_paddings_indicator(num_points_per_pillar, num_points_for_a_pillar, axis=0)
mask = mask.permute(0, 2, 1)
mask = mask.unsqueeze(1)
mask = mask.type_as(pillar_x)
coors = example[2]
print(pillar_x.size())
print(pillar_y.size())
print(pillar_z.size())
print(pillar_i.size())
print(num_points_per_pillar.size())
print(x_sub_shaped.size())
print(y_sub_shaped.size())
print(mask.size())
# input = [pillar_x, pillar_y, pillar_z, pillar_i, num_points_per_pillar, x_sub_shaped, y_sub_shaped, mask, coors]
# predictions_dicts = net(input)
# return 0
input_names = ["pillar_x", "pillar_y", "pillar_z", "pillar_i", "num_points_per_pillar", "x_sub_shaped", "y_sub_shaped", "mask"]
# input_names = ["pillar_x", "pillar_y", "pillar_z"]
# pillar_x = torch.ones([1, 8599, 100, 1],dtype=torch.float32, device=pillar_x.device )
# pillar_y = torch.ones([1, 8599, 100, 1],dtype=torch.float32, device=pillar_x.device )
# pillar_z = torch.ones([1, 8599, 100, 1],dtype=torch.float32, device=pillar_x.device )
# pillar_i = torch.ones([1, 8599, 100, 1],dtype=torch.float32, device=pillar_x.device )
# num_points_per_pillar = torch.ones([1, 8599],dtype=torch.float32, device=pillar_x.device )
# x_sub_shaped = torch.ones([1, 8599, 100, 1],dtype=torch.float32, device=pillar_x.device )
# y_sub_shaped = torch.ones([1, 8599, 100, 1],dtype=torch.float32, device=pillar_x.device )
# mask = torch.ones([1, 8599, 100, 1],dtype=torch.float32, device=pillar_x.device )
# wierd conv
pillar_x = torch.ones([1, 1, 12000, 100],dtype=torch.float32, device=pillar_x.device )
pillar_y = torch.ones([1, 1, 12000, 100],dtype=torch.float32, device=pillar_x.device )
pillar_z = torch.ones([1, 1, 12000, 100],dtype=torch.float32, device=pillar_x.device )
pillar_i = torch.ones([1, 1, 12000, 100],dtype=torch.float32, device=pillar_x.device )
num_points_per_pillar = torch.ones([1, 12000],dtype=torch.float32, device=pillar_x.device )
x_sub_shaped = torch.ones([1, 1, 12000, 100],dtype=torch.float32, device=pillar_x.device )
y_sub_shaped = torch.ones([1, 1, 12000, 100],dtype=torch.float32, device=pillar_x.device )
mask = torch.ones([1, 1, 12000, 100],dtype=torch.float32, device=pillar_x.device )
# deconv
# pillar_x = torch.ones([1, 100, 8599, 1],dtype=torch.float32, device=pillar_x.device )
# pillar_y = torch.ones([1, 100, 8599, 1],dtype=torch.float32, device=pillar_x.device )
# pillar_z = torch.ones([1, 100, 8599, 1],dtype=torch.float32, device=pillar_x.device )
# pillar_i = torch.ones([1, 100, 8599, 1],dtype=torch.float32, device=pillar_x.device )
# num_points_per_pillar = torch.ones([1, 8599],dtype=torch.float32, device=pillar_x.device )
# x_sub_shaped = torch.ones([1, 100,8599, 1],dtype=torch.float32, device=pillar_x.device )
# y_sub_shaped = torch.ones([1, 100,8599, 1],dtype=torch.float32, device=pillar_x.device )
# mask = torch.ones([1, 100, 8599, 1],dtype=torch.float32, device=pillar_x.device )
example1 = (pillar_x, pillar_y, pillar_z, pillar_i, num_points_per_pillar, x_sub_shaped, y_sub_shaped, mask)
# example1 = [pillar_x, pillar_y, pillar_z]
# example1 = [pillar_x, pillar_y, pillar_z, pillar_i, num_points, mask]
torch.onnx.export(net.voxel_feature_extractor, example1, "pp.onnx", verbose=False, input_names = input_names)
sp_f = torch.ones([1, 64, 496, 432],dtype=torch.float32, device=pillar_x.device )
torch.onnx.export(net.rpn, sp_f, "rpn.onnx", verbose=False)
return 0
def inference(config_path,
model_dir,
result_path=None,
predict_test=False,
ckpt_path=None,
ref_detfile=None):
model_dir = pathlib.Path(model_dir)
if predict_test:
result_name = 'predict_test'
else:
result_name = 'eval_results'
if result_path is None:
result_path = model_dir / result_name
else:
result_path = pathlib.Path(result_path)
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, config)
input_cfg = config.eval_input_reader
model_cfg = config.model.second
train_cfg = config.train_config
class_names = list(input_cfg.class_names)
center_limit_range = model_cfg.post_center_limit_range
######################
# BUILD VOXEL GENERATOR
######################
voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
box_coder = box_coder_builder.build(model_cfg.box_coder)
target_assigner_cfg = model_cfg.target_assigner
target_assigner = target_assigner_builder.build(target_assigner_cfg,
bv_range, box_coder)
# net = second_builder.build(model_cfg, voxel_generator, target_assigner)
batch_size = 1
num_worker = 1
net = second_builder.build(model_cfg, voxel_generator, target_assigner, 1)
net.cuda()
if train_cfg.enable_mixed_precision:
net.half()
net.metrics_to_float()
net.convert_norm_to_float(net)
if ckpt_path is None:
torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
else:
torchplus.train.restore(ckpt_path, net)
eval_dataset = input_reader_builder.build(
input_cfg,
model_cfg,
training=False,
voxel_generator=voxel_generator,
target_assigner=target_assigner)
eval_dataloader = torch.utils.data.DataLoader(
eval_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
pin_memory=False,
collate_fn=merge_second_batch)
if train_cfg.enable_mixed_precision:
float_dtype = torch.float16
else:
float_dtype = torch.float32
net.eval()
result_path_step = result_path / f"step_{net.get_global_step()}"
result_path_step.mkdir(parents=True, exist_ok=True)
t = time.time()
dt_annos = []
global_set = None
print("Generate output labels...")
bar = ProgressBar()
bar.start(len(eval_dataset) // input_cfg.batch_size + 1)
for example in iter(eval_dataloader):
example = example_convert_to_torch(example, float_dtype)
#torch.onnx.export(net, example, "pp.onnx", verbose=False)
#return 0
# predictions_dicts = net(example)
example_tuple = list(example.values())
batch_image_shape = example_tuple[8]
example_tuple[8] = torch.from_numpy(example_tuple[8])
example_tuple[9] = torch.from_numpy(example_tuple[9])
# print("before", example)
dt_annos = prediction_once(
net, example_tuple, class_names, batch_image_shape, center_limit_range,
model_cfg.lidar_input, global_set)
return 0
bar.print_bar()
if __name__ == '__main__':
fire.Fire()
| 43.927363 | 131 | 0.574603 |
4a1dc202d2dc676f2573bfb84a86b41a4e07e3ac | 5,327 | py | Python | Proyecto_parqueadero/Proyecto_parqueadero.py | Erikarodriguez22/Clasealgoritmosyprogramacion | 518b5d49bbb6e4217f1395117f67a1f412870987 | [
"MIT"
] | 1 | 2021-09-18T19:06:14.000Z | 2021-09-18T19:06:14.000Z | Proyecto_parqueadero/Proyecto_parqueadero.py | Erikarodriguez22/Clasealgoritmosyprogramacion | 518b5d49bbb6e4217f1395117f67a1f412870987 | [
"MIT"
] | null | null | null | Proyecto_parqueadero/Proyecto_parqueadero.py | Erikarodriguez22/Clasealgoritmosyprogramacion | 518b5d49bbb6e4217f1395117f67a1f412870987 | [
"MIT"
] | null | null | null | """"
Inputs:
    name --> str --> n
    ID (cedula) number --> int --> cd
    vehicle type --> int --> tipo
    plate registration --> str --> placa
Outputs:
    amount to pay --> int --> vpag
    parking type --> int --> tipo
    amount to pay with discount --> desc
    parking spot --> int --> parqueo
"""
print("¡¡BIENVENIDO AL PARQUEADERO DE LA UNIVERSIDAD EAN!! 🥳 🤓")
usuario=int(input("Si eres empleado del parqueadero ingresa 1 si eres usuario 0:"))
if(usuario==1):
    # Stored credentials (username, password); the loop below prompts until they match.
    lista=["erika","1234"]
while True:
empleado=input("Digite Usuario: ")
contraseña=input("Digite contraseña: ")
if(empleado==lista[0]):
if(contraseña==lista[1]):
print("Bienvenido 🎉")
break
#Validation ........................................
if(usuario==0):
n=input("Ingrese su nombre completo: ")
    # Validate the ID number (digits only, positive) ...........................
while True:
cd=input("Ingrese su número de cédula: ")
try:
cd=int(cd)
if(cd>0):
break
else:
print("El número de cedula debe ser positivo⚠️")
except:
print("Ingrese solo numero su cédula ⚠️")
#----------------------------------------------------
while True:
tipo=input("1-->para carro 🚗, 2-->para moto 🏍️, 3-->para bicicleta🚲: ")
try:
tipo=int(tipo)
if(tipo==1 or tipo==2 or tipo==3):
break
else:
print("Número fuera del rango, ingrese un número nuevamente ⚠️")
except:
print("Ingrese solo números del 1 al 3")
if(tipo==1 or tipo==2):
placa=(input("Ingrese placa: "))
print(placa[0],placa[1],placa[2])
print(placa[3],placa[4],placa[5])
else:
registro=(input("Ingrese número de registro: "))
print(registro[0],registro[1],registro[2])
print(registro[3],registro[4],registro[5])
print(registro[6],registro[7])
    # Car plates: 3 letters and 3 digits
    # Motorcycle plates: 3 letters and 3 digits
    # Motorcycle plates: 3 letters and 2 digits
    # Motorcycle plates: 3 letters, 2 digits and 1 letter
    # Bicycle registrations: 8 digits
import time
# Entry time, converted to minutes since midnight (UTC)
tiempo1_min=time.gmtime()
he=(tiempo1_min[3]*60)
me=tiempo1_min[4]
he_min=me+he
# Entry timestamp shown to the user
localtime=time.asctime( time.localtime(time.time()))
print("Hora ingreso 🕐 :", localtime)
cc=int(input("Ingrese su CC para saber si cuenta con un descuento 🤑 💲: "))
listaCc=[1019134469,1019134469,1019134469,1019134469]
listaCc.append(cc)
# Count how many times this ID appears; five visits earn the loyalty discount.
if(listaCc.count(cc)==5):
print("tiene descuento del 20% por su fidelidad 💲",cc)
else:
print("no tine descuento♦️: ",cc)
"""
#Lugar para parquear:
parqueo=("Ingrese el piso en el que desea parquear:")
listaparp1c=[101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,160]
listaparp1c.append()
listaparqp1m=[161,162,163,164,165,166,167,168,169,170]
listaparqp1m.append()
listaparp1b=[171,172,173,174,175,176,177,178,179,180]
listaparqp1b.append()
listaparqueop2c=[201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256,257,258,259,260]
listaparqueop2m=[261,262,263,264,265,266,267,268,269,270]
listaparqueop2b=[271,272,273,274,275,276,277,278,279,280]
listaparqueop3=[301,302,303],304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360]
listaparqueop3m=[361,362,363,364,365,366,367,368,369,370]
listaparqueop3b=[371,372,373,374,375,376,377,378,379,380]
"""
# Total to pay
import time
tiempo2_min=time.gmtime()
hs=(tiempo2_min[3]*60)
ms=tiempo2_min[4]
hs_min=ms+hs
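# Billing assumptions encoded below: per-minute rates of 100 COP (car),
# 70 COP (motorcycle) and 60 COP (bicycle), with a 20% discount for
# loyal customers identified above.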
# Difference of the hour components; negative means the stay crossed midnight
x=hs-he
if(x<0):
if(listaCc.count(cc)==5):
if(tipo==1):
tp=he_min*100-(he_min*100*0.2)
print("Total a pagar "+str(tp)+" COP")
elif(tipo==2):
tp=he_min*70-(he_min*70*0.2)
print("Total a pagar "+str(tp)+" COP")
else:
tp=he_min*60-(he_min*60*0.2)
print("Total a pagar "+str(tp)+" COP")
else:
if(tipo==1):
tp=he_min*100
print("Total a pagar "+str(tp)+" COP")
elif(tipo==2):
tp=he_min*70
print("Total a pagar "+str(tp)+" COP")
else:
tp=he_min*60
print("Total a pagar "+str(tp)+" COP")
else:
if(listaCc.count(cc)==5):
if(tipo==1):
tp=(hs_min-he_min)*100-((hs_min-he_min)*100*0.2)
print("Total a pagar "+str(tp)+" COP")
elif(tipo==2):
tp=(hs_min-he_min)*70-((hs_min-he_min)*70*0.2)
print("Total a pagar "+str(tp)+" COP")
else:
tp=(hs_min-he_min)*60-((hs_min-he_min)*60*0.2)
print("Total a pagar "+str(tp)+" COP")
else:
if(tipo==1):
tp=(hs_min-he_min)*100
print("Total a pagar "+str(tp)+" COP")
elif(tipo==2):
tp=(hs_min-he_min)*70
print("Total a pagar "+str(tp)+" COP")
else:
tp=(hs_min-he_min)*60
print("Total a pagar "+str(tp)+" COP")
print("Dirijase al cajero para realizar el pago💵 💸")
print("✨ Muchas gracias✨")
print("ESPERAMOS LE HAYA GUSTADO NUESTRO SERVICIO 🤝🏻")
print("¡¡¡VUELVA PRONTO!!!, FELIZ DÍA 🤗 😁 ")
| 30.44 | 257 | 0.635067 |
4a1dc255bf27523e4ff72381c2d0890bcdf89fb2 | 23 | py | Python | samtranslator/__init__.py | Rondineli/serverless-application-model | 895fcd5818cf7bcef1fe24afc3fd13b1eb796e17 | [
"Apache-2.0"
] | 1 | 2021-11-09T10:23:05.000Z | 2021-11-09T10:23:05.000Z | samtranslator/__init__.py | Rondineli/serverless-application-model | 895fcd5818cf7bcef1fe24afc3fd13b1eb796e17 | [
"Apache-2.0"
] | null | null | null | samtranslator/__init__.py | Rondineli/serverless-application-model | 895fcd5818cf7bcef1fe24afc3fd13b1eb796e17 | [
"Apache-2.0"
] | null | null | null | __version__ = "1.36.0"
import os
] | 2 | 2019-10-31T21:01:46.000Z | 2020-04-24T13:55:49.000Z | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_ua_package(host):
package = host.package("ubuntu-advantage-tools")
assert package.is_installed
from data.base import create_default_bases
] | null | null | null | from data.base import create_default_bases
from data.box import create_default_box
from data.box_state import create_default_box_state
from data.location import create_default_location
from data.organisation import create_default_organisation
from data.product import create_default_product
from data.product_category import create_default_product_category
from data.product_gender import create_default_product_gender
from data.qr_code import create_default_qr_code, create_qr_code_without_box
from data.size_range import create_default_size_range
from data.user import create_default_users
from data.usergroup import create_default_usergroup
from data.usergroup_access_level import create_default_usergroup_access_level
from data.usergroup_base_access import create_default_usergroup_base_access_list
def setup_tables():
create_default_bases()
create_default_box()
create_default_box_state()
create_default_location()
create_default_organisation()
create_default_qr_code()
create_qr_code_without_box()
create_default_users()
create_default_usergroup()
create_default_usergroup_access_level()
create_default_usergroup_base_access_list()
create_default_product()
create_default_product_category()
create_default_product_gender()
create_default_size_range()
| 39.969697 | 80 | 0.8605 |
4a1dc6d2825fbccd6933d98485bebd6ceb241389 | 3,888 | py | Python | tensorflow_model_analysis/eval_saved_model/example_trainers/linear_classifier.py | hakanhp/chanel | 6825b60e86c46daabb18f40f1e45d3de2ff8e983 | [
"Apache-2.0"
] | null | null | null | tensorflow_model_analysis/eval_saved_model/example_trainers/linear_classifier.py | hakanhp/chanel | 6825b60e86c46daabb18f40f1e45d3de2ff8e983 | [
"Apache-2.0"
] | null | null | null | tensorflow_model_analysis/eval_saved_model/example_trainers/linear_classifier.py | hakanhp/chanel | 6825b60e86c46daabb18f40f1e45d3de2ff8e983 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains and exports a simple linear classifier.
The true model is language == 'english'.
The model has the standard metrics added by LinearClassifier, plus additional
metrics added using tf.contrib.estimator.
This model also extracts an additional slice_key feature for evaluation
(this feature is not used in training).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import export
def simple_linear_classifier(export_path, eval_export_path):
"""Trains and exports a simple linear classifier."""
def eval_input_receiver_fn():
"""Eval input receiver function."""
serialized_tf_example = tf.placeholder(
dtype=tf.string, shape=[None], name='input_example_tensor')
language = tf.contrib.layers.sparse_column_with_keys(
'language', ['english', 'chinese'])
slice_key = tf.contrib.layers.sparse_column_with_hash_bucket(
'slice_key', 100)
age = tf.contrib.layers.real_valued_column('age')
label = tf.contrib.layers.real_valued_column('label')
all_features = [age, language, label, slice_key]
feature_spec = tf.contrib.layers.create_feature_spec_for_parsing(
all_features)
receiver_tensors = {'examples': serialized_tf_example}
features = tf.parse_example(serialized_tf_example, feature_spec)
return export.EvalInputReceiver(
features=features,
receiver_tensors=receiver_tensors,
labels=features['label'])
def input_fn():
"""Train input function."""
return {
'age':
tf.constant([[1], [2], [3], [4]]),
'language':
tf.SparseTensor(
values=['english', 'english', 'chinese', 'chinese'],
indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
dense_shape=[4, 1])
}, tf.constant([[1], [1], [0], [0]])
language = tf.contrib.layers.sparse_column_with_keys('language',
['english', 'chinese'])
age = tf.contrib.layers.real_valued_column('age')
all_features = [age, language] # slice_key not used in training.
feature_spec = tf.contrib.layers.create_feature_spec_for_parsing(all_features)
def my_metrics(features, labels, predictions):
return {
'my_mean_prediction': tf.metrics.mean(predictions['logistic']),
'my_mean_age': tf.metrics.mean(features['age']),
'my_mean_label': tf.metrics.mean(labels),
'my_mean_age_times_label': tf.metrics.mean(labels * features['age']),
}
classifier = tf.estimator.LinearClassifier(feature_columns=all_features)
classifier = tf.contrib.estimator.add_metrics(classifier, my_metrics)
classifier.train(input_fn=input_fn, steps=1000)
export_dir = None
eval_export_dir = None
if export_path:
export_dir = classifier.export_savedmodel(
export_dir_base=export_path,
serving_input_receiver_fn=tf.estimator.export.
build_parsing_serving_input_receiver_fn(feature_spec))
if eval_export_path:
eval_export_dir = export.export_eval_savedmodel(
estimator=classifier,
export_dir_base=eval_export_path,
eval_input_receiver_fn=eval_input_receiver_fn)
return export_dir, eval_export_dir
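# Usage sketch (paths below are hypothetical):
#   export_dir, eval_export_dir = simple_linear_classifier(
#       '/tmp/linear_classifier/serving', '/tmp/linear_classifier/eval')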
| 37.028571 | 80 | 0.709362 |
4a1dc8c3187291f9843e2385e5074b0f232dbc1b | 315 | py | Python | src/test/python/unit/collector/test_const.py | ettoreleandrotognoli/etto-robot | 602b6c00ac925ccdbf33e60f06feb5835c246d31 | [
"Apache-2.0"
] | null | null | null | src/test/python/unit/collector/test_const.py | ettoreleandrotognoli/etto-robot | 602b6c00ac925ccdbf33e60f06feb5835c246d31 | [
"Apache-2.0"
] | 6 | 2020-12-17T10:19:15.000Z | 2021-03-31T23:23:19.000Z | src/test/python/unit/collector/test_const.py | ettoreleandrotognoli/etto-robot | 602b6c00ac925ccdbf33e60f06feb5835c246d31 | [
"Apache-2.0"
] | 1 | 2021-08-30T20:38:00.000Z | 2021-08-30T20:38:00.000Z | from aiounittest import AsyncTestCase
from robot.collector.shortcut import *
class ConstCollectorTest(AsyncTestCase):
async def test_const(self):
any_value = 'any value'
collector = const(any_value)
_, result = await collector(None, None)
self.assertEqual(result, any_value)
| 24.230769 | 47 | 0.707937 |
4a1dc8fe6c6558a3a3083b5f3b48d93812b13e68 | 7,818 | py | Python | nova/tests/console/test_console.py | bopopescu/nova_vmware_compute_driver | 60d3936b68030647b9f11970c9e0d060fc286dd9 | [
"Apache-2.0"
] | null | null | null | nova/tests/console/test_console.py | bopopescu/nova_vmware_compute_driver | 60d3936b68030647b9f11970c9e0d060fc286dd9 | [
"Apache-2.0"
] | null | null | null | nova/tests/console/test_console.py | bopopescu/nova_vmware_compute_driver | 60d3936b68030647b9f11970c9e0d060fc286dd9 | [
"Apache-2.0"
] | 2 | 2019-07-08T22:12:35.000Z | 2020-07-24T08:27:24.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack, LLC.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests For Console proxy."""
from nova.console import api as console_api
from nova.console import rpcapi as console_rpcapi
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import rpc
from nova import test
CONF = cfg.CONF
CONF.import_opt('console_manager', 'nova.config')
CONF.import_opt('console_driver', 'nova.console.manager')
class ConsoleTestCase(test.TestCase):
"""Test case for console proxy manager"""
def setUp(self):
super(ConsoleTestCase, self).setUp()
self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
stub_compute=True)
self.console = importutils.import_object(CONF.console_manager)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.host = 'test_compute_host'
def _create_instance(self):
"""Create a test instance"""
inst = {}
#inst['host'] = self.host
#inst['name'] = 'instance-1234'
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['instance_type_id'] = 1
inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)
def test_get_pool_for_instance_host(self):
pool = self.console.get_pool_for_instance_host(self.context,
self.host)
self.assertEqual(pool['compute_host'], self.host)
def test_get_pool_creates_new_pool_if_needed(self):
self.assertRaises(exception.NotFound,
db.console_pool_get_by_host_type,
self.context,
self.host,
self.console.host,
self.console.driver.console_type)
pool = self.console.get_pool_for_instance_host(self.context,
self.host)
pool2 = db.console_pool_get_by_host_type(self.context,
self.host,
self.console.host,
self.console.driver.console_type)
self.assertEqual(pool['id'], pool2['id'])
def test_get_pool_does_not_create_new_pool_if_exists(self):
pool_info = {'address': '127.0.0.1',
'username': 'test',
'password': '1234pass',
'host': self.console.host,
'console_type': self.console.driver.console_type,
'compute_host': 'sometesthostname'}
new_pool = db.console_pool_create(self.context, pool_info)
pool = self.console.get_pool_for_instance_host(self.context,
'sometesthostname')
self.assertEqual(pool['id'], new_pool['id'])
def test_add_console(self):
instance = self._create_instance()
self.console.add_console(self.context, instance['id'])
instance = db.instance_get(self.context, instance['id'])
pool = db.console_pool_get_by_host_type(self.context,
instance['host'], self.console.host,
self.console.driver.console_type)
console_instances = [con['instance_uuid'] for con in pool['consoles']]
        self.assertIn(instance['uuid'], console_instances)
db.instance_destroy(self.context, instance['uuid'])
def test_add_console_does_not_duplicate(self):
instance = self._create_instance()
cons1 = self.console.add_console(self.context, instance['id'])
cons2 = self.console.add_console(self.context, instance['id'])
self.assertEqual(cons1, cons2)
db.instance_destroy(self.context, instance['uuid'])
def test_remove_console(self):
instance = self._create_instance()
console_id = self.console.add_console(self.context, instance['id'])
self.console.remove_console(self.context, console_id)
self.assertRaises(exception.NotFound,
db.console_get,
self.context,
console_id)
db.instance_destroy(self.context, instance['uuid'])
class ConsoleAPITestCase(test.TestCase):
"""Test case for console API"""
def setUp(self):
super(ConsoleAPITestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.console_api = console_api.API()
self.fake_uuid = '00000000-aaaa-bbbb-cccc-000000000000'
self.fake_instance = {
'id': 1,
'uuid': self.fake_uuid,
'host': 'fake_host'
}
self.fake_console = {
'pool': {'host': 'fake_host'},
'id': 'fake_id'
}
def _fake_cast(_ctxt, _topic, _msg):
pass
self.stubs.Set(rpc, 'cast', _fake_cast)
def _fake_db_console_get(_ctxt, _console_uuid, _instance_uuid):
return self.fake_console
self.stubs.Set(db, 'console_get', _fake_db_console_get)
def _fake_db_console_get_all_by_instance(_ctxt, _instance_uuid):
return [self.fake_console]
self.stubs.Set(db, 'console_get_all_by_instance',
_fake_db_console_get_all_by_instance)
def _fake_instance_get_by_uuid(_ctxt, _instance_uuid):
return self.fake_instance
self.stubs.Set(db, 'instance_get_by_uuid', _fake_instance_get_by_uuid)
def test_get_consoles(self):
console = self.console_api.get_consoles(self.context, self.fake_uuid)
self.assertEqual(console, [self.fake_console])
def test_get_console(self):
console = self.console_api.get_console(self.context, self.fake_uuid,
'fake_id')
self.assertEqual(console, self.fake_console)
def test_delete_console(self):
self.mox.StubOutWithMock(console_rpcapi.ConsoleAPI, 'remove_console')
console_rpcapi.ConsoleAPI.remove_console(self.context, 'fake_id')
self.mox.ReplayAll()
self.console_api.delete_console(self.context, self.fake_uuid,
'fake_id')
def test_create_console(self):
self.mox.StubOutWithMock(console_rpcapi.ConsoleAPI, 'add_console')
console_rpcapi.ConsoleAPI.add_console(self.context,
self.fake_instance['id'])
self.mox.ReplayAll()
self.console_api.create_console(self.context, self.fake_uuid)
def test_get_backdoor_port(self):
self.mox.StubOutWithMock(console_rpcapi.ConsoleAPI,
'get_backdoor_port')
console_rpcapi.ConsoleAPI.get_backdoor_port(self.context, 'fake_host')
self.mox.ReplayAll()
self.console_api.get_backdoor_port(self.context, 'fake_host')
| 39.484848 | 78 | 0.629957 |
4a1dc912fac52d8e0b02f8c21c45410cc382b487 | 391 | py | Python | Diploma/wsgi.py | imuno/Diploma | 8d41dd8e09492900b72339fb5a1c753334deb1bc | [
"MIT"
] | 3 | 2019-09-07T15:01:53.000Z | 2020-01-15T09:17:47.000Z | Diploma/wsgi.py | imuno/Diploma | 8d41dd8e09492900b72339fb5a1c753334deb1bc | [
"MIT"
] | 22 | 2020-06-05T22:53:41.000Z | 2022-03-11T23:58:42.000Z | Diploma/wsgi.py | imuno/Diploma | 8d41dd8e09492900b72339fb5a1c753334deb1bc | [
"MIT"
] | 2 | 2020-01-15T09:14:33.000Z | 2020-10-25T19:02:53.000Z | """
WSGI config for Diploma project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Diploma.settings')
application = get_wsgi_application()
| 23 | 78 | 0.785166 |
4a1dc9501d23ccdbc912598419b70cefdc4459e3 | 387 | py | Python | btplda/category_explainer.py | RaviSoji/btplda | 8e25e1863ae249b39d898b4435fb5d4451b3d1cd | [
"Apache-2.0"
] | null | null | null | btplda/category_explainer.py | RaviSoji/btplda | 8e25e1863ae249b39d898b4435fb5d4451b3d1cd | [
"Apache-2.0"
] | null | null | null | btplda/category_explainer.py | RaviSoji/btplda | 8e25e1863ae249b39d898b4435fb5d4451b3d1cd | [
"Apache-2.0"
] | null | null | null | # class Explainer:
# def __init__(self, fitted_plda_classifier):
# self.model =
#
#
# def explain_category(self, category, n_ts=5, ts_sz=1):
# assert category in self.model.get_categories
#
# teaching_sets =
#
# highlighter = FeatureHighlighter(
# highlighted, filters =
#
# return teaching_sets, highlighted_teaching_sets
| 25.8 | 60 | 0.633075 |
4a1dcaae4c90effd4b5fd01b9dab45a79c0e0f64 | 1,877 | py | Python | src/modules/report/base.py | sakshamtaneja21/kube-hunter | e982f291e928a54b4322b15ecc7d954cc477f24d | [
"Apache-2.0"
] | 1 | 2019-09-25T12:31:33.000Z | 2019-09-25T12:31:33.000Z | src/modules/report/base.py | sakshamtaneja21/kube-hunter | e982f291e928a54b4322b15ecc7d954cc477f24d | [
"Apache-2.0"
] | null | null | null | src/modules/report/base.py | sakshamtaneja21/kube-hunter | e982f291e928a54b4322b15ecc7d954cc477f24d | [
"Apache-2.0"
] | null | null | null | from .collector import services, vulnerabilities, hunters, services_lock, vulnerabilities_lock
from src.core.types import Discovery
class BaseReporter(object):
def get_nodes(self):
nodes = list()
node_locations = set()
services_lock.acquire()
for service in services:
node_location = str(service.host)
if node_location not in node_locations:
nodes.append({"type": "Node/Master", "location": str(service.host)})
node_locations.add(node_location)
services_lock.release()
return nodes
def get_services(self):
services_lock.acquire()
services_data = [{"service": service.get_name(),
"location": "{}:{}{}".format(service.host, service.port, service.get_path()),
"description": service.explain()}
for service in services]
services_lock.release()
return services_data
def get_vulnerabilities(self):
vulnerabilities_lock.acquire()
vulnerabilities_data = [{"location": vuln.location(),
"category": vuln.category.name,
"severity": vuln.get_severity(),
"vulnerability": vuln.get_name(),
"description": vuln.explain(),
"evidence": str(vuln.evidence),
"hunter": vuln.hunter.get_name()}
for vuln in vulnerabilities]
vulnerabilities_lock.release()
return vulnerabilities_data
def get_hunter_statistics(self):
hunters_data = list()
for hunter, docs in hunters.items():
            if Discovery not in hunter.__mro__:
name, doc = hunter.parse_docs(docs)
hunters_data.append({"name": name, "description": doc, "vulnerabilities": hunter.publishedVulnerabilities})
return hunters_data
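# Note: concrete reporters are assumed to subclass BaseReporter and render the
# four collections above (nodes, services, vulnerabilities, hunter statistics)
# into a full report; this base class only assembles the data.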
| 40.804348 | 123 | 0.606287 |
4a1dcb0433a8680a95fdddb0bc9ca439233f85dd | 18,251 | py | Python | diofant/tests/utilities/test_lambdify.py | rajkk1/diofant | 6b361334569e4ec2e8c7d30dc324387a4ad417c2 | [
"BSD-3-Clause"
] | null | null | null | diofant/tests/utilities/test_lambdify.py | rajkk1/diofant | 6b361334569e4ec2e8c7d30dc324387a4ad417c2 | [
"BSD-3-Clause"
] | 1 | 2021-06-23T08:27:17.000Z | 2021-06-23T08:27:17.000Z | diofant/tests/utilities/test_lambdify.py | rajkk1/diofant | 6b361334569e4ec2e8c7d30dc324387a4ad417c2 | [
"BSD-3-Clause"
] | 1 | 2021-06-23T07:58:58.000Z | 2021-06-23T07:58:58.000Z | import itertools
import math
import mpmath
import pytest
import diofant
from diofant import (ITE, And, Float, Function, I, Integral, Lambda, Matrix,
Max, Min, Not, Or, Piecewise, Rational, cos, exp, false,
lambdify, oo, pi, sin, sqrt, symbols, true)
from diofant.abc import t, w, x, y, z
from diofant.external import import_module
from diofant.printing.lambdarepr import LambdaPrinter
from diofant.utilities.decorator import conserve_mpmath_dps
from diofant.utilities.lambdify import (MATH_TRANSLATIONS, MPMATH_TRANSLATIONS,
NUMPY_TRANSLATIONS, _get_namespace,
implemented_function, lambdastr)
__all__ = ()
MutableDenseMatrix = Matrix
numpy = import_module('numpy')
with_numpy = pytest.mark.skipif(numpy is None,
reason="Couldn't import numpy.")
# ================= Test different arguments =======================
def test_no_args():
f = lambdify([], 1)
pytest.raises(TypeError, lambda: f(-1))
assert f() == 1
def test_single_arg():
f = lambdify(x, 2*x)
assert f(1) == 2
def test_list_args():
f = lambdify([x, y], x + y)
assert f(1, 2) == 3
def test_nested_args():
# issue sympy/sympy#2790
assert lambdify((x, (y, z)), x + y)(1, (2, 4)) == 3
assert lambdify((x, (y, (w, z))), w + x + y + z)(1, (2, (3, 4))) == 10
assert lambdify(x, x + 1, dummify=False)(1) == 2
def test_str_args():
f = lambdify('x,y,z', 'z,y,x')
assert f(3, 2, 1) == (1, 2, 3)
assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
# make sure correct number of args required
pytest.raises(TypeError, lambda: f(0))
def test_own_namespace():
def myfunc(x):
return 1
f = lambdify(x, sin(x), {'sin': myfunc})
assert f(0.1) == 1
assert f(100) == 1
def test_own_module():
f = lambdify(x, sin(x), math)
assert f(0) == 0.0
def test_bad_args():
# no vargs given
pytest.raises(TypeError, lambda: lambdify(1))
# same with vector exprs
pytest.raises(TypeError, lambda: lambdify([1, 2]))
# reserved name
pytest.raises(ValueError, lambda: lambdify((('__flatten_args__',),), 1))
pytest.raises(NameError, lambda: lambdify(x, 1, 'spam'))
def test__get_namespace():
pytest.raises(TypeError, lambda: _get_namespace(1))
def test_lambdastr():
assert lambdastr(x, x**2) == 'lambda x: (x**2)'
assert lambdastr(x, None, dummify=True).find('None') > 0
def test_atoms():
# Non-Symbol atoms should not be pulled out from the expression namespace
f = lambdify(x, pi + x, {'pi': 3.14})
assert f(0) == 3.14
f = lambdify(x, I + x, {'I': 1j})
assert f(1) == 1 + 1j
# ================= Test different modules =========================
# high precision output of sin(0.2*pi) is used to detect if precision is lost unwanted
@conserve_mpmath_dps
def test_diofant_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf('0.19866933079506121545941262711838975037020672954020')
f = lambdify(x, sin(x), 'diofant')
assert f(x) == sin(x)
prec = 1e-15
assert -prec < f(Rational(1, 5)).evalf() - Float(str(sin02)) < prec
@conserve_mpmath_dps
def test_math_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf('0.19866933079506121545941262711838975037020672954020')
f = lambdify(x, sin(x), 'math')
prec = 1e-15
assert -prec < f(0.2) - sin02 < prec
# if this succeeds, it can't be a python math function
pytest.raises(TypeError, lambda: f(x))
@conserve_mpmath_dps
def test_mpmath_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf('0.19866933079506121545941262711838975037020672954020')
f = lambdify(x, sin(x), 'mpmath')
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(mpmath.mpf('0.2')) - sin02 < prec
# if this succeeds, it can't be a mpmath function
pytest.raises(TypeError, lambda: f(x))
@conserve_mpmath_dps
def test_number_precision():
mpmath.mp.dps = 50
sin02 = mpmath.mpf('0.19866933079506121545941262711838975037020672954020')
f = lambdify(x, sin02, 'mpmath')
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(0) - sin02 < prec
@conserve_mpmath_dps
def test_mpmath_precision():
mpmath.mp.dps = 100
assert str(lambdify((), pi.evalf(100), 'mpmath')()) == str(pi.evalf(100))
# ================= Test Translations ==============================
# We can only check if all translated functions are valid. It has to be checked
# by hand if they are complete.
def test_math_transl():
for sym, mat in MATH_TRANSLATIONS.items():
assert sym in diofant.__dict__
assert mat in math.__dict__
def test_mpmath_transl():
for sym, mat in MPMATH_TRANSLATIONS.items():
assert sym in diofant.__dict__ or sym == 'Matrix'
assert mat in mpmath.__dict__
@with_numpy
def test_numpy_transl():
for sym, nump in NUMPY_TRANSLATIONS.items():
assert sym in diofant.__dict__
assert nump in numpy.__dict__
@with_numpy
def test_numpy_translation_abs():
f = lambdify(x, abs(x), 'numpy')
assert f(-1) == 1
assert f(1) == 1
# ================= Test some functions ============================
def test_exponentiation():
f = lambdify(x, x**2)
assert f(-1) == 1
assert f(0) == 0
assert f(1) == 1
assert f(-2) == 4
assert f(2) == 4
assert f(2.5) == 6.25
def test_sqrt():
f = lambdify(x, sqrt(x))
assert f(0) == 0.0
assert f(1) == 1.0
assert f(4) == 2.0
assert abs(f(2) - 1.414) < 0.001
assert f(6.25) == 2.5
def test_trig():
f = lambdify([x], [cos(x), sin(x)], 'math')
d = f(pi)
prec = 1e-11
assert -prec < d[0] + 1 < prec
assert -prec < d[1] < prec
d = f(3.14159)
prec = 1e-5
assert -prec < d[0] + 1 < prec
assert -prec < d[1] < prec
# ================= Test vectors ===================================
def test_vector_simple():
f = lambdify((x, y, z), (z, y, x))
assert f(3, 2, 1) == (1, 2, 3)
assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
# make sure correct number of args required
pytest.raises(TypeError, lambda: f(0))
def test_vector_discontinuous():
f = lambdify(x, (-1/x, 1/x))
pytest.raises(ZeroDivisionError, lambda: f(0))
assert f(1) == (-1.0, 1.0)
assert f(2) == (-0.5, 0.5)
assert f(-2) == (0.5, -0.5)
def test_trig_symbolic():
f = lambdify([x], [cos(x), sin(x)], 'math')
d = f(pi)
assert abs(d[0] + 1) < 0.0001
assert abs(d[1] - 0) < 0.0001
def test_trig_float():
f = lambdify([x], [cos(x), sin(x)])
d = f(3.14159)
assert abs(d[0] + 1) < 0.0001
assert abs(d[1] - 0) < 0.0001
def test_docs():
f = lambdify(x, x**2)
assert f(2) == 4
f = lambdify([x, y, z], [z, y, x])
assert f(1, 2, 3) == [3, 2, 1]
f = lambdify(x, sqrt(x))
assert f(4) == 2.0
f = lambdify((x, y), sin(x*y)**2)
assert f(0, 5) == 0
def test_math():
f = lambdify((x, y), sin(x), modules='math')
assert f(0, 5) == 0
def test_sin():
f = lambdify(x, sin(x)**2)
assert isinstance(f(2), float)
f = lambdify(x, sin(x)**2, modules='math')
assert isinstance(f(2), float)
def test_matrix():
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol = Matrix([[1, 2], [sin(3) + 4, 1]])
f = lambdify((x, y, z), A, modules='diofant')
assert f(1, 2, 3) == sol
f = lambdify((x, y, z), (A, [A]), modules='diofant')
assert f(1, 2, 3) == (sol, [sol])
J = Matrix((x, x + y)).jacobian((x, y))
v = Matrix((x, y))
sol = Matrix([[1, 0], [1, 1]])
assert lambdify(v, J, modules='diofant')(1, 2) == sol
assert lambdify(v.T, J, modules='diofant')(1, 2) == sol
@with_numpy
def test_numpy_matrix():
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol_arr = numpy.array([[1, 2], [numpy.sin(3) + 4, 1]])
# Lambdify array first, to ensure return to array as default
f = lambdify((x, y, z), A, ['numpy'])
numpy.testing.assert_allclose(f(1, 2, 3), sol_arr)
# Check that the types are arrays and matrices
assert isinstance(f(1, 2, 3), numpy.ndarray)
@with_numpy
def test_numpy_transpose():
A = Matrix([[1, x], [0, 1]])
f = lambdify(x, A.T, modules='numpy')
numpy.testing.assert_array_equal(f(2), numpy.array([[1, 0], [2, 1]]))
@with_numpy
def test_numpy_inverse():
A = Matrix([[1, x], [0, 1]])
f = lambdify(x, A**-1, modules='numpy')
numpy.testing.assert_array_equal(f(2), numpy.array([[1, -2], [0, 1]]))
@with_numpy
def test_numpy_old_matrix():
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol_arr = numpy.array([[1, 2], [numpy.sin(3) + 4, 1]])
f = lambdify((x, y, z), A, [{'ImmutableMatrix': numpy.array}, 'numpy'])
numpy.testing.assert_allclose(f(1, 2, 3), sol_arr)
assert isinstance(f(1, 2, 3), numpy.ndarray)
@with_numpy
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
def test_python_div_zero_sympyissue_11306():
p = Piecewise((1 / x, y < -1), (x, y <= 1), (1 / x, True))
lambdify([x, y], p, modules='numpy')(0, 1)
@with_numpy
def test_numpy_piecewise():
pieces = Piecewise((x, x < 3), (x**2, x > 5), (0, True))
f = lambdify(x, pieces, modules='numpy')
numpy.testing.assert_array_equal(f(numpy.arange(10)),
numpy.array([0, 1, 2, 0, 0, 0, 36, 49, 64, 81]))
# If we evaluate somewhere all conditions are False, we should get back NaN
nodef_func = lambdify(x, Piecewise((x, x > 0), (-x, x < 0)))
numpy.testing.assert_array_equal(nodef_func(numpy.array([-1, 0, 1])),
numpy.array([1, numpy.nan, 1]))
@with_numpy
def test_numpy_logical_ops():
and_func = lambdify((x, y), And(x, y), modules='numpy')
or_func = lambdify((x, y), Or(x, y), modules='numpy')
not_func = lambdify(x, Not(x), modules='numpy')
arr1 = numpy.array([True, True])
arr2 = numpy.array([False, True])
numpy.testing.assert_array_equal(and_func(arr1, arr2), numpy.array([False, True]))
numpy.testing.assert_array_equal(or_func(arr1, arr2), numpy.array([True, True]))
numpy.testing.assert_array_equal(not_func(arr2), numpy.array([True, False]))
@with_numpy
def test_numpy_matmul():
xmat = Matrix([[x, y], [z, 1+z]])
ymat = Matrix([[x**2], [abs(x)]])
mat_func = lambdify((x, y, z), xmat*ymat, modules='numpy')
numpy.testing.assert_array_equal(mat_func(0.5, 3, 4), numpy.array([[1.625], [3.5]]))
numpy.testing.assert_array_equal(mat_func(-0.5, 3, 4), numpy.array([[1.375], [3.5]]))
# Multiple matrices chained together in multiplication
f = lambdify((x, y, z), xmat*xmat*xmat, modules='numpy')
numpy.testing.assert_array_equal(f(0.5, 3, 4), numpy.array([[72.125, 119.25],
[159, 251]]))
def test_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules='diofant')
assert l(x) == Integral(exp(-x**2), (x, -oo, oo))
# ================= Test symbolic ==================================
def test_sym_single_arg():
f = lambdify(x, x * y)
assert f(z) == z * y
def test_sym_list_args():
f = lambdify([x, y], x + y + z)
assert f(1, 2) == 3 + z
def test_sym_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules='diofant')
assert l(y).doit() == sqrt(pi)
def test_namespace_order():
# lambdify had a bug, such that module dictionaries or cached module
# dictionaries would pull earlier namespaces into themselves.
# Because the module dictionaries form the namespace of the
# generated lambda, this meant that the behavior of a previously
# generated lambda function could change as a result of later calls
# to lambdify.
n1 = {'f': lambda x: 'first f'}
n2 = {'f': lambda x: 'second f',
'g': lambda x: 'function g'}
f = diofant.Function('f')
g = diofant.Function('g')
if1 = lambdify(x, f(x), modules=(n1, 'diofant'))
assert if1(1) == 'first f'
if2 = lambdify(x, g(x), modules=(n2, 'diofant'))
assert if2(1) == 'function g'
# previously gave 'second f'
assert if1(1) == 'first f'
def test_imps():
# Here we check if the default returned functions are anonymous - in
# the sense that we can have more than one function with the same name
f = implemented_function('f', lambda x: 2*x)
g = implemented_function('f', lambda x: math.sqrt(x))
l1 = lambdify(x, f(x))
l2 = lambdify(x, g(x))
assert str(f(x)) == str(g(x))
assert l1(3) == 6
assert l2(3) == math.sqrt(3)
# check that we can pass in a Function as input
func = diofant.Function('myfunc')
assert not hasattr(func, '_imp_')
my_f = implemented_function(func, lambda x: 2*x)
assert hasattr(func, '_imp_') and hasattr(my_f, '_imp_')
# Error for functions with same name and different implementation
f2 = implemented_function('f', lambda x: x + 101)
pytest.raises(ValueError, lambda: lambdify(x, f(f2(x))))
def test_imps_errors():
# Test errors that implemented functions can return, and still be
# able to form expressions. See issue sympy/sympy#10810.
for val, error_class in itertools.product((0, 0., 2, 2.0),
(AttributeError, TypeError,
ValueError)):
def myfunc(a):
if a == 0:
raise error_class
return 1
f = implemented_function('f', myfunc)
expr = f(val)
assert expr == f(val)
def test_imps_wrong_args():
pytest.raises(ValueError, lambda: implemented_function(sin, lambda x: x))
def test_lambdify_imps():
# Test lambdify with implemented functions
# first test basic (diofant) lambdify
f = diofant.cos
assert lambdify(x, f(x))(0) == 1
assert lambdify(x, 1 + f(x))(0) == 2
assert lambdify((x, y), y + f(x))(0, 1) == 2
# make an implemented function and test
f = implemented_function('f', lambda x: x + 100)
assert lambdify(x, f(x))(0) == 100
assert lambdify(x, 1 + f(x))(0) == 101
assert lambdify((x, y), y + f(x))(0, 1) == 101
# Can also handle tuples, lists, dicts as expressions
lam = lambdify(x, (f(x), x))
assert lam(3) == (103, 3)
lam = lambdify(x, [f(x), x])
assert lam(3) == [103, 3]
lam = lambdify(x, [f(x), (f(x), x)])
assert lam(3) == [103, (103, 3)]
lam = lambdify(x, {f(x): x})
assert lam(3) == {103: 3}
lam = lambdify(x, {f(x): x})
assert lam(3) == {103: 3}
lam = lambdify(x, {x: f(x)})
assert lam(3) == {3: 103}
# Check that imp preferred to other namespaces by default
d = {'f': lambda x: x + 99}
lam = lambdify(x, f(x), d)
assert lam(3) == 103
# Unless flag passed
lam = lambdify(x, f(x), d, use_imps=False)
assert lam(3) == 102
def test_dummification():
F = Function('F')
G = Function('G')
# "\alpha" is not a valid python variable name
# lambdify should sub in a dummy for it, and return
# without a syntax error
alpha = symbols(r'\alpha')
some_expr = 2 * F(t)**2 / G(t)
lam = lambdify((F(t), G(t)), some_expr)
assert lam(3, 9) == 2
lam = lambdify(sin(t), 2 * sin(t)**2)
assert lam(F(t)) == 2 * F(t)**2
# Test that \alpha was properly dummified
lam = lambdify((alpha, t), 2*alpha + t)
assert lam(2, 1) == 5
pytest.raises(SyntaxError, lambda: lambdify(F(t) * G(t), F(t) * G(t) + 5))
pytest.raises(SyntaxError, lambda: lambdify(2 * F(t), 2 * F(t) + 5))
pytest.raises(SyntaxError, lambda: lambdify(2 * F(t), 4 * F(t) + 5))
def test_python_keywords():
# Test for issue sympy/sympy#7452. The automatic dummification should ensure use of
# Python reserved keywords as symbol names will create valid lambda
# functions. This is an additional regression test.
python_if = symbols('if')
expr = python_if / 2
f = lambdify(python_if, expr)
assert f(4.0) == 2.0
def test_lambdify_docstring():
func = lambdify((w, x, y, z), w + x + y + z)
assert func.__doc__ == (
'Created with lambdify. Signature:\n\n'
'func(w, x, y, z)\n\n'
'Expression:\n\n'
'w + x + y + z')
syms = symbols('a1:26')
func = lambdify(syms, sum(syms))
assert func.__doc__ == (
'Created with lambdify. Signature:\n\n'
'func(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,\n'
' a16, a17, a18, a19, a20, a21, a22, a23, a24, a25)\n\n'
'Expression:\n\n'
'a1 + a10 + a11 + a12 + a13 + a14 + a15 + a16 + a17 + a18 + a19 + a2 + a20 +...')
# ================= Test special printers ==========================
def test_special_printers():
class IntervalPrinter(LambdaPrinter):
"""Use ``lambda`` printer but print numbers as ``mpi`` intervals."""
def _print_Integer(self, expr):
return f"mpi('{super()._print_Integer(expr)}')"
def _print_Rational(self, expr):
return f"mpi('{super()._print_Rational(expr)}')"
def intervalrepr(expr):
return IntervalPrinter().doprint(expr)
expr = diofant.sqrt(diofant.sqrt(2) + diofant.sqrt(3)) + diofant.Rational(1, 2)
func0 = lambdify((), expr, modules='mpmath', printer=intervalrepr)
func1 = lambdify((), expr, modules='mpmath', printer=IntervalPrinter)
func2 = lambdify((), expr, modules='mpmath', printer=IntervalPrinter())
mpi = type(mpmath.mpi(1, 2))
assert isinstance(func0(), mpi)
assert isinstance(func1(), mpi)
assert isinstance(func2(), mpi)
def test_true_false():
# We want exact is comparison here, not just ==
assert lambdify([], true)() is True
assert lambdify([], false)() is False
def test_ITE():
assert lambdify((x, y, z), ITE(x, y, z))(True, 5, 3) == 5
assert lambdify((x, y, z), ITE(x, y, z))(False, 5, 3) == 3
def test_Min_Max():
# see sympy/sympy#10375
assert lambdify((x, y, z), Min(x, y, z))(1, 2, 3) == 1
assert lambdify((x, y, z), Max(x, y, z))(1, 2, 3) == 3
def test_sympyissue_12092():
f = implemented_function('f', lambda x: x**2)
assert f(f(2)).evalf() == Float(16)
| 31.305317 | 89 | 0.585612 |
4a1dcb405a364865fdbc62d5a4a6212f0d1ad556 | 376 | py | Python | EndBlock.py | KRHS-GameProgramming-2014/the-temple-of-the-lobsterman-2 | df4d6beddf207d10a0a6b6fb43bb8dbf1b4220a1 | [
"BSD-2-Clause"
] | null | null | null | EndBlock.py | KRHS-GameProgramming-2014/the-temple-of-the-lobsterman-2 | df4d6beddf207d10a0a6b6fb43bb8dbf1b4220a1 | [
"BSD-2-Clause"
] | null | null | null | EndBlock.py | KRHS-GameProgramming-2014/the-temple-of-the-lobsterman-2 | df4d6beddf207d10a0a6b6fb43bb8dbf1b4220a1 | [
"BSD-2-Clause"
] | null | null | null | import pygame
class EndBlock(pygame.sprite.Sprite):
def __init__(self, pos = [0,0]):
pygame.sprite.Sprite.__init__(self, self.containers)
self.image = pygame.image.load("Resources/Objects/Wall/End.png")
self.rect = self.image.get_rect()
self.place(pos)
self.living = True
def place(self, pos):
self.rect.topleft = pos
def update(*args):
self = args[0]
| 23.5 | 66 | 0.699468 |
4a1dcb50e4dd7125d24c22ce9fa19871af07e390 | 2,906 | py | Python | LAB/Kasir/kasir.py | HudaFiqri/belajarPython | 8bc03e10339ce455afa052c635657b959abef493 | [
"Apache-2.0"
] | 5 | 2019-05-04T03:20:31.000Z | 2021-02-15T12:35:52.000Z | LAB/Kasir/kasir.py | kuhakuu04/belajarPython | 8bc03e10339ce455afa052c635657b959abef493 | [
"Apache-2.0"
] | null | null | null | LAB/Kasir/kasir.py | kuhakuu04/belajarPython | 8bc03e10339ce455afa052c635657b959abef493 | [
"Apache-2.0"
] | null | null | null | #modul
import os
import time
# calculator helpers (tam = add, kur = subtract, kal = multiply, bag = divide)
def tam(a, b):
return (a + b)
def kur(a, b):
return a - b
def kal(a, b):
return a * b
def bag(a, b):
return a / b
#menu
if __name__ == '__main__':
while True:
os.system('cls')
os.system('color 0b')
print('Selamat Datang Di Program Kasir McDonald')
print('')
print('Silahkan Pilih Menu:')
print('1. Teh\t\t\t Rp.8000')
print('2. McSpicy\t\t Rp.39000')
print('3. Nasi Medium\t\t Rp.5000')
print('4. Nasi Large\t\t Rp.7000')
print('5. Hash Brown\t\t Rp.7000')
print('0. keluar')
print('')
inp = input('pilih>>> ')
        # handle the selected menu option
if('0' == inp):
break
elif('1' == inp):
inp1 = int(input('masukkan jumlah>>> '))
inp2 = int(input('masukkan Rp>>> '))
harga_teh = 8000
harga1 = kal(harga_teh, inp1)
kembali = inp2 - harga1
print('')
print('uang yang terima =', inp2)
print('uang yang kembalikan =', kembali)
print('harga keseluruhan =', harga1)
elif('2' == inp):
inp1 = int(input('masukkan jumlah>>> '))
inp2 = int(input('masukkan Rp>>> '))
harga_mc = 39000
harga1 = kal(harga_mc, inp1)
kembali = inp2 - harga1
print('')
print('uang yang terima =', inp2)
print('uang yang kembalikan =', kembali)
print('harga keseluruhan =', harga1)
elif('3' == inp):
inp1 = int(input('masukkan jumlah>>> '))
inp2 = int(input('masukkan Rp>>> '))
harga_med = 5000
harga1 = kal(harga_med, inp1)
kembali = inp2 - harga1
print('')
print('uang yang terima =', inp2)
print('uang yang kembalikan =', kembali)
print('harga keseluruhan =', harga1)
elif('4' == inp):
inp1 = int(input('masukkan jumlah>>> '))
inp2 = int(input('masukkan Rp>>> '))
            harga_lar = 7000
            harga1 = kal(harga_lar, inp1)
            kembali = inp2 - harga1
            print('')
            print('uang yang terima =', inp2)
            print('uang yang kembalikan =', kembali)
            print('harga keseluruhan =', harga1)
elif('5' == inp):
inp1 = int(input('masukkan jumlah>>> '))
inp2 = int(input('masukkan Rp>>> '))
harga_has = 7000
harga1 = kal(harga_has, inp1)
kembali = inp2 - harga1
print('')
print('uang yang terima =', inp2)
print('uang yang kembalikan =', kembali)
print('harga keseluruhan =', harga1)
else:
print('pengetikan salah')
print('')
awal = input('kembali (y/n)>>> ')
if('n' == awal):
break
| 25.716814 | 58 | 0.456986 |
4a1dcb70ea43341de423c68976e0cc57c3119a36 | 3,560 | py | Python | oncopolicy/utils/generic.py | yala/Tempo | bf3e0e78d64869bb2079c582a4a35982f78386ad | [
"MIT"
] | 6 | 2022-01-15T11:57:19.000Z | 2022-02-13T21:15:22.000Z | oncopolicy/utils/generic.py | yala/Tempo | bf3e0e78d64869bb2079c582a4a35982f78386ad | [
"MIT"
] | null | null | null | oncopolicy/utils/generic.py | yala/Tempo | bf3e0e78d64869bb2079c582a4a35982f78386ad | [
"MIT"
] | 2 | 2022-02-02T13:09:29.000Z | 2022-02-18T07:06:19.000Z | import datetime
import hashlib
import numpy as np
from copy import deepcopy
import torch
import pdb
INVALID_DATE_STR = "Date string not valid! Received {}, and got exception {}"
ISO_FORMAT = '%Y-%m-%d %H:%M:%S'
CGMH_ISO_FORMAT ='%Y%m%d'
DAYS_IN_YEAR = 365
DAYS_IN_MO = 30
MAX_MO_TO_CANCER = 1200
MIN_MO_TO_CANCER = 3
MAX_PREFERNCES = 10.0
MIN_PREFERNCES = 0
EPSILON = 1e-3
AVG_MOMENTUM = 0.95
NUM_DIM_AUX_FEATURES = 7 ## Deprecated
class AverageMeter():
def __init__(self):
self.avg = 0
self.first_update = True
def reset(self):
self.avg = 0
self.first_update = True
def update(self, val_tensor):
val = val_tensor.item()
if self.first_update:
self.avg = val
self.first_update = False
else:
self.avg = (AVG_MOMENTUM * self.avg) + (1-AVG_MOMENTUM) * val
assert self.avg >= 0 and val >= 0
def get_aux_tensor(tensor, args):
    ## use of auxiliary features for screen is deprecated
return torch.zeros([tensor.size()[0], NUM_DIM_AUX_FEATURES]).to(tensor.device)
def to_numpy(tensor):
return tensor.cpu().numpy()
def to_tensor(arr, device):
return torch.Tensor(arr).to(device)
def sample_preference_vector(batch_size, sample_random, args):
if sample_random:
dist = torch.distributions.uniform.Uniform(MIN_PREFERNCES, MAX_PREFERNCES)
preferences = dist.sample([batch_size, len(args.metrics), 1])
else:
preferences = torch.ones(batch_size, len(args.metrics), 1)
preferences *= torch.tensor(args.fixed_preference).unsqueeze(0).unsqueeze(-1)
preferences = preferences + EPSILON
preferences = (preferences / (preferences).sum(dim=1).unsqueeze(-1))
return preferences.to(args.device)
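# Sketch of the two modes above: both branches yield a tensor of shape
# [batch_size, len(args.metrics), 1]; EPSILON keeps every entry positive
# before the sum-to-1 normalization over the metric dimension.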
def normalize_dictionary(dictionary):
'''
Normalizes counts in dictionary
:dictionary: a python dict where each value is a count
:returns: a python dict where each value is normalized to sum to 1
'''
num_samples = sum([dictionary[l] for l in dictionary])
for label in dictionary:
dictionary[label] = dictionary[label]*1. / num_samples
return dictionary
def parse_date(iso_string):
'''
Takes a string of format "YYYY-MM-DD HH:MM:SS" and
returns a corresponding datetime.datetime obj
throws an exception if this can't be done.
'''
try:
return datetime.datetime.strptime(iso_string, ISO_FORMAT)
except Exception as e:
raise Exception(INVALID_DATE_STR.format(iso_string, e))
def md5(key):
'''
returns a hashed with md5 string of the key
'''
return hashlib.md5(key.encode()).hexdigest()
def pad_array_to_length(arr, pad_token, max_length):
arr = arr[:max_length]
return np.array( arr + [pad_token]* (max_length - len(arr)))
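# The helper below advances an exam record by one scheduler time step: the
# absolute dates of last follow-up and cancer onset are held fixed while
# exam['date'] moves forward, so the relative year/month offsets shrink.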
def fast_forward_exam_by_one_time_step(curr_exam, NUM_DAYS_IN_TIME_STEP):
exam = deepcopy(curr_exam)
est_date_of_last_followup = curr_exam['date'] + datetime.timedelta(days=int(DAYS_IN_YEAR * curr_exam['years_to_last_followup']))
est_date_of_cancer = curr_exam['date'] + datetime.timedelta(days=int(DAYS_IN_MO * curr_exam['months_to_cancer']))
exam['date'] = curr_exam['date'] + datetime.timedelta(days=int(NUM_DAYS_IN_TIME_STEP))
exam['years_to_last_followup'] = (est_date_of_last_followup - exam['date']).days / DAYS_IN_YEAR
exam['months_to_cancer'] = (est_date_of_cancer - exam['date']).days / DAYS_IN_MO
exam['has_cancer'] = exam['months_to_cancer'] < MIN_MO_TO_CANCER
exam['time_stamp'] = curr_exam['time_stamp'] + 1
return exam
| 33.584906 | 132 | 0.69691 |
4a1dcc23a77736f3758e0465bf9606a15fa81616 | 4,682 | py | Python | scripts/convert_publications.py | gqlo/website | 90237b0e1643eaceec812a765df363ce63542fd1 | [
"MIT"
] | 5 | 2020-02-17T06:28:59.000Z | 2022-01-27T10:25:16.000Z | scripts/convert_publications.py | gqlo/website | 90237b0e1643eaceec812a765df363ce63542fd1 | [
"MIT"
] | 49 | 2020-02-22T10:20:16.000Z | 2022-01-27T01:37:34.000Z | scripts/convert_publications.py | gqlo/website | 90237b0e1643eaceec812a765df363ce63542fd1 | [
"MIT"
] | 6 | 2020-04-17T06:49:56.000Z | 2021-12-21T21:55:10.000Z | from datetime import datetime, timedelta
import json
import sys
header_ja = """---
# Documentation: https://sourcethemes.com/academic/docs/managing-content/
title: "業績 (過去5年分)"
subtitle: ""
summary: ""
authors: []
tags: []
categories: []
date: 2020-02-17T18:32:18+09:00
featured: false
draft: false
# Featured image
# To use, add an image named `featured.jpg/png` to your page's folder.
# Focal points: Smart, Center, TopLeft, Top, TopRight, Left, Right, BottomLeft, Bottom, BottomRight.
image:
caption: ""
focal_point: ""
preview_only: false
# Projects (optional).
# Associate this post with one or more of your projects.
# Simply enter your project's folder or file name without extension.
# E.g. `projects = ["internal-project"]` references `content/project/deep-learning/index.md`.
# Otherwise, set `projects = []`.
projects: []
---
""" # noqa: E501
header_en = """---
# Documentation: https://sourcethemes.com/academic/docs/managing-content/
title: "Publications (last 5 years)"
subtitle: ""
summary: ""
authors: []
tags: []
categories: []
date: 2020-02-17T18:32:18+09:00
featured: false
draft: false
# Featured image
# To use, add an image named `featured.jpg/png` to your page's folder.
# Focal points: Smart, Center, TopLeft, Top, TopRight, Left, Right, BottomLeft, Bottom, BottomRight.
image:
caption: ""
focal_point: ""
preview_only: false
# Projects (optional).
# Associate this post with one or more of your projects.
# Simply enter your project's folder or file name without extension.
# E.g. `projects = ["internal-project"]` references `content/project/deep-learning/index.md`.
# Otherwise, set `projects = []`.
projects: []
---
""" # noqa: E501
categories_ja = {
"学術論文誌": "学術論文誌",
"国際会議等発表": "国際会議論文",
"著書": "著書",
"国内学会研究会・シンポジウム等発表": "国内学会研究会・シンポジウム等",
"国内学会大会等発表": "国内学会大会等",
"解説・総説": "解説・総説",
"表彰・受賞": "表彰・受賞",
"その他": "その他"
}
categories_en = {
"学術論文誌": "Journal",
"国際会議等発表": "International Conference",
"著書": "Book",
"国内学会研究会・シンポジウム等発表": "Domestic Conference and Symposium",
"国内学会大会等発表": "Annual Domestic Meeting",
"解説・総説": "Survey",
"表彰・受賞": "Award",
"その他": "Misc"
}
def read_json(input_fname):
publications = []
with open(input_fname) as f:
j = json.load(f)
print("Converting", j["list_item"]["items_count"], "publications")
for publication in j["list_item"]["items"]:
item = {}
item["authors"] = ", ".join([author["@name"] for author in
publication["dc:creator"]])
item["title"] = "\"" + publication["dc:title"]["@value"] + "\""
item["booktitle"] = publication["prism:sourceName"]["@value"] or \
publication["prism:sourceName2"]["@value"]
item["category"] = publication["dc:category"]["@value"]
item["doi"] = publication["prism:doi"]
date_str = publication["prism:publicationDate"]
try:
item["date"] = datetime.strptime(date_str, "%Y/%m/%d")
except ValueError:
item["date"] = datetime.strptime(date_str, "%Y/%m")
publications.append(item)
return publications
def write_markdown(output_fname, publications, categories, header):
with open(output_fname, "w") as f:
f.write(header)
for key, category in categories.items():
filtered = [p for p in publications if p["category"] ==
key and p["date"] > datetime.now() -
timedelta(days=365) * 5]
filtered.sort(key=lambda p: p["date"], reverse=True)
if not filtered:
print("Skipping category", category)
continue
print(len(filtered), "items found for category", category)
f.write("\n## " + category + "\n\n")
for item in filtered:
f.write("1. " + ", ".join([item["authors"], item["title"],
item["booktitle"],
item["date"].strftime("%b. %Y")])
+ ".")
if item["doi"]:
f.write(" [doi:{}](https://doi.org/{})".format(item["doi"], item["doi"]))
f.write("\n")
def main():
input_fname = sys.argv[1]
output_fname_ja = sys.argv[2]
output_fname_en = sys.argv[3]
publications = read_json(input_fname)
write_markdown(output_fname_ja, publications, categories_ja, header_ja)
write_markdown(output_fname_en, publications, categories_en, header_en)
if __name__ == "__main__":
main()
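# Usage sketch (file names are hypothetical):
#   python convert_publications.py publications.json pubs_ja.md pubs_en.md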
| 28.901235 | 100 | 0.587142 |
4a1dcc828538e5d118cabc429db9ed43b34291b5 | 336 | py | Python | exercises/exc_03_06.py | rklymentiev/py-for-neuro | 6bb163347483642c79eac429e5a9289edff7ce09 | [
"MIT"
] | 7 | 2021-04-28T13:12:16.000Z | 2022-01-15T00:21:11.000Z | exercises/exc_03_06.py | rklymentiev/py-for-neuro | 6bb163347483642c79eac429e5a9289edff7ce09 | [
"MIT"
] | 2 | 2021-04-02T18:42:55.000Z | 2021-05-20T08:43:06.000Z | exercises/exc_03_06.py | rklymentiev/py-for-neuro | 6bb163347483642c79eac429e5a9289edff7ce09 | [
"MIT"
] | 2 | 2021-07-04T22:57:29.000Z | 2021-07-29T19:28:43.000Z | import pandas as pd
table_1 = pd.read_json("exercises/data/table1.json")
table_2 = pd.read_json("exercises/data/table2.json")
joined_table = ___ # join two tables together
joined_table.___(labels=___, ___=1, inplace=___) # drop the redundant column
___ # replace the missing values in a column
display(joined_table)
| 30.545455 | 76 | 0.738095 |
4a1dcd0f5c5a6f61424705a02bb0d7c611cbdf3b | 1,299 | py | Python | deploy.py | lucasalexsorensen/mlops | 2d8157eb493061775bdab9a8e176d2bdcc2c166e | [
"MIT"
] | null | null | null | deploy.py | lucasalexsorensen/mlops | 2d8157eb493061775bdab9a8e176d2bdcc2c166e | [
"MIT"
] | null | null | null | deploy.py | lucasalexsorensen/mlops | 2d8157eb493061775bdab9a8e176d2bdcc2c166e | [
"MIT"
] | null | null | null | from google.cloud import aiplatform
CUSTOM_PREDICTOR_IMAGE_URI = (
"europe-west4-docker.pkg.dev/dtumlops-project-mask-no-mask/dtu-mlops/serve"
)
VERSION = 1
model_display_name = f"mask-v{VERSION}"
model_description = "PyTorch based text classifier with custom container"
MODEL_NAME = "mask"
health_route = "/ping"
predict_route = f"/predictions/{MODEL_NAME}"
serving_container_ports = [7080]
model = aiplatform.Model.upload(
display_name=model_display_name,
description=model_description,
serving_container_image_uri=CUSTOM_PREDICTOR_IMAGE_URI,
serving_container_predict_route=predict_route,
serving_container_health_route=health_route,
serving_container_ports=serving_container_ports,
)
model.wait()
print(model.display_name)
print(model.resource_name)
endpoint_display_name = "mask-endpoint"
endpoint = aiplatform.Endpoint.create(display_name=endpoint_display_name)
traffic_percentage = 100
machine_type = "n1-standard-4"
deployed_model_display_name = model_display_name
min_replica_count = 1
max_replica_count = 3
sync = True
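# Deploy the uploaded model to the endpoint: traffic_percentage=100 routes all
# requests to this deployment, and sync=True blocks until the deploy finishes.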
model.deploy(
endpoint=endpoint,
deployed_model_display_name=deployed_model_display_name,
    machine_type=machine_type,
    min_replica_count=min_replica_count,
    max_replica_count=max_replica_count,
    traffic_percentage=traffic_percentage,
    sync=sync,
)
model.wait()
print("DEPLOYED TO ENDPOINT!")
| 27.0625 | 79 | 0.806005 |
4a1dcfa763728b3bdad70aaa7eabb22cd3d68980 | 3,026 | py | Python | dlrs/clearlinux/pytorch/mkl/scripts/generate_defaults.py | rahulunair/stacks | 92635edb582e9d2a16139deb3afba8cd98355344 | [
"Apache-2.0"
] | 32 | 2020-01-14T16:06:04.000Z | 2021-10-06T10:42:20.000Z | dlrs/clearlinux/pytorch/mkl/scripts/generate_defaults.py | rahulunair/stacks | 92635edb582e9d2a16139deb3afba8cd98355344 | [
"Apache-2.0"
] | 31 | 2019-10-31T16:44:44.000Z | 2021-04-30T17:31:01.000Z | dlrs/clearlinux/pytorch/mkl/scripts/generate_defaults.py | rahulunair/stacks | 92635edb582e9d2a16139deb3afba8cd98355344 | [
"Apache-2.0"
] | 22 | 2019-10-29T21:45:48.000Z | 2021-12-15T22:20:58.000Z | #!/usr/bin/env python
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Helper script that generates a file with sane defaults that can be sourced when using oneDNN optimized DLRS stack.
We recommend you fine tune the exported env variables based on the workload. More details can be found at:
https://github.com/IntelAI/models/blob/master/docs/general/tensorflow_serving/GeneralBestPractices.md.
To get further details, try --verbose."""
import os
import argparse
import subprocess
import sys
import psutil
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="detailed info on the variables being set",
)
parser.add_argument(
"-g",
"--generate",
action="store_true",
help="generate 'mkl_env.sh' file with default settings for oneDNN",
required=False,
)
args = parser.parse_args()
def main():
sockets = int(
subprocess.check_output(
'cat /proc/cpuinfo | grep "physical id" | sort -u | wc -l', shell=True
)
)
physical_cores = psutil.cpu_count(logical=False)
vars = {}
vars["OMP_NUM_THREADS"] = {
"value": physical_cores,
"help": "Number of OpenMP threads",
}
vars["KMP_BLOCKTIME"] = {
"value": 1,
"help": "Thread waits until set ms after execution.",
}
vars["KMP_AFFINITY"] = {
"value": "granularity=fine,verbose,compact,1,0",
"help": "OpenMP threads bound to single thread context compactly",
}
vars["INTRA_OP_PARALLELISM_THREADS"] = {
"value": physical_cores,
"help": "scheme for individual op",
}
vars["INTER_OP_PARALLELISM_THREADS"] = {
"value": sockets,
"help": "parllelizing scheme for independent ops",
}
if args.verbose:
print(
(
"variables that can be used to fine tune performance,\n"
"use '-g' or '--generate' to generate a file with these variables\n"
)
)
for var, val in vars.items():
print("variable: {}, description: {}".format(var, val["help"]))
if args.generate:
print("Generating default env vars for MKL and OpenMP, stored in /workspace/mkl_env.sh ")
for var, val in vars.items():
print(
"export {}={}".format(var, str(val["value"])),
file=open("mkl_env.sh", "a"),
)
if __name__ == "__main__":
main()
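# Usage sketch: `python generate_defaults.py -g` appends the export lines to
# mkl_env.sh in the current working directory; `source mkl_env.sh` applies them.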
| 32.191489 | 118 | 0.639458 |
4a1dcfb2922c88d088bf3496a84abe92f9edb699 | 462 | py | Python | DatosGraficas/Datos_Para_Entrenamiento/TimeJumps.py | Rodrigo2118/IDMotor | 5a0ac6675cd4cde49efcc0af50bae891e6d256c9 | [
"MIT"
] | null | null | null | DatosGraficas/Datos_Para_Entrenamiento/TimeJumps.py | Rodrigo2118/IDMotor | 5a0ac6675cd4cde49efcc0af50bae891e6d256c9 | [
"MIT"
] | 1 | 2020-02-13T03:02:13.000Z | 2020-02-13T03:02:13.000Z | DatosGraficas/Datos_Para_Entrenamiento/TimeJumps.py | Rodrigo2118/IDMotor | 5a0ac6675cd4cde49efcc0af50bae891e6d256c9 | [
"MIT"
] | null | null | null | import csv
import sys
import random
p0=0.5
file=open(sys.argv[1],'r')
#csv_reader=csv.reader(file, delimiter=',',lineterminator='\n')
nfile=open(r"tj_"+sys.argv[1],"w")
#csv_writer=csv.writer(nfile,delimiter=',',lineterminator='\n')
string=file.read().split('\n')
#print(string)
first = True
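# Keep each row independently with probability 1 - p0 (0.5 here); `first`
# suppresses the leading newline in the filtered "tj_"-prefixed copy.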
for row in string:
r=random.random()-p0
if r>0:
if not first:
nfile.write("\n"+row)
else:
nfile.write(row)
first=False
file.close();
nfile.close();
| 17.769231 | 63 | 0.67316 |
4a1dd1baac85cadb05ed10d80aa4df6c97f9419d | 800 | py | Python | remove_white.py | l0stpenguin/MegaDepth | ae060cabd60c9210291b0cd158650e6e9041496b | [
"MIT"
] | null | null | null | remove_white.py | l0stpenguin/MegaDepth | ae060cabd60c9210291b0cd158650e6e9041496b | [
"MIT"
] | null | null | null | remove_white.py | l0stpenguin/MegaDepth | ae060cabd60c9210291b0cd158650e6e9041496b | [
"MIT"
] | null | null | null | import torch
import glob
import os
import cv2
#remove all depth maps which are almost white
sequence_number = '0008'
root = sequence_number + '/dense0/processed/depth/'
image_files = glob.glob(root+'/*.png')
for i, file_path in enumerate(image_files):
image = cv2.imread(file_path)
file_name = os.path.split(file_path)[-1]
mean = image.mean()
if mean > 230:
print('deleting ', file_name,image.mean())
os.remove(file_path)
image_path = file_path.replace('depth','images').replace('png','jpg')
os.remove(image_path)
| 28.571429 | 77 | 0.73375 |
4a1dd443dec5bd34813b5a5686805c5ddea1a1c6 | 1,497 | py | Python | pizzaProject/pizzaApp/models.py | martinezlucas98/pizzapro | 15fe1f6feea20b14e954cbc1dd8648ba2df28049 | [
"MIT"
] | null | null | null | pizzaProject/pizzaApp/models.py | martinezlucas98/pizzapro | 15fe1f6feea20b14e954cbc1dd8648ba2df28049 | [
"MIT"
] | null | null | null | pizzaProject/pizzaApp/models.py | martinezlucas98/pizzapro | 15fe1f6feea20b14e954cbc1dd8648ba2df28049 | [
"MIT"
] | null | null | null | from django.db import models
# The Pizza model
class Pizza (models.Model):
id = models.CharField(max_length=32, primary_key=True)
nombre = models.CharField(max_length=32)
precio = models.PositiveIntegerField()
activo = models.BooleanField(default=True)
def save(self, *args, **kwargs):
self.id = (self.id).upper()
return super(Pizza, self).save(*args, **kwargs)
def __str__(self):
return self.nombre
CAT_BASICO = 'BASICO'
CAT_PREMIUM = 'PREMIUM'
CATEGORIAS_CHOICES = [
(CAT_BASICO, 'Basico'),
(CAT_PREMIUM, 'Premium')
]
# The model for each ingredient
class Ingrediente (models.Model):
id = models.CharField(max_length=32, primary_key=True)
nombre = models.CharField(max_length=32)
    # There are only two categories, so we use choices
categoria = models.CharField(max_length=32, choices=CATEGORIAS_CHOICES, default=CAT_BASICO)
def save(self, *args, **kwargs):
self.id = (self.id).upper()
self.categoria = (self.categoria).upper()
return super(Ingrediente, self).save(*args, **kwargs)
def __str__(self):
return self.nombre
# Model relating each pizza to its ingredients
class Ingredientes_x_pizza (models.Model):
pizza = models.ForeignKey(Pizza, on_delete=models.CASCADE)
ingrediente = models.ForeignKey(Ingrediente, on_delete=models.CASCADE)
def __str__(self):
return ("Pizza: "+self.pizza.nombre + " || Ingrediente: "+self.ingrediente.nombre) | 31.851064 | 95 | 0.696059 |
4a1dd560ca2b3c0f98f93bf96a1c314f344d3f35 | 1,486 | py | Python | teacher/urls.py | rummansadik/Admission-Automation | a2fd305644cf60bfd0a381b855fb8c2810507f36 | [
"MIT"
] | null | null | null | teacher/urls.py | rummansadik/Admission-Automation | a2fd305644cf60bfd0a381b855fb8c2810507f36 | [
"MIT"
] | null | null | null | teacher/urls.py | rummansadik/Admission-Automation | a2fd305644cf60bfd0a381b855fb8c2810507f36 | [
"MIT"
] | null | null | null | from django.contrib.auth.views import LoginView
from django.urls import path
from teacher import views
urlpatterns = [
path('teacherclick', views.teacherclick_view),
path('teacherlogin', LoginView.as_view(
template_name='teacher/teacherlogin.html'), name='teacherlogin'),
path('teachersignup', views.teacher_signup_view, name='teachersignup'),
path('teacher-dashboard', views.teacher_dashboard_view,
name='teacher-dashboard'),
path('teacher-exam', views.teacher_exam_view, name='teacher-exam'),
path('pending-students', views.teacher_pending_students_view,
name='pending-students'),
path('pending-student/<int:pk>',
views.teacher_pending_student_answer_view, name='pending-student'),
path('teacher-add-exam', views.teacher_add_exam_view, name='teacher-add-exam'),
path('teacher-view-exam', views.teacher_view_exam_view,
name='teacher-view-exam'),
path('delete-exam/<int:pk>', views.delete_exam_view, name='delete-exam'),
path('teacher-question', views.teacher_question_view, name='teacher-question'),
path('teacher-add-question', views.teacher_add_question_view,
name='teacher-add-question'),
path('teacher-view-question', views.teacher_view_question_view,
name='teacher-view-question'),
path('see-question/<int:pk>', views.see_question_view, name='see-question'),
path('remove-question/<int:pk>',
views.remove_question_view, name='remove-question'),
]
| 47.935484 | 83 | 0.719381 |
4a1dd58ddb4c55392a8bae0639e01f9553698d66 | 558 | py | Python | var/spack/repos/builtin/packages/perl-array-utils/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-10T13:47:48.000Z | 2019-04-17T13:05:17.000Z | var/spack/repos/builtin/packages/perl-array-utils/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32 | 2020-12-15T17:29:20.000Z | 2022-03-21T15:08:31.000Z | var/spack/repos/builtin/packages/perl-array-utils/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2018-04-06T09:04:11.000Z | 2020-01-24T12:52:12.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlArrayUtils(PerlPackage):
"""Small utils for array manipulation"""
homepage = "http://search.cpan.org/~zmij/Array-Utils/Utils.pm"
url = "http://search.cpan.org/CPAN/authors/id/Z/ZM/ZMIJ/Array/Array-Utils-0.5.tar.gz"
version('0.5', sha256='89dd1b7fcd9b4379492a3a77496e39fe6cd379b773fd03a6b160dd26ede63770')
| 34.875 | 94 | 0.74552 |
4a1dd625dcb71f58f25dd34536fb870fba81d264 | 389 | py | Python | app/litrev/wsgi.py | ipa/litrev | 01550417872b0e2b0c0541a3b7f6b28d18d874c9 | [
"MIT"
] | 2 | 2020-04-09T11:46:36.000Z | 2022-02-01T00:56:11.000Z | app/litrev/wsgi.py | ipa/litrev | 01550417872b0e2b0c0541a3b7f6b28d18d874c9 | [
"MIT"
] | 5 | 2021-03-30T13:01:04.000Z | 2021-09-22T18:50:40.000Z | app/litrev/wsgi.py | ipa/litrev | 01550417872b0e2b0c0541a3b7f6b28d18d874c9 | [
"MIT"
] | null | null | null | """
WSGI config for litrev project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'litrev.settings')
application = get_wsgi_application()
| 22.882353 | 78 | 0.784062 |
4a1dd6e288c2e4b81c1f6d6e68d7ec5a0dcd0043 | 51,477 | py | Python | python/ray/scripts/scripts.py | lavanyashukla/ray | 9c1a75b6ff82a842131e6beb3c260188befc21df | [
"Apache-2.0"
] | 1 | 2020-10-21T22:24:27.000Z | 2020-10-21T22:24:27.000Z | python/ray/scripts/scripts.py | mfitton/ray | fece8db70d703da1aad192178bd50923e83cc99a | [
"Apache-2.0"
] | 1 | 2020-12-04T02:39:55.000Z | 2020-12-04T02:39:55.000Z | python/ray/scripts/scripts.py | mfitton/ray | fece8db70d703da1aad192178bd50923e83cc99a | [
"Apache-2.0"
] | null | null | null | import click
import copy
from datetime import datetime
import json
import logging
import os
import subprocess
import sys
import time
import urllib
import urllib.parse
import yaml
from socket import socket
import ray
import psutil
import ray._private.services as services
from ray.autoscaler._private.commands import (
attach_cluster, exec_cluster, create_or_update_cluster, monitor_cluster,
rsync, teardown_cluster, get_head_node_ip, kill_node, get_worker_node_ips,
debug_status, RUN_ENV_TYPES)
import ray.ray_constants as ray_constants
import ray.utils
from ray.autoscaler._private.cli_logger import cli_logger, cf
logger = logging.getLogger(__name__)
def check_no_existing_redis_clients(node_ip_address, redis_client):
# The client table prefix must be kept in sync with the file
# "src/ray/gcs/redis_module/ray_redis_module.cc" where it is defined.
REDIS_CLIENT_TABLE_PREFIX = "CL:"
client_keys = redis_client.keys(f"{REDIS_CLIENT_TABLE_PREFIX}*")
# Filter to clients on the same node and do some basic checking.
for key in client_keys:
info = redis_client.hgetall(key)
assert b"ray_client_id" in info
assert b"node_ip_address" in info
assert b"client_type" in info
assert b"deleted" in info
# Clients that ran on the same node but that are marked dead can be
# ignored.
deleted = info[b"deleted"]
deleted = bool(int(deleted))
if deleted:
continue
if ray.utils.decode(info[b"node_ip_address"]) == node_ip_address:
raise Exception("This Redis instance is already connected to "
"clients with this IP address.")
logging_options = [
click.option(
"--log-style",
required=False,
type=click.Choice(cli_logger.VALID_LOG_STYLES, case_sensitive=False),
default="auto",
help=("If 'pretty', outputs with formatting and color. If 'record', "
"outputs record-style without formatting. "
"'auto' defaults to 'pretty', and disables pretty logging "
"if stdin is *not* a TTY.")),
click.option(
"--log-color",
required=False,
type=click.Choice(["auto", "false", "true"], case_sensitive=False),
default="auto",
help=("Use color logging. "
"Auto enables color logging if stdout is a TTY.")),
click.option("-v", "--verbose", default=None, count=True)
]
def add_click_options(options):
def wrapper(f):
for option in reversed(logging_options):
f = option(f)
return f
return wrapper
@click.group()
@click.option(
"--logging-level",
required=False,
default=ray_constants.LOGGER_LEVEL,
type=str,
help=ray_constants.LOGGER_LEVEL_HELP)
@click.option(
"--logging-format",
required=False,
default=ray_constants.LOGGER_FORMAT,
type=str,
help=ray_constants.LOGGER_FORMAT_HELP)
@click.version_option()
def cli(logging_level, logging_format):
level = logging.getLevelName(logging_level.upper())
ray.ray_logging.setup_logger(level, logging_format)
cli_logger.set_format(format_tmpl=logging_format)
@click.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@click.option(
"--port",
"-p",
required=False,
type=int,
default=8265,
help="The local port to forward to the dashboard")
@click.option(
"--remote-port",
required=False,
type=int,
default=8265,
help="The remote port your dashboard runs on")
def dashboard(cluster_config_file, cluster_name, port, remote_port):
"""Port-forward a Ray cluster's dashboard to the local machine."""
# Sleeping in a loop is preferable to `sleep infinity` because the latter
# only works on linux.
# Find the first open port sequentially from `remote_port`.
try:
port_forward = [
(port, remote_port),
]
click.echo("Attempting to establish dashboard locally at"
" localhost:{} connected to"
" remote port {}".format(port, remote_port))
# We want to probe with a no-op that returns quickly to avoid
# exceptions caused by network errors.
exec_cluster(
cluster_config_file,
override_cluster_name=cluster_name,
port_forward=port_forward)
click.echo("Successfully established connection.")
except Exception as e:
raise click.ClickException(
"Failed to forward dashboard from remote port {1} to local port "
"{0}. There are a couple possibilities: \n 1. The remote port is "
"incorrectly specified \n 2. The local port {0} is already in "
"use.\n The exception is: {2}".format(port, remote_port, e)) \
from None
def continue_debug_session():
"""Continue active debugging session.
This function will connect 'ray debug' to the right debugger
when a user is stepping between Ray tasks.
"""
active_sessions = ray.experimental.internal_kv._internal_kv_list(
"RAY_PDB_")
for active_session in active_sessions:
if active_session.startswith(b"RAY_PDB_CONTINUE"):
print("Continuing pdb session in different process...")
key = b"RAY_PDB_" + active_session[len("RAY_PDB_CONTINUE_"):]
while True:
data = ray.experimental.internal_kv._internal_kv_get(key)
if data:
session = json.loads(data)
if "exit_debugger" in session:
ray.experimental.internal_kv._internal_kv_del(key)
return
host, port = session["pdb_address"].split(":")
ray.util.rpdb.connect_pdb_client(host, int(port))
ray.experimental.internal_kv._internal_kv_del(key)
continue_debug_session()
return
time.sleep(1.0)
@cli.command()
@click.option(
"--address",
required=False,
type=str,
help="Override the address to connect to.")
def debug(address):
"""Show all active breakpoints and exceptions in the Ray debugger."""
if not address:
address = services.get_ray_address_to_use_or_die()
logger.info(f"Connecting to Ray instance at {address}.")
ray.init(address=address, log_to_driver=False)
while True:
continue_debug_session()
active_sessions = ray.experimental.internal_kv._internal_kv_list(
"RAY_PDB_")
print("Active breakpoints:")
for i, active_session in enumerate(active_sessions):
data = json.loads(
ray.experimental.internal_kv._internal_kv_get(active_session))
print(
str(i) + ": " + data["proctitle"] + " | " + data["filename"] +
":" + str(data["lineno"]))
print(data["traceback"])
inp = input("Enter breakpoint index or press enter to refresh: ")
if inp == "":
print()
continue
else:
index = int(inp)
session = json.loads(
ray.experimental.internal_kv._internal_kv_get(
active_sessions[index]))
host, port = session["pdb_address"].split(":")
ray.util.rpdb.connect_pdb_client(host, int(port))
@cli.command()
@click.option(
"--node-ip-address",
required=False,
type=str,
help="the IP address of this node")
@click.option(
"--address", required=False, type=str, help="the address to use for Ray")
@click.option(
"--port",
type=int,
required=False,
help=f"the port of the head ray process. If not provided, defaults to "
f"{ray_constants.DEFAULT_PORT}; if port is set to 0, we will"
f" allocate an available port.")
@click.option(
"--redis-password",
required=False,
hidden=True,
type=str,
default=ray_constants.REDIS_DEFAULT_PASSWORD,
help="If provided, secure Redis ports with this password")
@click.option(
"--redis-shard-ports",
required=False,
type=str,
help="the port to use for the Redis shards other than the "
"primary Redis shard")
@click.option(
"--object-manager-port",
required=False,
type=int,
help="the port to use for starting the object manager")
@click.option(
"--node-manager-port",
required=False,
type=int,
help="the port to use for starting the node manager")
@click.option(
"--gcs-server-port",
required=False,
type=int,
help="Port number for the GCS server.")
@click.option(
"--min-worker-port",
required=False,
type=int,
default=10000,
help="the lowest port number that workers will bind on. If not set, "
"random ports will be chosen.")
@click.option(
"--max-worker-port",
required=False,
type=int,
default=10999,
help="the highest port number that workers will bind on. If set, "
"'--min-worker-port' must also be set.")
@click.option(
"--worker-port-list",
required=False,
help="a comma-separated list of open ports for workers to bind on. "
"Overrides '--min-worker-port' and '--max-worker-port'.")
@click.option(
"--memory",
required=False,
hidden=True,
type=int,
help="The amount of memory (in bytes) to make available to workers. "
"By default, this is set to the available memory on the node.")
@click.option(
"--object-store-memory",
required=False,
type=int,
help="The amount of memory (in bytes) to start the object store with. "
"By default, this is capped at 20GB but can be set higher.")
@click.option(
"--redis-max-memory",
required=False,
hidden=True,
type=int,
help="The max amount of memory (in bytes) to allow redis to use. Once the "
"limit is exceeded, redis will start LRU eviction of entries. This only "
"applies to the sharded redis tables (task, object, and profile tables). "
"By default this is capped at 10GB but can be set higher.")
@click.option(
"--num-cpus",
required=False,
type=int,
help="the number of CPUs on this node")
@click.option(
"--num-gpus",
required=False,
type=int,
help="the number of GPUs on this node")
@click.option(
"--resources",
required=False,
default="{}",
type=str,
help="a JSON serialized dictionary mapping resource name to "
"resource quantity")
@click.option(
"--head",
is_flag=True,
default=False,
help="provide this argument for the head node")
@click.option(
"--include-dashboard",
default=None,
type=bool,
help="provide this argument to start the Ray dashboard GUI")
@click.option(
"--dashboard-host",
required=False,
default="localhost",
help="the host to bind the dashboard server to, either localhost "
"(127.0.0.1) or 0.0.0.0 (available from all interfaces). By default, this"
"is localhost.")
@click.option(
"--dashboard-port",
required=False,
type=int,
default=ray_constants.DEFAULT_DASHBOARD_PORT,
help="the port to bind the dashboard server to--defaults to {}".format(
ray_constants.DEFAULT_DASHBOARD_PORT))
@click.option(
"--block",
is_flag=True,
default=False,
help="provide this argument to block forever in this command")
@click.option(
"--plasma-directory",
required=False,
type=str,
help="object store directory for memory mapped files")
@click.option(
"--autoscaling-config",
required=False,
type=str,
help="the file that contains the autoscaling config")
@click.option(
"--no-redirect-worker-output",
is_flag=True,
default=False,
help="do not redirect worker stdout and stderr to files")
@click.option(
"--no-redirect-output",
is_flag=True,
default=False,
help="do not redirect non-worker stdout and stderr to files")
@click.option(
"--plasma-store-socket-name",
default=None,
help="manually specify the socket name of the plasma store")
@click.option(
"--raylet-socket-name",
default=None,
help="manually specify the socket path of the raylet process")
@click.option(
"--temp-dir",
hidden=True,
default=None,
help="manually specify the root temporary dir of the Ray process")
@click.option(
"--java-worker-options",
required=False,
hidden=True,
default=None,
type=str,
help="Overwrite the options to start Java workers.")
@click.option(
"--system-config",
default=None,
hidden=True,
type=json.loads,
help="Override system configuration defaults.")
@click.option(
"--lru-evict",
is_flag=True,
hidden=True,
default=False,
help="Specify whether LRU evict will be used for this cluster.")
@click.option(
"--enable-object-reconstruction",
is_flag=True,
default=False,
hidden=True,
help="Specify whether object reconstruction will be used for this cluster."
)
@click.option(
"--metrics-export-port",
type=int,
hidden=True,
default=None,
help="the port to use to expose Ray metrics through a "
"Prometheus endpoint.")
@add_click_options(logging_options)
def start(node_ip_address, address, port, redis_password, redis_shard_ports,
object_manager_port, node_manager_port, gcs_server_port,
min_worker_port, max_worker_port, worker_port_list, memory,
object_store_memory, redis_max_memory, num_cpus, num_gpus, resources,
head, include_dashboard, dashboard_host, dashboard_port, block,
plasma_directory, autoscaling_config, no_redirect_worker_output,
no_redirect_output, plasma_store_socket_name, raylet_socket_name,
temp_dir, java_worker_options, system_config, lru_evict,
enable_object_reconstruction, metrics_export_port, log_style,
log_color, verbose):
"""Start Ray processes manually on the local machine."""
cli_logger.configure(log_style, log_color, verbose)
if gcs_server_port and not head:
raise ValueError(
"gcs_server_port can be only assigned when you specify --head.")
# Convert hostnames to numerical IP address.
if node_ip_address is not None:
node_ip_address = services.address_to_ip(node_ip_address)
redis_address = None
if address is not None:
(redis_address, redis_address_ip,
redis_address_port) = services.validate_redis_address(address)
try:
resources = json.loads(resources)
except Exception:
cli_logger.error("`{}` is not a valid JSON string.",
cf.bold("--resources"))
cli_logger.abort(
"Valid values look like this: `{}`",
cf.bold("--resources='\"CustomResource3\": 1, "
"\"CustomResource2\": 2}'"))
raise Exception("Unable to parse the --resources argument using "
"json.loads. Try using a format like\n\n"
" --resources='{\"CustomResource1\": 3, "
"\"CustomReseource2\": 2}'")
redirect_worker_output = None if not no_redirect_worker_output else True
redirect_output = None if not no_redirect_output else True
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
min_worker_port=min_worker_port,
max_worker_port=max_worker_port,
worker_port_list=worker_port_list,
object_manager_port=object_manager_port,
node_manager_port=node_manager_port,
gcs_server_port=gcs_server_port,
memory=memory,
object_store_memory=object_store_memory,
redis_password=redis_password,
redirect_worker_output=redirect_worker_output,
redirect_output=redirect_output,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
plasma_directory=plasma_directory,
huge_pages=False,
plasma_store_socket_name=plasma_store_socket_name,
raylet_socket_name=raylet_socket_name,
temp_dir=temp_dir,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
java_worker_options=java_worker_options,
_system_config=system_config,
lru_evict=lru_evict,
enable_object_reconstruction=enable_object_reconstruction,
metrics_export_port=metrics_export_port)
if head:
# Use default if port is none, allocate an available port if port is 0
if port is None:
port = ray_constants.DEFAULT_PORT
if port == 0:
with socket() as s:
s.bind(("", 0))
port = s.getsockname()[1]
num_redis_shards = None
# Start Ray on the head node.
if redis_shard_ports is not None:
redis_shard_ports = redis_shard_ports.split(",")
# Infer the number of Redis shards from the ports if the number is
# not provided.
num_redis_shards = len(redis_shard_ports)
if redis_address is not None:
cli_logger.abort(
"`{}` starts a new Redis server, `{}` should not be set.",
cf.bold("--head"), cf.bold("--address"))
raise Exception("If --head is passed in, a Redis server will be "
"started, so a Redis address should not be "
"provided.")
node_ip_address = services.get_node_ip_address()
# Get the node IP address if one is not provided.
ray_params.update_if_absent(node_ip_address=node_ip_address)
cli_logger.labeled_value("Local node IP", ray_params.node_ip_address)
ray_params.update_if_absent(
redis_port=port,
redis_shard_ports=redis_shard_ports,
redis_max_memory=redis_max_memory,
num_redis_shards=num_redis_shards,
redis_max_clients=None,
autoscaling_config=autoscaling_config,
)
# Fail early when starting a new cluster when one is already running
if address is None:
default_address = f"{node_ip_address}:{port}"
redis_addresses = services.find_redis_address(default_address)
if len(redis_addresses) > 0:
raise ConnectionError(
f"Ray is already running at {default_address}. "
f"Please specify a different port using the `--port`"
f" command to `ray start`.")
node = ray.node.Node(
ray_params, head=True, shutdown_at_exit=block, spawn_reaper=block)
redis_address = node.redis_address
# this is a noop if new-style is not set, so the old logger calls
# are still in place
cli_logger.newline()
startup_msg = "Ray runtime started."
cli_logger.success("-" * len(startup_msg))
cli_logger.success(startup_msg)
cli_logger.success("-" * len(startup_msg))
cli_logger.newline()
with cli_logger.group("Next steps"):
cli_logger.print(
"To connect to this Ray runtime from another node, run")
# NOTE(kfstorm): Java driver rely on this line to get the address
# of the cluster. Please be careful when updating this line.
cli_logger.print(
cf.bold(" ray start --address='{}'{}"), redis_address,
f" --redis-password='{redis_password}'"
if redis_password else "")
cli_logger.newline()
cli_logger.print("Alternatively, use the following Python code:")
with cli_logger.indented():
with cf.with_style("monokai") as c:
cli_logger.print("{} ray", c.magenta("import"))
cli_logger.print(
"ray{}init(address{}{}{})", c.magenta("."),
c.magenta("="), c.yellow("'auto'"),
", _redis_password{}{}".format(
c.magenta("="),
c.yellow("'" + redis_password + "'"))
if redis_password else "")
cli_logger.newline()
cli_logger.print(
cf.underlined("If connection fails, check your "
"firewall settings and "
"network configuration."))
cli_logger.newline()
cli_logger.print("To terminate the Ray runtime, run")
cli_logger.print(cf.bold(" ray stop"))
else:
# Start Ray on a non-head node.
if not (port is None):
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--port"), cf.bold("--head"))
raise Exception("If --head is not passed in, --port is not "
"allowed.")
if redis_shard_ports is not None:
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--redis-shard-ports"), cf.bold("--head"))
raise Exception("If --head is not passed in, --redis-shard-ports "
"is not allowed.")
if redis_address is None:
cli_logger.abort("`{}` is required unless starting with `{}`.",
cf.bold("--address"), cf.bold("--head"))
raise Exception("If --head is not passed in, --address must "
"be provided.")
if include_dashboard:
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--include-dashboard"), cf.bold("--head"))
raise ValueError(
"If --head is not passed in, the --include-dashboard"
"flag is not relevant.")
# Wait for the Redis server to be started. And throw an exception if we
# can't connect to it.
services.wait_for_redis_to_start(
redis_address_ip, redis_address_port, password=redis_password)
# Create a Redis client.
redis_client = services.create_redis_client(
redis_address, password=redis_password)
# Check that the version information on this node matches the version
# information that the cluster was started with.
services.check_version_info(redis_client)
# Get the node IP address if one is not provided.
ray_params.update_if_absent(
node_ip_address=services.get_node_ip_address(redis_address))
cli_logger.labeled_value("Local node IP", ray_params.node_ip_address)
# Check that there aren't already Redis clients with the same IP
# address connected with this Redis instance. This raises an exception
# if the Redis server already has clients on this node.
check_no_existing_redis_clients(ray_params.node_ip_address,
redis_client)
ray_params.update(redis_address=redis_address)
node = ray.node.Node(
ray_params, head=False, shutdown_at_exit=block, spawn_reaper=block)
cli_logger.newline()
startup_msg = "Ray runtime started."
cli_logger.success("-" * len(startup_msg))
cli_logger.success(startup_msg)
cli_logger.success("-" * len(startup_msg))
cli_logger.newline()
cli_logger.print("To terminate the Ray runtime, run")
cli_logger.print(cf.bold(" ray stop"))
if block:
cli_logger.newline()
with cli_logger.group(cf.bold("--block")):
cli_logger.print(
"This command will now block until terminated by a signal.")
cli_logger.print(
"Running subprocesses are monitored and a message will be "
"printed if any of them terminate unexpectedly.")
while True:
time.sleep(1)
deceased = node.dead_processes()
if len(deceased) > 0:
cli_logger.newline()
cli_logger.error("Some Ray subprcesses exited unexpectedly:")
with cli_logger.indented():
for process_type, process in deceased:
cli_logger.error(
"{}",
cf.bold(str(process_type)),
_tags={"exit code": str(process.returncode)})
# shutdown_at_exit will handle cleanup.
cli_logger.newline()
cli_logger.error("Remaining processes will be killed.")
sys.exit(1)
@cli.command()
@click.option(
"-f",
"--force",
is_flag=True,
help="If set, ray will send SIGKILL instead of SIGTERM.")
@add_click_options(logging_options)
def stop(force, verbose, log_style, log_color):
"""Stop Ray processes manually on the local machine."""
cli_logger.configure(log_style, log_color, verbose)
# Note that raylet needs to exit before object store, otherwise
# it cannot exit gracefully.
is_linux = sys.platform.startswith("linux")
processes_to_kill = [
# The first element is the substring to filter.
# The second element, if True, is to filter ps results by command name
        # (only the first 15 characters of the executable name on Linux);
# if False, is to filter ps results by command with all its arguments.
# See STANDARD FORMAT SPECIFIERS section of
# http://man7.org/linux/man-pages/man1/ps.1.html
# about comm and args. This can help avoid killing non-ray processes.
# Format:
# Keyword to filter, filter by command (True)/filter by args (False)
["raylet", True],
["plasma_store", True],
["gcs_server", True],
["monitor.py", False],
["redis-server", False],
["default_worker.py", False], # Python worker.
["ray::", True], # Python worker. TODO(mehrdadn): Fix for Windows
["io.ray.runtime.runner.worker.DefaultWorker", False], # Java worker.
["log_monitor.py", False],
["reporter.py", False],
["dashboard.py", False],
["new_dashboard/agent.py", False],
["ray_process_reaper.py", False],
]
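    # Illustration (hypothetical process): "raylet" above is matched against
    # the 15-character comm field on Linux, while "default_worker.py" is
    # matched against the full command line, so a process started as
    #   python /path/to/default_worker.py --node-ip-address=...
    # is found via its arguments rather than its executable name ("python").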
process_infos = []
for proc in psutil.process_iter(["name", "cmdline"]):
try:
process_infos.append((proc, proc.name(), proc.cmdline()))
except psutil.Error:
pass
total_found = 0
total_stopped = 0
for keyword, filter_by_cmd in processes_to_kill:
if filter_by_cmd and is_linux and len(keyword) > 15:
# getting here is an internal bug, so we do not use cli_logger
msg = ("The filter string should not be more than {} "
"characters. Actual length: {}. Filter: {}").format(
15, len(keyword), keyword)
raise ValueError(msg)
found = []
for candidate in process_infos:
proc, proc_cmd, proc_args = candidate
corpus = (proc_cmd
if filter_by_cmd else subprocess.list2cmdline(proc_args))
if keyword in corpus:
found.append(candidate)
for proc, proc_cmd, proc_args in found:
total_found += 1
proc_string = str(subprocess.list2cmdline(proc_args))
try:
if force:
proc.kill()
else:
# TODO(mehrdadn): On Windows, this is forceful termination.
# We don't want CTRL_BREAK_EVENT, because that would
# terminate the entire process group. What to do?
proc.terminate()
if force:
cli_logger.verbose("Killed `{}` {} ", cf.bold(proc_string),
cf.dimmed("(via SIGKILL)"))
else:
cli_logger.verbose("Send termination request to `{}` {}",
cf.bold(proc_string),
cf.dimmed("(via SIGTERM)"))
total_stopped += 1
except psutil.NoSuchProcess:
cli_logger.verbose(
"Attempted to stop `{}`, but process was already dead.",
cf.bold(proc_string))
pass
except (psutil.Error, OSError) as ex:
cli_logger.error("Could not terminate `{}` due to {}",
cf.bold(proc_string), str(ex))
if total_found == 0:
cli_logger.print("Did not find any active Ray processes.")
else:
if total_stopped == total_found:
cli_logger.success("Stopped all {} Ray processes.", total_stopped)
else:
cli_logger.warning(
"Stopped only {} out of {} Ray processes. "
"Set `{}` to see more details.", total_stopped, total_found,
cf.bold("-v"))
cli_logger.warning("Try running the command again, or use `{}`.",
cf.bold("--force"))
# TODO(maximsmol): we should probably block until the processes actually
# all died somehow
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--min-workers",
required=False,
type=int,
help="Override the configured min worker node count for the cluster.")
@click.option(
"--max-workers",
required=False,
type=int,
help="Override the configured max worker node count for the cluster.")
@click.option(
"--no-restart",
is_flag=True,
default=False,
help=("Whether to skip restarting Ray services during the update. "
"This avoids interrupting running jobs."))
@click.option(
"--restart-only",
is_flag=True,
default=False,
help=("Whether to skip running setup commands and only restart Ray. "
"This cannot be used with 'no-restart'."))
@click.option(
"--yes",
"-y",
is_flag=True,
default=False,
help="Don't ask for confirmation.")
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@click.option(
"--no-config-cache",
is_flag=True,
default=False,
help="Disable the local cluster config cache.")
@click.option(
"--redirect-command-output",
is_flag=True,
default=False,
help="Whether to redirect command output to a file.")
@click.option(
"--use-login-shells/--use-normal-shells",
is_flag=True,
default=True,
help=("Ray uses login shells (bash --login -i) to run cluster commands "
"by default. If your workflow is compatible with normal shells, "
"this can be disabled for a better user experience."))
@add_click_options(logging_options)
def up(cluster_config_file, min_workers, max_workers, no_restart, restart_only,
yes, cluster_name, no_config_cache, redirect_command_output,
use_login_shells, log_style, log_color, verbose):
"""Create or update a Ray cluster."""
cli_logger.configure(log_style, log_color, verbose)
if restart_only or no_restart:
cli_logger.doassert(restart_only != no_restart,
"`{}` is incompatible with `{}`.",
cf.bold("--restart-only"), cf.bold("--no-restart"))
assert restart_only != no_restart, "Cannot set both 'restart_only' " \
"and 'no_restart' at the same time!"
if urllib.parse.urlparse(cluster_config_file).scheme in ("http", "https"):
try:
response = urllib.request.urlopen(cluster_config_file, timeout=5)
content = response.read()
file_name = cluster_config_file.split("/")[-1]
with open(file_name, "wb") as f:
f.write(content)
cluster_config_file = file_name
except urllib.error.HTTPError as e:
cli_logger.warning("{}", str(e))
cli_logger.warning(
"Could not download remote cluster configuration file.")
create_or_update_cluster(
config_file=cluster_config_file,
override_min_workers=min_workers,
override_max_workers=max_workers,
no_restart=no_restart,
restart_only=restart_only,
yes=yes,
override_cluster_name=cluster_name,
no_config_cache=no_config_cache,
redirect_command_output=redirect_command_output,
use_login_shells=use_login_shells)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--yes",
"-y",
is_flag=True,
default=False,
help="Don't ask for confirmation.")
@click.option(
"--workers-only",
is_flag=True,
default=False,
help="Only destroy the workers.")
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@click.option(
"--keep-min-workers",
is_flag=True,
default=False,
help="Retain the minimal amount of workers specified in the config.")
@add_click_options(logging_options)
def down(cluster_config_file, yes, workers_only, cluster_name,
keep_min_workers, log_style, log_color, verbose):
"""Tear down a Ray cluster."""
cli_logger.configure(log_style, log_color, verbose)
teardown_cluster(cluster_config_file, yes, workers_only, cluster_name,
keep_min_workers)
@cli.command(hidden=True)
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--yes",
"-y",
is_flag=True,
default=False,
help="Don't ask for confirmation.")
@click.option(
"--hard",
is_flag=True,
default=False,
help="Terminates the node via node provider (defaults to a 'soft kill'"
" which terminates Ray but does not actually delete the instances).")
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
def kill_random_node(cluster_config_file, yes, hard, cluster_name):
"""Kills a random Ray node. For testing purposes only."""
click.echo("Killed node with IP " +
kill_node(cluster_config_file, yes, hard, cluster_name))
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--lines",
required=False,
default=100,
type=int,
help="Number of lines to tail.")
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@add_click_options(logging_options)
def monitor(cluster_config_file, lines, cluster_name, log_style, log_color,
verbose):
"""Tails the autoscaler logs of a Ray cluster."""
cli_logger.configure(log_style, log_color, verbose)
monitor_cluster(cluster_config_file, lines, cluster_name)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--start",
is_flag=True,
default=False,
help="Start the cluster if needed.")
@click.option(
"--screen", is_flag=True, default=False, help="Run the command in screen.")
@click.option(
"--tmux", is_flag=True, default=False, help="Run the command in tmux.")
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@click.option(
"--no-config-cache",
is_flag=True,
default=False,
help="Disable the local cluster config cache.")
@click.option(
"--new", "-N", is_flag=True, help="Force creation of a new screen.")
@click.option(
"--port-forward",
"-p",
required=False,
multiple=True,
type=int,
help="Port to forward. Use this multiple times to forward multiple ports.")
@add_click_options(logging_options)
def attach(cluster_config_file, start, screen, tmux, cluster_name,
no_config_cache, new, port_forward, log_style, log_color, verbose):
"""Create or attach to a SSH session to a Ray cluster."""
cli_logger.configure(log_style, log_color, verbose)
port_forward = [(port, port) for port in list(port_forward)]
attach_cluster(
cluster_config_file,
start,
screen,
tmux,
cluster_name,
no_config_cache=no_config_cache,
new=new,
port_forward=port_forward)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.argument("source", required=False, type=str)
@click.argument("target", required=False, type=str)
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@add_click_options(logging_options)
def rsync_down(cluster_config_file, source, target, cluster_name, log_style,
log_color, verbose):
"""Download specific files from a Ray cluster."""
cli_logger.configure(log_style, log_color, verbose)
rsync(cluster_config_file, source, target, cluster_name, down=True)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.argument("source", required=False, type=str)
@click.argument("target", required=False, type=str)
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@click.option(
"--all-nodes",
"-A",
is_flag=True,
required=False,
help="Upload to all nodes (workers and head).")
@add_click_options(logging_options)
def rsync_up(cluster_config_file, source, target, cluster_name, all_nodes,
log_style, log_color, verbose):
"""Upload specific files to a Ray cluster."""
cli_logger.configure(log_style, log_color, verbose)
if all_nodes:
cli_logger.warning(
"WARNING: the `all_nodes` option is deprecated and will be "
"removed in the future. "
"Rsync to worker nodes is not reliable since workers may be "
"added during autoscaling. Please use the `file_mounts` "
"feature instead for consistent file sync in autoscaling clusters")
rsync(
cluster_config_file,
source,
target,
cluster_name,
down=False,
all_nodes=all_nodes)
@cli.command(context_settings={"ignore_unknown_options": True})
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--stop",
is_flag=True,
default=False,
help="Stop the cluster after the command finishes running.")
@click.option(
"--start",
is_flag=True,
default=False,
help="Start the cluster if needed.")
@click.option(
"--screen",
is_flag=True,
default=False,
help="Run the command in a screen.")
@click.option(
"--tmux", is_flag=True, default=False, help="Run the command in tmux.")
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@click.option(
"--no-config-cache",
is_flag=True,
default=False,
help="Disable the local cluster config cache.")
@click.option(
"--port-forward",
"-p",
required=False,
multiple=True,
type=int,
help="Port to forward. Use this multiple times to forward multiple ports.")
@click.argument("script", required=True, type=str)
@click.option(
"--args",
required=False,
type=str,
help="(deprecated) Use '-- --arg1 --arg2' for script args.")
@click.argument("script_args", nargs=-1)
@add_click_options(logging_options)
def submit(cluster_config_file, screen, tmux, stop, start, cluster_name,
no_config_cache, port_forward, script, args, script_args, log_style,
log_color, verbose):
"""Uploads and runs a script on the specified cluster.
The script is automatically synced to the following location:
os.path.join("~", os.path.basename(script))
Example:
>>> ray submit [CLUSTER.YAML] experiment.py -- --smoke-test
"""
cli_logger.configure(log_style, log_color, verbose)
cli_logger.doassert(not (screen and tmux),
"`{}` and `{}` are incompatible.", cf.bold("--screen"),
cf.bold("--tmux"))
cli_logger.doassert(
not (script_args and args),
"`{0}` and `{1}` are incompatible. Use only `{1}`.\n"
"Example: `{2}`", cf.bold("--args"), cf.bold("-- <args ...>"),
cf.bold("ray submit script.py -- --arg=123 --flag"))
assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."
assert not (script_args and args), "Use -- --arg1 --arg2 for script args."
if args:
cli_logger.warning(
"`{}` is deprecated and will be removed in the future.",
cf.bold("--args"))
cli_logger.warning("Use `{}` instead. Example: `{}`.",
cf.bold("-- <args ...>"),
cf.bold("ray submit script.py -- --arg=123 --flag"))
cli_logger.newline()
if start:
create_or_update_cluster(
config_file=cluster_config_file,
override_min_workers=None,
override_max_workers=None,
no_restart=False,
restart_only=False,
yes=True,
override_cluster_name=cluster_name,
no_config_cache=no_config_cache,
redirect_command_output=False,
use_login_shells=True)
target = os.path.basename(script)
target = os.path.join("~", target)
rsync(
cluster_config_file,
script,
target,
cluster_name,
no_config_cache=no_config_cache,
down=False)
command_parts = ["python", target]
if script_args:
command_parts += list(script_args)
elif args is not None:
command_parts += [args]
port_forward = [(port, port) for port in list(port_forward)]
cmd = " ".join(command_parts)
exec_cluster(
cluster_config_file,
cmd=cmd,
run_env="docker",
screen=screen,
tmux=tmux,
stop=stop,
start=False,
override_cluster_name=cluster_name,
no_config_cache=no_config_cache,
port_forward=port_forward)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.argument("cmd", required=True, type=str)
@click.option(
"--run-env",
required=False,
type=click.Choice(RUN_ENV_TYPES),
default="auto",
help="Choose whether to execute this command in a container or directly on"
" the cluster head. Only applies when docker is configured in the YAML.")
@click.option(
"--stop",
is_flag=True,
default=False,
help="Stop the cluster after the command finishes running.")
@click.option(
"--start",
is_flag=True,
default=False,
help="Start the cluster if needed.")
@click.option(
"--screen",
is_flag=True,
default=False,
help="Run the command in a screen.")
@click.option(
"--tmux", is_flag=True, default=False, help="Run the command in tmux.")
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@click.option(
"--no-config-cache",
is_flag=True,
default=False,
help="Disable the local cluster config cache.")
@click.option(
"--port-forward",
"-p",
required=False,
multiple=True,
type=int,
help="Port to forward. Use this multiple times to forward multiple ports.")
@add_click_options(logging_options)
def exec(cluster_config_file, cmd, run_env, screen, tmux, stop, start,
cluster_name, no_config_cache, port_forward, log_style, log_color,
verbose):
"""Execute a command via SSH on a Ray cluster."""
cli_logger.configure(log_style, log_color, verbose)
port_forward = [(port, port) for port in list(port_forward)]
exec_cluster(
cluster_config_file,
cmd=cmd,
run_env=run_env,
screen=screen,
tmux=tmux,
stop=stop,
start=start,
override_cluster_name=cluster_name,
no_config_cache=no_config_cache,
port_forward=port_forward)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
def get_head_ip(cluster_config_file, cluster_name):
"""Return the head node IP of a Ray cluster."""
click.echo(get_head_node_ip(cluster_config_file, cluster_name))
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
def get_worker_ips(cluster_config_file, cluster_name):
"""Return the list of worker IPs of a Ray cluster."""
worker_ips = get_worker_node_ips(cluster_config_file, cluster_name)
click.echo("\n".join(worker_ips))
@cli.command()
def stack():
"""Take a stack dump of all Python workers on the local machine."""
COMMAND = """
pyspy=`which py-spy`
if [ ! -e "$pyspy" ]; then
echo "ERROR: Please 'pip install py-spy' first"
exit 1
fi
# Set IFS to iterate over lines instead of over words.
export IFS="
"
# Call sudo to prompt for password before anything has been printed.
sudo true
workers=$(
ps aux | grep -E ' ray_|default_worker.py' | grep -v grep
)
for worker in $workers; do
echo "Stack dump for $worker";
pid=`echo $worker | awk '{print $2}'`;
sudo $pyspy dump --pid $pid;
echo;
done
"""
subprocess.call(COMMAND, shell=True)
@cli.command()
def microbenchmark():
"""Run a local Ray microbenchmark on the current machine."""
from ray.ray_perf import main
main()
@cli.command()
@click.option(
"--address",
required=False,
type=str,
help="Override the redis address to connect to.")
def timeline(address):
"""Take a Chrome tracing timeline for a Ray cluster."""
if not address:
address = services.get_ray_address_to_use_or_die()
logger.info(f"Connecting to Ray instance at {address}.")
ray.init(address=address)
time = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
filename = os.path.join(ray.utils.get_user_temp_dir(),
f"ray-timeline-{time}.json")
ray.timeline(filename=filename)
size = os.path.getsize(filename)
logger.info(f"Trace file written to {filename} ({size} bytes).")
logger.info(
"You can open this with chrome://tracing in the Chrome browser.")
@cli.command()
@click.option(
"--address",
required=False,
type=str,
help="Override the address to connect to.")
@click.option(
"--redis_password",
required=False,
type=str,
default=ray_constants.REDIS_DEFAULT_PASSWORD,
help="Connect to ray with redis_password.")
def memory(address, redis_password):
"""Print object references held in a Ray cluster."""
if not address:
address = services.get_ray_address_to_use_or_die()
logger.info(f"Connecting to Ray instance at {address}.")
ray.init(address=address, _redis_password=redis_password)
print(ray.internal.internal_api.memory_summary())
@cli.command()
@click.option(
"--address",
required=False,
type=str,
help="Override the address to connect to.")
def status(address):
"""Print cluster status, including autoscaling info."""
if not address:
address = services.get_ray_address_to_use_or_die()
logger.info(f"Connecting to Ray instance at {address}.")
ray.init(address=address)
print(debug_status())
@cli.command(hidden=True)
@click.option(
"--address",
required=False,
type=str,
help="Override the address to connect to.")
def global_gc(address):
"""Trigger Python garbage collection on all cluster workers."""
if not address:
address = services.get_ray_address_to_use_or_die()
logger.info(f"Connecting to Ray instance at {address}.")
ray.init(address=address)
ray.internal.internal_api.global_gc()
print("Triggered gc.collect() on all workers.")
@cli.command()
@click.option("-v", "--verbose", is_flag=True)
@click.option(
"--dryrun",
is_flag=True,
help="Identifies the wheel but does not execute the installation.")
def install_nightly(verbose, dryrun):
"""Install the latest wheels for Ray.
This uses the same python environment as the one that Ray is currently
    installed in. Make sure that there are no Ray processes on this
machine (ray stop) when running this command.
"""
raydir = os.path.abspath(os.path.dirname(ray.__file__))
all_wheels_path = os.path.join(raydir, "nightly-wheels.yaml")
wheels = None
if os.path.exists(all_wheels_path):
with open(all_wheels_path) as f:
wheels = yaml.safe_load(f)
if not wheels:
raise click.ClickException(
f"Wheels not found in '{all_wheels_path}'! "
"Please visit https://docs.ray.io/en/master/installation.html to "
"obtain the latest wheels.")
platform = sys.platform
py_version = "{0}.{1}".format(*sys.version_info[:2])
matching_wheel = None
for target_platform, wheel_map in wheels.items():
if verbose:
print(f"Evaluating os={target_platform}, python={list(wheel_map)}")
if platform.startswith(target_platform):
if py_version in wheel_map:
matching_wheel = wheel_map[py_version]
break
if verbose:
print("Not matched.")
if matching_wheel is None:
raise click.ClickException(
"Unable to identify a matching platform. "
"Please visit https://docs.ray.io/en/master/installation.html to "
"obtain the latest wheels.")
if dryrun:
print(f"Found wheel: {matching_wheel}")
else:
cmd = [sys.executable, "-m", "pip", "install", "-U", matching_wheel]
print(f"Running: {' '.join(cmd)}.")
subprocess.check_call(cmd)
def add_command_alias(command, name, hidden):
new_command = copy.deepcopy(command)
new_command.hidden = hidden
cli.add_command(new_command, name=name)
cli.add_command(dashboard)
cli.add_command(debug)
cli.add_command(start)
cli.add_command(stop)
cli.add_command(up)
add_command_alias(up, name="create_or_update", hidden=True)
cli.add_command(attach)
cli.add_command(exec)
add_command_alias(exec, name="exec_cmd", hidden=True)
add_command_alias(rsync_down, name="rsync_down", hidden=True)
add_command_alias(rsync_up, name="rsync_up", hidden=True)
cli.add_command(submit)
cli.add_command(down)
add_command_alias(down, name="teardown", hidden=True)
cli.add_command(kill_random_node)
add_command_alias(get_head_ip, name="get_head_ip", hidden=True)
cli.add_command(get_worker_ips)
cli.add_command(microbenchmark)
cli.add_command(stack)
cli.add_command(status)
cli.add_command(memory)
cli.add_command(global_gc)
cli.add_command(timeline)
cli.add_command(install_nightly)
try:
from ray.serve.scripts import serve_cli
cli.add_command(serve_cli)
except Exception as e:
logger.debug(f"Integrating ray serve command line tool failed with {e}")
def main():
return cli()
if __name__ == "__main__":
main()
| 34.548322 | 79 | 0.633176 |
4a1dd70e18f744e9f06b1a6aaf2c1a0aba24ce02 | 5,459 | py | Python | private/templates/skeleton/config.py | hitesh96db/eden | 8e1b22d7d4b92c0bce5b6172d57298949a2f0582 | ["MIT"] | null | null | null | private/templates/skeleton/config.py | hitesh96db/eden | 8e1b22d7d4b92c0bce5b6172d57298949a2f0582 | ["MIT"] | null | null | null | private/templates/skeleton/config.py | hitesh96db/eden | 8e1b22d7d4b92c0bce5b6172d57298949a2f0582 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.storage import Storage
T = current.T
settings = current.deployment_settings
"""
Template settings
All settings which are to configure a specific template are located here
Deployers should ideally not need to edit any other files outside of their template folder
"""
# PrePopulate data
settings.base.prepopulate = ("skeleton", "demo/users")
# Theme (folder to use for views/layout.html)
settings.base.theme = "skeleton"
# Authentication settings
# Should users be allowed to register themselves?
#settings.security.self_registration = False
# Do new users need to verify their email address?
#settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
#settings.auth.registration_requires_approval = True
# Restrict the Location Selector to just certain countries
# NB This can also be over-ridden for specific contexts later
# e.g. Activities filtered to those of parent Project
#settings.gis.countries = ["US"]
# L10n settings
# Languages used in the deployment (used for Language Toolbar & GIS Locations)
# http://www.loc.gov/standards/iso639-2/php/code_list.php
#settings.L10n.languages = OrderedDict([
# ("ar", "العربية"),
# ("bs", "Bosanski"),
# ("en", "English"),
# ("fr", "Français"),
# ("de", "Deutsch"),
# ("el", "ελληνικά"),
# ("es", "Español"),
# ("it", "Italiano"),
# ("ja", "日本語"),
# ("km", "ភាសាខ្មែរ"),
# ("ko", "한국어"),
# ("ne", "नेपाली"), # Nepali
# ("prs", "دری"), # Dari
# ("ps", "پښتو"), # Pashto
# ("pt", "Português"),
# ("pt-br", "Português (Brasil)"),
# ("ru", "русский"),
# ("tet", "Tetum"),
# ("tl", "Tagalog"),
# ("ur", "اردو"),
# ("vi", "Tiếng Việt"),
# ("zh-cn", "中文 (简体)"),
# ("zh-tw", "中文 (繁體)"),
#])
# Default language for Language Toolbar (& GIS Locations in future)
#settings.L10n.default_language = "en"
# Uncomment to Hide the language toolbar
#settings.L10n.display_toolbar = False
# Security Policy
# http://eden.sahanafoundation.org/wiki/S3AAA#System-widePolicy
# 1: Simple (default): Global as Reader, Authenticated as Editor
# 2: Editor role required for Update/Delete, unless record owned by session
# 3: Apply Controller ACLs
# 4: Apply both Controller & Function ACLs
# 5: Apply Controller, Function & Table ACLs
# 6: Apply Controller, Function, Table ACLs and Entity Realm
# 7: Apply Controller, Function, Table ACLs and Entity Realm + Hierarchy
# 8: Apply Controller, Function, Table ACLs, Entity Realm + Hierarchy and Delegations
#
#settings.security.policy = 7 # Organisation-ACLs
# Comment/uncomment modules here to disable/enable them
# Modules menu is defined in modules/eden/menu.py
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
#("sync", Storage(
# name_nice = T("Synchronization"),
# #description = "Synchronization",
# restricted = True,
# access = "|1|", # Only Administrators can see this module in the default menu & access the controller
# module_type = None # This item is handled separately for the menu
#)),
#("tour", Storage(
# name_nice = T("Guided Tour Functionality"),
# module_type = None,
#)),
#("translate", Storage(
# name_nice = T("Translation Functionality"),
# #description = "Selective translation of strings based on module.",
# module_type = None,
#)),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 1
)),
])
| 35.448052 | 137 | 0.644807 |
4a1dd724f7bae2c03e874e52ed6ab77c8adef5c2 | 10,872 | py | Python | train.py | Archie7777/spectral-graph-augmentation | c3f22094b79a61357be55aa284d5f5a7df43928d | ["MIT"] | null | null | null | train.py | Archie7777/spectral-graph-augmentation | c3f22094b79a61357be55aa284d5f5a7df43928d | ["MIT"] | null | null | null | train.py | Archie7777/spectral-graph-augmentation | c3f22094b79a61357be55aa284d5f5a7df43928d | ["MIT"] | null | null | null |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from process.dataset import load
class GCNLayer(nn.Module):
def __init__(self, in_ft, out_ft, bias=True):
super(GCNLayer, self).__init__()
self.fc = nn.Linear(in_ft, out_ft, bias=False)
self.act = nn.PReLU()
if bias:
self.bias = nn.Parameter(torch.FloatTensor(out_ft))
self.bias.data.fill_(0.0)
else:
self.register_parameter('bias', None)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, feat, adj):
feat = self.fc(feat)
out = torch.bmm(adj, feat)
if self.bias is not None:
out += self.bias
return self.act(out)
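# Minimal shape-check sketch for GCNLayer (sizes are made up and CPU-only;
# not part of the training pipeline): two graphs with 5 nodes and 8 input
# features, propagated through a self-loop-only (identity) adjacency.
def _gcn_layer_shape_example():
    layer = GCNLayer(in_ft=8, out_ft=16)
    feat = torch.randn(2, 5, 8)                      # (batch, nodes, features)
    adj = torch.eye(5).unsqueeze(0).repeat(2, 1, 1)  # (batch, nodes, nodes)
    out = layer(feat, adj)
    assert out.shape == (2, 5, 16)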
class GCN(nn.Module):
def __init__(self, in_ft, out_ft, num_layers):
super(GCN, self).__init__()
n_h = out_ft
        self.num_layers = num_layers
        # Register the stacked layers in an nn.ModuleList (a plain Python
        # list would hide their weights from model.parameters(), so the
        # optimizer would never update them).
        self.layers = nn.ModuleList([GCNLayer(in_ft, n_h)])
        for __ in range(num_layers - 1):
            self.layers.append(GCNLayer(n_h, n_h))
def forward(self, feat, adj, mask):
h_1 = self.layers[0](feat, adj)
h_1g = torch.sum(h_1, 1)
for idx in range(self.num_layers - 1):
h_1 = self.layers[idx + 1](h_1, adj)
h_1g = torch.cat((h_1g, torch.sum(h_1, 1)), -1)
return h_1, h_1g
class MLP(nn.Module):
def __init__(self, in_ft, out_ft):
super(MLP, self).__init__()
self.ffn = nn.Sequential(
nn.Linear(in_ft, out_ft),
nn.PReLU(),
nn.Linear(out_ft, out_ft),
nn.PReLU(),
nn.Linear(out_ft, out_ft),
nn.PReLU()
)
self.linear_shortcut = nn.Linear(in_ft, out_ft)
def forward(self, x):
return self.ffn(x) + self.linear_shortcut(x)
class Model(nn.Module):
def __init__(self, n_in, n_h, num_layers):
super(Model, self).__init__()
self.mlp1 = MLP(1 * n_h, n_h)
self.mlp2 = MLP(num_layers * n_h, n_h)
self.gnn1 = GCN(n_in, n_h, num_layers)
self.gnn2 = GCN(n_in, n_h, num_layers)
def forward(self, adj, diff, feat, mask):
lv1, gv1 = self.gnn1(feat, adj, mask)
lv2, gv2 = self.gnn2(feat, diff, mask)
lv1 = self.mlp1(lv1)
lv2 = self.mlp1(lv2)
gv1 = self.mlp2(gv1)
gv2 = self.mlp2(gv2)
return lv1, gv1, lv2, gv2
def embed(self, feat, adj, diff, mask):
__, gv1, __, gv2 = self.forward(adj, diff, feat, mask)
return (gv1 + gv2).detach()
# Borrowed from https://github.com/fanyun-sun/InfoGraph
def get_positive_expectation(p_samples, measure, average=True):
"""Computes the positive part of a divergence / difference.
Args:
p_samples: Positive samples.
measure: Measure to compute for.
average: Average the result over samples.
Returns:
torch.Tensor
"""
log_2 = np.log(2.)
if measure == 'GAN':
Ep = - F.softplus(-p_samples)
elif measure == 'JSD':
Ep = log_2 - F.softplus(- p_samples)
elif measure == 'X2':
Ep = p_samples ** 2
elif measure == 'KL':
Ep = p_samples + 1.
elif measure == 'RKL':
Ep = -torch.exp(-p_samples)
elif measure == 'DV':
Ep = p_samples
elif measure == 'H2':
Ep = 1. - torch.exp(-p_samples)
    elif measure == 'W1':
        Ep = p_samples
    else:
        raise ValueError(f"Unknown measure: {measure}")
if average:
return Ep.mean()
else:
return Ep
# Borrowed from https://github.com/fanyun-sun/InfoGraph
def get_negative_expectation(q_samples, measure, average=True):
"""Computes the negative part of a divergence / difference.
Args:
q_samples: Negative samples.
measure: Measure to compute for.
average: Average the result over samples.
Returns:
torch.Tensor
"""
log_2 = np.log(2.)
if measure == 'GAN':
Eq = F.softplus(-q_samples) + q_samples
elif measure == 'JSD':
Eq = F.softplus(-q_samples) + q_samples - log_2
elif measure == 'X2':
Eq = -0.5 * ((torch.sqrt(q_samples ** 2) + 1.) ** 2)
elif measure == 'KL':
Eq = torch.exp(q_samples)
elif measure == 'RKL':
Eq = q_samples - 1.
elif measure == 'H2':
Eq = torch.exp(q_samples) - 1.
    elif measure == 'W1':
        Eq = q_samples
    else:
        raise ValueError(f"Unknown measure: {measure}")
if average:
return Eq.mean()
else:
return Eq
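# Hedged numeric sanity check (added for illustration, not from the original
# InfoGraph code): with JSD, confident scores on positive pairs push E_pos
# above zero and confident low scores on negative pairs push E_neg below
# zero, so the estimated divergence E_pos - E_neg is positive.
def _jsd_expectation_example():
    pos_scores = torch.tensor([4.0, 5.0])
    neg_scores = torch.tensor([-4.0, -5.0])
    e_pos = get_positive_expectation(pos_scores, 'JSD')
    e_neg = get_negative_expectation(neg_scores, 'JSD')
    assert e_pos.item() > 0 and e_neg.item() < 0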
# Borrowed from https://github.com/fanyun-sun/InfoGraph
def local_global_loss_(l_enc, g_enc, batch, measure, mask):
'''
    Args:
        l_enc: Local (node-level) feature map.
        g_enc: Global (graph-level) features.
        batch: Graph-membership index for each node.
        measure: Type of f-divergence. For use with mode `fd`.
        mode: Loss mode. Fenchel-dual `fd`, NCE `nce`, or Donsker-Varadhan `dv`.
        mask: Number of valid (non-padded) nodes per graph.
Returns:
torch.Tensor: Loss.
'''
num_graphs = g_enc.shape[0]
num_nodes = l_enc.shape[0]
max_nodes = num_nodes // num_graphs
pos_mask = torch.zeros((num_nodes, num_graphs)).cuda()
neg_mask = torch.ones((num_nodes, num_graphs)).cuda()
msk = torch.ones((num_nodes, num_graphs)).cuda()
for nodeidx, graphidx in enumerate(batch):
pos_mask[nodeidx][graphidx] = 1.
neg_mask[nodeidx][graphidx] = 0.
for idx, m in enumerate(mask):
msk[idx * max_nodes + m: idx * max_nodes + max_nodes, idx] = 0.
res = torch.mm(l_enc, g_enc.t()) * msk
E_pos = get_positive_expectation(res * pos_mask, measure, average=False).sum()
E_pos = E_pos / num_nodes
E_neg = get_negative_expectation(res * neg_mask, measure, average=False).sum()
E_neg = E_neg / (num_nodes * (num_graphs - 1))
return E_neg - E_pos
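# Illustration (assumed sizes) of the membership masks built above: with two
# graphs of three nodes each, batch = [0, 0, 0, 1, 1, 1], so every node is a
# positive for its own graph's global vector and a negative for the other.
def _membership_mask_example():
    batch = [0, 0, 0, 1, 1, 1]
    pos_mask = torch.zeros(6, 2)
    for nodeidx, graphidx in enumerate(batch):
        pos_mask[nodeidx][graphidx] = 1.
    neg_mask = 1. - pos_mask
    assert pos_mask.sum().item() == 6. and neg_mask.sum().item() == 6.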
def global_global_loss_(g1_enc, g2_enc, measure):
'''
    Args:
        g1_enc: Global features of the first view.
        g2_enc: Global features of the second view.
        measure: Type of f-divergence. For use with mode `fd`.
        mode: Loss mode. Fenchel-dual `fd`, NCE `nce`, or Donsker-Varadhan `dv`.
Returns:
torch.Tensor: Loss.
'''
num_graphs = g1_enc.shape[0]
pos_mask = torch.zeros((num_graphs, num_graphs)).cuda()
neg_mask = torch.ones((num_graphs, num_graphs)).cuda()
for graphidx in range(num_graphs):
pos_mask[graphidx][graphidx] = 1.
neg_mask[graphidx][graphidx] = 0.
res = torch.mm(g1_enc, g2_enc.t())
E_pos = get_positive_expectation(res * pos_mask, measure, average=False).sum()
E_pos = E_pos / num_graphs
E_neg = get_negative_expectation(res * neg_mask, measure, average=False).sum()
E_neg = E_neg / (num_graphs * (num_graphs - 1))
return E_neg - E_pos
def train(dataset, gpu, num_layer=4, epoch=40, batch=64):
nb_epochs = epoch
batch_size = batch
patience = 20
lr = 0.001
l2_coef = 0.0
hid_units = 512
adj, diff, feat, labels, num_nodes = load(dataset)
feat = torch.FloatTensor(feat).cuda()
diff = torch.FloatTensor(diff).cuda()
adj = torch.FloatTensor(adj).cuda()
labels = torch.LongTensor(labels).cuda()
ft_size = feat[0].shape[1]
max_nodes = feat[0].shape[0]
model = Model(ft_size, hid_units, num_layer)
optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)
model.cuda()
cnt_wait = 0
best = 1e9
itr = (adj.shape[0] // batch_size) + 1
for epoch in range(nb_epochs):
epoch_loss = 0.0
train_idx = np.arange(adj.shape[0])
np.random.shuffle(train_idx)
for idx in range(0, len(train_idx), batch_size):
model.train()
optimiser.zero_grad()
batch = train_idx[idx: idx + batch_size]
mask = num_nodes[idx: idx + batch_size]
lv1, gv1, lv2, gv2 = model(adj[batch], diff[batch], feat[batch], mask)
lv1 = lv1.view(batch.shape[0] * max_nodes, -1)
lv2 = lv2.view(batch.shape[0] * max_nodes, -1)
batch = torch.LongTensor(np.repeat(np.arange(batch.shape[0]), max_nodes)).cuda()
loss1 = local_global_loss_(lv1, gv2, batch, 'JSD', mask)
loss2 = local_global_loss_(lv2, gv1, batch, 'JSD', mask)
# loss3 = global_global_loss_(gv1, gv2, 'JSD')
loss = loss1 + loss2 #+ loss3
epoch_loss += loss
loss.backward()
optimiser.step()
epoch_loss /= itr
# print('Epoch: {0}, Loss: {1:0.4f}'.format(epoch, epoch_loss))
if epoch_loss < best:
best = epoch_loss
best_t = epoch
cnt_wait = 0
torch.save(model.state_dict(), f'{dataset}-{gpu}.pkl')
else:
cnt_wait += 1
if cnt_wait == patience:
break
model.load_state_dict(torch.load(f'{dataset}-{gpu}.pkl'))
features = feat.cuda()
adj = adj.cuda()
diff = diff.cuda()
labels = labels.cuda()
embeds = model.embed(features, adj, diff, num_nodes)
x = embeds.cpu().numpy()
y = labels.cpu().numpy()
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
params = {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000]}
kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=None)
accuracies = []
for train_index, test_index in kf.split(x, y):
x_train, x_test = x[train_index], x[test_index]
y_train, y_test = y[train_index], y[test_index]
classifier = GridSearchCV(LinearSVC(), params, cv=5, scoring='accuracy', verbose=0)
classifier.fit(x_train, y_train)
accuracies.append(accuracy_score(y_test, classifier.predict(x_test)))
print(np.mean(accuracies), np.std(accuracies))
if __name__ == '__main__':
import warnings
warnings.filterwarnings("ignore")
gpu = 0
torch.cuda.set_device(gpu)
layers = [2, 8, 12]
batch = [32, 64, 128, 256]
epoch = [20, 40, 100]
ds = ['MUTAG'] # , 'PTC_MR', 'IMDB-BINARY', 'IMDB-MULTI', 'REDDIT-BINARY', 'REDDIT-MULTI-5K']
seeds = [123, 132, 321, 312, 231]
for d in ds:
print(f'####################{d}####################')
for l in layers:
for b in batch:
for e in epoch:
for i in range(5):
seed = seeds[i]
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
print(f'Dataset: {d}, Layer:{l}, Batch: {b}, Epoch: {e}, Seed: {seed}')
train(d, gpu, l, e, b)
print('################################################') | 31.331412 | 98 | 0.578734 |
4a1dd72afb04c845062ab9e5ba4b6a9ef729cfd4 | 3,423 | py | Python | bitmovin_api_sdk/encoding/filters/audio_mix/audio_mix_api.py | hofmannben/bitmovin-api-sdk-python | 71aae5cd8a31aa0ad54ca07a6f546a624e8686a9 | [
"MIT"
] | null | null | null | bitmovin_api_sdk/encoding/filters/audio_mix/audio_mix_api.py | hofmannben/bitmovin-api-sdk-python | 71aae5cd8a31aa0ad54ca07a6f546a624e8686a9 | [
"MIT"
] | 1 | 2020-07-06T07:13:43.000Z | 2020-07-06T07:13:43.000Z | bitmovin_api_sdk/encoding/filters/audio_mix/audio_mix_api.py | hofmannben/bitmovin-api-sdk-python | 71aae5cd8a31aa0ad54ca07a6f546a624e8686a9 | [
"MIT"
] | 1 | 2020-07-06T07:07:26.000Z | 2020-07-06T07:07:26.000Z | # coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.audio_mix_filter import AudioMixFilter
from bitmovin_api_sdk.models.bitmovin_response import BitmovinResponse
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.encoding.filters.audio_mix.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.filters.audio_mix.audio_mix_filter_list_query_params import AudioMixFilterListQueryParams
class AudioMixApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(AudioMixApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
self.customdata = CustomdataApi(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def create(self, audio_mix_filter, **kwargs):
# type: (AudioMixFilter, dict) -> AudioMixFilter
"""Create Audio Mix Filter
:param audio_mix_filter: The Audio Mix Filter to be created
:type audio_mix_filter: AudioMixFilter, required
:return: Audio Mix configuration
:rtype: AudioMixFilter
"""
return self.api_client.post(
'/encoding/filters/audio-mix',
audio_mix_filter,
type=AudioMixFilter,
**kwargs
)
def delete(self, filter_id, **kwargs):
# type: (string_types, dict) -> BitmovinResponse
"""Delete Audio Mix Filter
:param filter_id: Id of the Audio Mix configuration.
:type filter_id: string_types, required
:return: Id of the Audio Mix configuration
:rtype: BitmovinResponse
"""
return self.api_client.delete(
'/encoding/filters/audio-mix/{filter_id}',
path_params={'filter_id': filter_id},
type=BitmovinResponse,
**kwargs
)
def get(self, filter_id, **kwargs):
# type: (string_types, dict) -> AudioMixFilter
"""Audio Mix Filter Details
:param filter_id: Id of the Audio Mix configuration.
:type filter_id: string_types, required
:return: Audio Mix configuration
:rtype: AudioMixFilter
"""
return self.api_client.get(
'/encoding/filters/audio-mix/{filter_id}',
path_params={'filter_id': filter_id},
type=AudioMixFilter,
**kwargs
)
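    # Illustrative usage of this resource class (the key and filter name are
    # placeholders, not part of this SDK file):
    #
    #   api = AudioMixApi(api_key='<YOUR_API_KEY>')
    #   created = api.create(AudioMixFilter(name='stereo-downmix'))
    #   fetched = api.get(filter_id=created.id)
    #   api.delete(filter_id=created.id)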
def list(self, query_params=None, **kwargs):
# type: (AudioMixFilterListQueryParams, dict) -> AudioMixFilter
"""List Audio Mix Filters
:param query_params: Query parameters
:type query_params: AudioMixFilterListQueryParams
:return: List of Audio Mix configurations
:rtype: AudioMixFilter
"""
return self.api_client.get(
'/encoding/filters/audio-mix',
query_params=query_params,
pagination_response=True,
type=AudioMixFilter,
**kwargs
)
| 33.558824 | 120 | 0.654689 |
4a1dd736b4349772465a4b26815288c5fd175573 | 2,317 | py | Python | modelexp/models/sas/_superball.py | DomiDre/modelexp | 1ec25f71e739dac27716f9a8637fa6ab067499b9 | [
"MIT"
] | null | null | null | modelexp/models/sas/_superball.py | DomiDre/modelexp | 1ec25f71e739dac27716f9a8637fa6ab067499b9 | [
"MIT"
] | null | null | null | modelexp/models/sas/_superball.py | DomiDre/modelexp | 1ec25f71e739dac27716f9a8637fa6ab067499b9 | [
"MIT"
] | null | null | null | from modelexp.models.sas import SAXSModel
from fortSAS import superball
from numpy.polynomial.hermite import hermgauss
from numpy.polynomial.legendre import leggauss
class Superball(SAXSModel):
def initParameters(self):
self.params.add('r', 100)
self.params.add('pVal', 2.3)
self.params.add('sldCore', 40e-6)
self.params.add('sldSolvent', 10e-6)
self.params.add('sigR', 0.)
self.params.add('i0', 1)
self.params.add('bg', 1e-6)
self.params.add('orderHermite', 20)
self.params.add('orderLegendre', 20)
self.addConstantParam('orderHermite')
self.addConstantParam('orderLegendre')
def initMagneticParameters(self):
self.params.add('magSldCore', 5e-6, min=0)
self.params.add('magSldSolvent', 0, vary=False)
self.addConstantParam('magSldSolvent')
def calcModel(self):
self.x_herm, self.w_herm = hermgauss(int(self.params['orderHermite']))
self.x_leg, self.w_leg = leggauss(int(self.params['orderLegendre']))
self.I = self.params['i0'] * superball.formfactor(
self.q,
self.params['r'],
self.params['pVal'],
self.params['sldCore'],
self.params['sldSolvent'],
self.params['sigR'],
self.x_herm, self.w_herm, self.x_leg, self.w_leg
) + self.params['bg']
self.r, self.sld = superball.sld(
self.params['r'],
self.params['sldCore'],
self.params['sldSolvent']
)
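    # Background note (an assumption about the fortSAS kernel, which owns the
    # actual weighting): the Gauss-Hermite nodes/weights typically integrate
    # the log-normal size distribution of r (width sigR), while the
    # Gauss-Legendre nodes/weights carry out the orientational average of the
    # superball form factor.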
def calcMagneticModel(self):
self.x_herm, self.w_herm = hermgauss(int(self.params['orderHermite']))
self.x_leg, self.w_leg = leggauss(int(self.params['orderLegendre']))
self.I = self.params['i0'] * superball.magnetic_formfactor(
self.q,
self.params['r'],
self.params['pVal'],
self.params['sldCore'],
self.params['sldSolvent'],
self.params['sigR'],
self.params['magSldCore'],
self.params['magSldSolvent'],
self.params['xi'],
self.params['sin2alpha'],
self.params['polarization'],
self.x_herm, self.w_herm, self.x_leg, self.w_leg
) + self.params['bg']
self.r, self.sld = superball.sld(
self.params['r'],
self.params['sldCore'],
self.params['sldSolvent']
)
self.rMag, self.sldMag = superball.sld(
self.params['r'],
self.params['magSldCore'],
self.params['magSldSolvent'],
)
| 30.090909 | 74 | 0.644799 |
4a1dd9111d2e81b088f482dcca110cedf05c1162 | 5,832 | py | Python | pywraps/py_kernwin_plgform.py | diamondo25/src | 15b9ab2535222e492cd21b8528c27f763fb799d6 | [
"BSD-3-Clause"
] | 2 | 2019-07-08T11:58:27.000Z | 2019-07-08T13:23:57.000Z | pywraps/py_kernwin_plgform.py | Bia10/src | 15b9ab2535222e492cd21b8528c27f763fb799d6 | [
"BSD-3-Clause"
] | null | null | null | pywraps/py_kernwin_plgform.py | Bia10/src | 15b9ab2535222e492cd21b8528c27f763fb799d6 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
#<pycode(py_kernwin_plgform)>
import sys

# Py2/Py3 compatibility: Python 3 has no 'long' builtin; alias it so the
# pointer conversions below work on either interpreter.
if sys.version_info[0] >= 3:
    long = int
class PluginForm(object):
"""
PluginForm class.
This form can be used to host additional controls. Please check the PyQt example.
"""
WOPN_MDI = 0x01 # no-op
WOPN_TAB = 0x02
"""attached by default to a tab"""
WOPN_RESTORE = 0x04
"""
if the widget is the only widget in a floating area when
it is closed, remember that area's geometry. The next
time that widget is created as floating (i.e., no WOPN_TAB)
its geometry will be restored (e.g., "Execute script"
"""
WOPN_ONTOP = 0x08 # no-op
WOPN_MENU = 0x10 # no-op
WOPN_CENTERED = 0x20 # no-op
WOPN_PERSIST = 0x40
"""form will persist until explicitly closed with Close()"""
WOPN_CREATE_ONLY = {}
def __init__(self):
"""
"""
self.__clink__ = _ida_kernwin.plgform_new()
def Show(self, caption, options=0):
"""
        Creates the form if it was not created yet, or brings it to the front if it already exists
@param caption: The form caption
@param options: One of PluginForm.WOPN_ constants
"""
if options == self.WOPN_CREATE_ONLY:
options = -1
else:
options |= PluginForm.WOPN_TAB|PluginForm.WOPN_RESTORE
return _ida_kernwin.plgform_show(self.__clink__, self, caption, options)
@staticmethod
def _ensure_widget_deps(ctx):
for key, modname in [("sip", "sip"), ("QtWidgets", "PyQt5.QtWidgets")]:
if not hasattr(ctx, key):
print("Note: importing '%s' module into %s" % (key, ctx))
import importlib
setattr(ctx, key, importlib.import_module(modname))
@staticmethod
def TWidgetToPyQtWidget(form, ctx = sys.modules['__main__']):
"""
Convert a TWidget* to a QWidget to be used by PyQt
@param ctx: Context. Reference to a module that already imported SIP and QtWidgets modules
"""
if type(form).__name__ == "SwigPyObject":
ptr_l = long(form)
else:
ptr_l = form
PluginForm._ensure_widget_deps(ctx)
vptr = ctx.sip.voidptr(ptr_l)
return ctx.sip.wrapinstance(vptr.__int__(), ctx.QtWidgets.QWidget)
FormToPyQtWidget = TWidgetToPyQtWidget
@staticmethod
def QtWidgetToTWidget(w, ctx = sys.modules['__main__']):
"""
Convert a QWidget to a TWidget* to be used by IDA
@param ctx: Context. Reference to a module that already imported SIP and QtWidgets modules
"""
PluginForm._ensure_widget_deps(ctx)
as_long = long(ctx.sip.unwrapinstance(w))
return TWidget__from_ptrval__(as_long)
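    # Round-trip note: sip.unwrapinstance() yields the wrapped C++ pointer as
    # a plain integer, and TWidget__from_ptrval__() (provided by the
    # ida_kernwin bindings) re-wraps that address on the IDA side; this is
    # the inverse of the sip.wrapinstance() call in TWidgetToPyQtWidget.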
@staticmethod
def TWidgetToPySideWidget(form, ctx = sys.modules['__main__']):
"""
Use this method to convert a TWidget* to a QWidget to be used by PySide
@param ctx: Context. Reference to a module that already imported QtWidgets module
"""
if form is None:
return None
if type(form).__name__ == "SwigPyObject":
# Since 'form' is a SwigPyObject, we first need to convert it to a PyCObject.
# However, there's no easy way of doing it, so we'll use a rather brutal approach:
# converting the SwigPyObject to a 'long' (will go through 'SwigPyObject_long',
# that will return the pointer's value as a long), and then convert that value
# back to a pointer into a PyCObject.
ptr_l = long(form)
from ctypes import pythonapi, c_void_p, py_object
pythonapi.PyCObject_FromVoidPtr.restype = py_object
pythonapi.PyCObject_AsVoidPtr.argtypes = [c_void_p, c_void_p]
form = pythonapi.PyCObject_FromVoidPtr(ptr_l, 0)
return ctx.QtGui.QWidget.FromCObject(form)
FormToPySideWidget = TWidgetToPySideWidget
def OnCreate(self, form):
"""
This event is called when the plugin form is created.
The programmer should populate the form when this event is triggered.
@return: None
"""
pass
def OnClose(self, form):
"""
Called when the plugin form is closed
@return: None
"""
pass
def Close(self, options):
"""
Closes the form.
@param options: Close options (WCLS_SAVE, WCLS_NO_CONTEXT, ...)
@return: None
"""
return _ida_kernwin.plgform_close(self.__clink__, options)
def GetWidget(self):
"""
Return the TWidget underlying this view.
@return: The TWidget underlying this view, or None.
"""
return _ida_kernwin.plgform_get_widget(self.__clink__)
WCLS_SAVE = 0x1
"""Save state in desktop config"""
WCLS_NO_CONTEXT = 0x2
"""Don't change the current context (useful for toolbars)"""
WCLS_DONT_SAVE_SIZE = 0x4
"""Don't save size of the window"""
WCLS_CLOSE_LATER = 0x8
"""This flag should be used when Close() is called from an event handler"""
#</pycode(py_kernwin_plgform)>
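# Typical subclass pattern (illustrative sketch; the widget code is left out):
#
#   class MyForm(PluginForm):
#       def OnCreate(self, form):
#           self.parent = self.FormToPyQtWidget(form)
#           # ... build a Qt layout on self.parent ...
#
#       def OnClose(self, form):
#           pass
#
#   MyForm().Show("My form")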
if __name__ == '__main__':
    # ad-hoc demo: only pop up the form when this file is run directly,
    # so importing the module has no side effects
    plg = PluginForm()
    plg.Show("This is it")
#<pycode_BC695(py_kernwin_plgform)>
PluginForm.FORM_MDI = PluginForm.WOPN_MDI
PluginForm.FORM_TAB = PluginForm.WOPN_TAB
PluginForm.FORM_RESTORE = PluginForm.WOPN_RESTORE
PluginForm.FORM_ONTOP = PluginForm.WOPN_ONTOP
PluginForm.FORM_MENU = PluginForm.WOPN_MENU
PluginForm.FORM_CENTERED = PluginForm.WOPN_CENTERED
PluginForm.FORM_PERSIST = PluginForm.WOPN_PERSIST
PluginForm.FORM_SAVE = PluginForm.WCLS_SAVE
PluginForm.FORM_NO_CONTEXT = PluginForm.WCLS_NO_CONTEXT
PluginForm.FORM_DONT_SAVE_SIZE = PluginForm.WCLS_DONT_SAVE_SIZE
PluginForm.FORM_CLOSE_LATER = PluginForm.WCLS_CLOSE_LATER
#</pycode_BC695(py_kernwin_plgform)>
| 32.4 | 98 | 0.650034 |
4a1dd972e7c436062cc4ea5e1615cb7d629ef3bf | 9,282 | py | Python | openstack_dashboard/dashboards/admin/overview/tests.py | maofutian/horizon | dab92e7d2f576caea8f81c8e22a516fb45633794 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/admin/overview/tests.py | maofutian/horizon | dab92e7d2f576caea8f81c8e22a516fb45633794 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/admin/overview/tests.py | maofutian/horizon | dab92e7d2f576caea8f81c8e22a516fb45633794 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from django.core.urlresolvers import reverse
from django import http
from django.utils import encoding
from django.utils import timezone
from mox import IsA # noqa
from horizon.templatetags import sizeformat
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard import usage
INDEX_URL = reverse('horizon:project:overview:index')
class UsageViewTests(test.BaseAdminViewTests):
def _stub_api_calls(self, nova_stu_enabled):
self.mox.StubOutWithMock(api.nova, 'usage_list')
self.mox.StubOutWithMock(api.nova, 'tenant_absolute_limits')
self.mox.StubOutWithMock(api.nova, 'extension_supported')
self.mox.StubOutWithMock(api.keystone, 'tenant_list')
self.mox.StubOutWithMock(api.neutron, 'is_extension_supported')
self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
self.mox.StubOutWithMock(api.network, 'security_group_list')
self.mox.StubOutWithMock(api.cinder, 'tenant_absolute_limits')
api.nova.extension_supported(
'SimpleTenantUsage', IsA(http.HttpRequest)) \
.AndReturn(nova_stu_enabled)
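    # mox flow used by all tests in this class: the stubbed calls above are
    # only *recorded* here; they take effect once a test calls
    # self.mox.ReplayAll(), and any unmet expectation fails the test when
    # mox verifies the recording afterwards.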
def test_usage(self):
self._test_usage(nova_stu_enabled=True)
def test_usage_disabled(self):
self._test_usage(nova_stu_enabled=False)
def test_usage_with_deleted_tenant(self):
self._test_usage(tenant_deleted=True)
def _test_usage(self, nova_stu_enabled=True, tenant_deleted=False):
self._stub_api_calls(nova_stu_enabled)
api.nova.extension_supported(
'SimpleTenantUsage', IsA(http.HttpRequest)) \
.AndReturn(nova_stu_enabled)
now = timezone.now()
usage_list = [api.nova.NovaUsage(u) for u in self.usages.list()]
if tenant_deleted:
api.keystone.tenant_list(IsA(http.HttpRequest)) \
.AndReturn([[self.tenants.first()], False])
else:
api.keystone.tenant_list(IsA(http.HttpRequest)) \
.AndReturn([self.tenants.list(), False])
if nova_stu_enabled:
api.nova.usage_list(IsA(http.HttpRequest),
datetime.datetime(now.year,
now.month,
1, 0, 0, 0, 0),
datetime.datetime(now.year,
now.month,
now.day, 23, 59, 59, 0)) \
.AndReturn(usage_list)
api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
.AndReturn(self.limits['absolute'])
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'security-group').AndReturn(True)
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.q_secgroups.list())
api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
.AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:admin:overview:index'))
self.assertTemplateUsed(res, 'admin/overview/usage.html')
self.assertTrue(isinstance(res.context['usage'], usage.GlobalUsage))
self.assertEqual(nova_stu_enabled,
res.context['simple_tenant_usage_enabled'])
usage_table = encoding.smart_str(u'''
<tr class="" data-object-id="1" id="global_usage__row__1">
<td class="sortable normal_column">test_tenant</td>
<td class="sortable normal_column">%s</td>
<td class="sortable normal_column">%s</td>
<td class="sortable normal_column">%s</td>
<td class="sortable normal_column">%.2f</td>
<td class="sortable normal_column">%.2f</td>
</tr>
''' % (usage_list[0].vcpus,
sizeformat.diskgbformat(usage_list[0].disk_gb_hours),
sizeformat.mbformat(usage_list[0].memory_mb),
usage_list[0].vcpu_hours,
usage_list[0].total_local_gb_usage)
)
# test for deleted project
usage_table_deleted = encoding.smart_str(u'''
<tr class="" data-object-id="3" id="global_usage__row__3">
<td class="sortable normal_column">3 (Deleted)</td>
<td class="sortable normal_column">%s</td>
<td class="sortable normal_column">%s</td>
<td class="sortable normal_column">%s</td>
<td class="sortable normal_column">%.2f</td>
<td class="sortable normal_column">%.2f</td>
</tr>
''' % (usage_list[1].vcpus,
sizeformat.diskgbformat(usage_list[1].disk_gb_hours),
sizeformat.mbformat(usage_list[1].memory_mb),
usage_list[1].vcpu_hours,
usage_list[1].total_local_gb_usage)
)
if nova_stu_enabled:
self.assertContains(res, usage_table, html=True)
if tenant_deleted:
self.assertContains(res, usage_table_deleted, html=True)
else:
self.assertNotContains(res, usage_table_deleted, html=True)
else:
self.assertNotContains(res, usage_table, html=True)
def test_usage_csv(self):
self._test_usage_csv(nova_stu_enabled=True)
def test_usage_csv_disabled(self):
self._test_usage_csv(nova_stu_enabled=False)
def _test_usage_csv(self, nova_stu_enabled=True):
self._stub_api_calls(nova_stu_enabled)
api.nova.extension_supported(
'SimpleTenantUsage', IsA(http.HttpRequest)) \
.AndReturn(nova_stu_enabled)
now = timezone.now()
usage_obj = [api.nova.NovaUsage(u) for u in self.usages.list()]
api.keystone.tenant_list(IsA(http.HttpRequest)) \
.AndReturn([self.tenants.list(), False])
if nova_stu_enabled:
api.nova.usage_list(IsA(http.HttpRequest),
datetime.datetime(now.year,
now.month,
1, 0, 0, 0, 0),
datetime.datetime(now.year,
now.month,
now.day, 23, 59, 59, 0)) \
.AndReturn(usage_obj)
api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
.AndReturn(self.limits['absolute'])
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'security-group').AndReturn(True)
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.q_secgroups.list())
api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
.AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
csv_url = reverse('horizon:admin:overview:index') + "?format=csv"
res = self.client.get(csv_url)
self.assertTemplateUsed(res, 'admin/overview/usage.csv')
self.assertTrue(isinstance(res.context['usage'], usage.GlobalUsage))
hdr = 'Project Name,VCPUs,RAM (MB),Disk (GB),Usage (Hours)'
self.assertContains(res, '%s\r\n' % hdr)
if nova_stu_enabled:
for obj in usage_obj:
row = u'{0},{1},{2},{3},{4:.2f}\r\n'.format(obj.project_name,
obj.vcpus,
obj.memory_mb,
obj.disk_gb_hours,
obj.vcpu_hours)
self.assertContains(res, row)
| 45.950495 | 78 | 0.588128 |
4a1dd9804fb20637dc6db43b3ea0d447b7bcb304 | 9,416 | py | Python | docs/conf.py | Labaien96/DeepConvLSTM_Keras | d537e1a896de54ce1321f023798e39ac62075b0f | [
"Apache-2.0"
] | 5 | 2020-06-18T22:36:02.000Z | 2022-02-21T19:44:20.000Z | docs/conf.py | Labaien96/DeepConvLSTM_Keras | d537e1a896de54ce1321f023798e39ac62075b0f | [
"Apache-2.0"
] | null | null | null | docs/conf.py | Labaien96/DeepConvLSTM_Keras | d537e1a896de54ce1321f023798e39ac62075b0f | [
"Apache-2.0"
] | 2 | 2020-12-06T11:11:33.000Z | 2021-03-07T18:50:07.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# mcfly documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 5 10:42:14 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
from mcfly import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
# 'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
napoleon_numpy_docstring = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'mcfly'
copyright = '2016, Netherlands eScience Center'
author = 'Netherlands eScience Center'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'mcflylogo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'mcflydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mcfly.tex', 'mcfly Documentation',
'Netherlands eScience Center', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mcfly', 'mcfly Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mcfly', 'mcfly Documentation',
author, 'mcfly', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 31.918644 | 79 | 0.717184 |
4a1ddddada47353c36759efa957eef42288310d0 | 2,933 | py | Python | nicos_demo/vsans1/setups/sample_table_1.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_demo/vsans1/setups/sample_table_1.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_demo/vsans1/setups/sample_table_1.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | description = 'bottom sample table devices'
group = 'lowlevel'
devices = dict(
st1_omg = device('nicos.devices.generic.Axis',
description = 'table 1 omega axis',
pollinterval = 15,
maxage = 60,
fmtstr = '%.2f',
abslimits = (-180, 180),
precision = 0.01,
motor = 'st1_omgmot',
),
st1_omgmot = device('nicos.devices.generic.VirtualMotor',
description = 'table 1 omega motor',
fmtstr = '%.2f',
abslimits = (-180, 180),
visibility = (),
unit = 'deg',
),
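    # Pattern repeated for every axis below: a generic Axis adds user limits
    # and closed-loop precision on top of a VirtualMotor that simulates the
    # hardware; visibility = () keeps the bare motor out of the device lists.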
st1_chi = device('nicos.devices.generic.Axis',
description = 'table 1 chi axis',
pollinterval = 15,
maxage = 60,
fmtstr = '%.2f',
abslimits = (-5, 5),
precision = 0.01,
motor = 'st1_chimot',
),
st1_chimot = device('nicos.devices.generic.VirtualMotor',
description = 'table 1 chi motor',
fmtstr = '%.2f',
abslimits = (-5, 5),
visibility = (),
unit = 'deg',
),
st1_phi = device('nicos.devices.generic.Axis',
description = 'table 1 phi axis',
pollinterval = 15,
maxage = 60,
fmtstr = '%.2f',
abslimits = (-5, 5),
precision = 0.01,
motor = 'st1_phimot',
),
st1_phimot = device('nicos.devices.generic.VirtualMotor',
description = 'table 1 phi motor',
fmtstr = '%.2f',
abslimits = (-5, 5),
visibility = (),
unit = 'deg',
),
st1_y = device('nicos.devices.generic.Axis',
description = 'table 1 y axis',
pollinterval = 15,
maxage = 60,
fmtstr = '%.2f',
abslimits = (-99, 99),
precision = 0.01,
motor = 'st1_ymot',
),
st1_ymot = device('nicos.devices.generic.VirtualMotor',
description = 'table 1 y motor',
fmtstr = '%.2f',
abslimits = (-99, 99),
visibility = (),
unit = 'mm',
),
st1_z = device('nicos.devices.generic.Axis',
description = 'table 1 z axis',
pollinterval = 15,
maxage = 60,
fmtstr = '%.2f',
abslimits = (-50, 50),
precision = 0.01,
motor = 'st1_zmot',
),
st1_zmot = device('nicos.devices.generic.VirtualMotor',
description = 'table 1 z motor',
fmtstr = '%.2f',
abslimits = (-50, 50),
visibility = (),
curvalue = -31,
unit = 'mm',
),
st1_x = device('nicos.devices.generic.Axis',
description = 'table 1 x axis',
pollinterval = 15,
maxage = 60,
fmtstr = '%.2f',
abslimits = (-500.9, 110.65),
precision = 0.01,
motor = 'st1_xmot',
),
st1_xmot = device('nicos.devices.generic.VirtualMotor',
description = 'table 1 x motor',
fmtstr = '%.2f',
abslimits = (-750, 150),
visibility = (),
unit = 'mm',
),
)
| 28.201923 | 61 | 0.505626 |
4a1ddddee344067e2ffc3ffbbe443bbcf3d21014 | 5,453 | py | Python | tests/validator/test_page_validator.py | JBBalling/core | b9977a28118d33e40656f5ea95e819d8cc496f32 | [
"Apache-2.0"
] | null | null | null | tests/validator/test_page_validator.py | JBBalling/core | b9977a28118d33e40656f5ea95e819d8cc496f32 | [
"Apache-2.0"
] | null | null | null | tests/validator/test_page_validator.py | JBBalling/core | b9977a28118d33e40656f5ea95e819d8cc496f32 | [
"Apache-2.0"
] | null | null | null | from tests.base import TestCase, assets, main # pylint: disable=import-error,no-name-in-module
from ocrd.resolver import Resolver
from ocrd_validators import PageValidator
from ocrd_validators.page_validator import get_text, set_text, ConsistencyError
from ocrd_models.ocrd_page import parse, TextEquivType
from ocrd_utils import pushd_popd
FAULTY_GLYPH_PAGE_FILENAME = assets.path_to('glyph-consistency/data/OCR-D-GT-PAGE/FAULTY_GLYPHS.xml')
class TestPageValidator(TestCase):
def setUp(self):
pass
def test_validate_err(self):
with self.assertRaisesRegex(Exception, 'At least one of ocrd_page, ocrd_file or filename must be set'):
PageValidator.validate()
with self.assertRaisesRegex(Exception, 'page_textequiv_strategy best not implemented'):
PageValidator.validate(filename=FAULTY_GLYPH_PAGE_FILENAME, page_textequiv_strategy='best')
# test with deprecated name
with self.assertRaisesRegex(Exception, 'page_textequiv_strategy best not implemented'):
PageValidator.validate(filename=FAULTY_GLYPH_PAGE_FILENAME, strategy='best')
with self.assertRaisesRegex(Exception, 'page_textequiv_consistency level superstrictest not implemented'):
PageValidator.validate(filename=FAULTY_GLYPH_PAGE_FILENAME, page_textequiv_consistency='superstrictest', strategy='first')
def test_validate_filename(self):
report = PageValidator.validate(filename=FAULTY_GLYPH_PAGE_FILENAME)
self.assertEqual(len([e for e in report.errors if isinstance(e, ConsistencyError)]), 17, '17 textequiv consistency errors')
def test_validate_filename_off(self):
report = PageValidator.validate(filename=FAULTY_GLYPH_PAGE_FILENAME, page_textequiv_consistency='off')
self.assertEqual(len([e for e in report.errors if isinstance(e, ConsistencyError)]), 0, '0 textequiv consistency errors')
def test_validate_ocrd_file(self):
resolver = Resolver()
workspace = resolver.workspace_from_url(assets.url_of('glyph-consistency/data/mets.xml'))
with pushd_popd(workspace.directory):
ocrd_file = workspace.mets.find_files(ID="FAULTY_GLYPHS_FILE")[0]
report = PageValidator.validate(ocrd_file=ocrd_file)
self.assertEqual(len([e for e in report.errors if isinstance(e, ConsistencyError)]), 17, '17 textequiv consistency errors')
def test_validate_lax(self):
ocrd_page = parse(assets.path_to('kant_aufklaerung_1784/data/OCR-D-GT-PAGE/PAGE_0020_PAGE.xml'), silence=True)
# introduce a single word error (not just whitespace inconsistency)
ocrd_page.get_Page().get_TextRegion()[0].get_TextLine()[0].get_Word()[1].get_TextEquiv()[0].set_Unicode('FOO')
report = PageValidator.validate(ocrd_page=ocrd_page)
self.assertEqual(len([e for e in report.errors if isinstance(e, ConsistencyError)]), 26, '26 textequiv consistency errors - strict')
report = PageValidator.validate(ocrd_page=ocrd_page, strictness='lax')
self.assertEqual(len([e for e in report.errors if isinstance(e, ConsistencyError)]), 1, '1 textequiv consistency errors - lax')
def test_validate_multi_textequiv_first(self):
ocrd_page = parse(assets.path_to('kant_aufklaerung_1784/data/OCR-D-GT-PAGE/PAGE_0020_PAGE.xml'), silence=True)
report = PageValidator.validate(ocrd_page=ocrd_page)
self.assertEqual(len([e for e in report.errors if isinstance(e, ConsistencyError)]), 25, '25 textequiv consistency errors - strict')
word = ocrd_page.get_Page().get_TextRegion()[0].get_TextLine()[0].get_Word()[1]
# delete all textequivs
word.set_TextEquiv([])
# Add textequiv
set_text(word, 'FOO', 'first')
word.add_TextEquiv(TextEquivType(Unicode='BAR', conf=.7))
word.add_TextEquiv(TextEquivType(Unicode='BAZ', conf=.5, index=0))
self.assertEqual(get_text(word, 'first'), 'BAZ')
set_text(word, 'XYZ', 'first')
self.assertEqual(get_text(word, 'first'), 'XYZ')
def test_validate_multi_textequiv(self):
ocrd_page = parse(assets.path_to('kant_aufklaerung_1784/data/OCR-D-GT-PAGE/PAGE_0020_PAGE.xml'), silence=True)
report = PageValidator.validate(ocrd_page=ocrd_page)
self.assertEqual(len([e for e in report.errors if isinstance(e, ConsistencyError)]), 25, '25 textequiv consistency errors - strict')
word = ocrd_page.get_Page().get_TextRegion()[0].get_TextLine()[0].get_Word()[1]
# delete all textequivs
del(word.get_TextEquiv()[0])
# Add textequiv
set_text(word, 'FOO', 'index1')
word.add_TextEquiv(TextEquivType(Unicode='BAR', conf=.7))
self.assertEqual(get_text(word, 'index1'), 'FOO')
set_text(word, 'BAR', 'index1')
self.assertEqual(get_text(word, 'index1'), 'BAR')
def test_fix(self):
ocrd_page = parse(FAULTY_GLYPH_PAGE_FILENAME, silence=True)
report = PageValidator.validate(ocrd_page=ocrd_page)
self.assertEqual(len([e for e in report.errors if isinstance(e, ConsistencyError)]), 17, '17 textequiv consistency errors')
PageValidator.validate(ocrd_page=ocrd_page, strictness='fix')
report = PageValidator.validate(ocrd_page=ocrd_page)
self.assertEqual(len([e for e in report.errors if isinstance(e, ConsistencyError)]), 0, 'no more textequiv consistency errors')
if __name__ == '__main__':
main()
| 53.990099 | 140 | 0.721254 |
4a1ddeb6c943bbeb167d9a6262c0bb65dd423b3b | 666 | py | Python | __init__.py | viralinkio/viralink-python-client-sdk | 110d0f9ac2f69ce496a16608ddacb8d4dad7b736 | [
"Apache-2.0"
] | null | null | null | __init__.py | viralinkio/viralink-python-client-sdk | 110d0f9ac2f69ce496a16608ddacb8d4dad7b736 | [
"Apache-2.0"
] | null | null | null | __init__.py | viralinkio/viralink-python-client-sdk | 110d0f9ac2f69ce496a16608ddacb8d4dad7b736 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021. ViraLink
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
__name__ = "vl_mqtt_client"
| 35.052632 | 79 | 0.689189 |
4a1ddf1357c44369bd84f4d9dd1b4712a3a758ff | 4,182 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/community/network/plugins/modules/network/avi/avi_gslbgeodbprofile.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/network/plugins/modules/network/avi/avi_gslbgeodbprofile.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/network/plugins/modules/network/avi/avi_gslbgeodbprofile.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.2
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: avi_gslbgeodbprofile
author: Gaurav Rastogi (@grastogi23) <[email protected]>
short_description: Module for setup of GslbGeoDbProfile Avi RESTful Object
description:
- This module is used to configure GslbGeoDbProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
choices: ["add", "replace", "delete"]
description:
description:
- Field introduced in 17.1.1.
entries:
description:
- List of geodb entries.
- An entry can either be a geodb file or an ip address group with geo properties.
- Field introduced in 17.1.1.
is_federated:
description:
- This field indicates that this object is replicated across gslb federation.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
name:
description:
- A user-friendly name for the geodb profile.
- Field introduced in 17.1.1.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the geodb profile.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- community.network.avi
'''
EXAMPLES = """
- name: Example to create GslbGeoDbProfile object
community.network.avi_gslbgeodbprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_gslbgeodbprofile
"""
RETURN = '''
obj:
description: GslbGeoDbProfile (api/gslbgeodbprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible_collections.community.network.plugins.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
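# Import-guard pattern: when the Avi helpers cannot be imported, HAS_AVI
# stays False and main() exits early with an install hint instead of
# raising an ImportError at load time.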
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
description=dict(type='str',),
entries=dict(type='list',),
is_federated=dict(type='bool',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'gslbgeodbprofile',
set([]))
if __name__ == '__main__':
main()
| 32.418605 | 105 | 0.64132 |
4a1ddf71b181ff18fdeacd48dac64c7087f909f1 | 3,287 | py | Python | test/test_semi_structured_datatypes.py | zuarbase/snowflake-sqlalchemy | 6615f51444c539d56ade2be08c542dd4827a70a8 | [
"Apache-2.0"
] | 1 | 2020-08-04T08:30:58.000Z | 2020-08-04T08:30:58.000Z | test/test_semi_structured_datatypes.py | zuarbase/snowflake-sqlalchemy | 6615f51444c539d56ade2be08c542dd4827a70a8 | [
"Apache-2.0"
] | null | null | null | test/test_semi_structured_datatypes.py | zuarbase/snowflake-sqlalchemy | 6615f51444c539d56ade2be08c542dd4827a70a8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 Snowflake Computing Inc. All right reserved.
#
import json
import pytest
from parameters import CONNECTION_PARAMETERS
from snowflake.sqlalchemy import ARRAY, OBJECT, VARIANT
from sqlalchemy import Column, Integer, MetaData, Table, inspect
from sqlalchemy.sql import select
try:
from parameters import (CONNECTION_PARAMETERS2)
except ImportError:
CONNECTION_PARAMETERS2 = CONNECTION_PARAMETERS
def test_create_table_semi_structured_datatypes(engine_testaccount):
"""
Create table including semi-structured data types
"""
metadata = MetaData()
table_name = "test_variant0"
test_variant = Table(
table_name,
metadata,
Column('id', Integer, primary_key=True),
Column('va', VARIANT),
Column('ob', OBJECT),
Column('ar', ARRAY))
metadata.create_all(engine_testaccount)
try:
assert test_variant is not None
finally:
test_variant.drop(engine_testaccount)
@pytest.mark.skip("""
Semi-structured data cannot be inserted by INSERT VALUES. Instead,
INSERT SELECT must be used. The fix should be either 1) SQLAlchemy dialect
transforms INSERT statement or 2) Snwoflake DB supports INSERT VALUES for
semi-structured data types. No ETA for this fix.
""")
def test_insert_semi_structured_datatypes(engine_testaccount):
metadata = MetaData()
table_name = "test_variant1"
test_variant = Table(
table_name,
metadata,
Column('id', Integer, primary_key=True),
Column('va', VARIANT),
Column('ob', OBJECT),
Column('ar', ARRAY))
metadata.create_all(engine_testaccount)
try:
ins = test_variant.insert().values(
id=1,
va='{"vk1":100, "vk2":200, "vk3":300}')
results = engine_testaccount.execute(ins)
results.close()
finally:
test_variant.drop(engine_testaccount)
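# Background for the skipped test above: Snowflake accepts semi-structured
# values only via INSERT ... SELECT with PARSE_JSON()/PARSE_XML(); a plain
# INSERT ... VALUES cannot bind VARIANT/OBJECT/ARRAY literals, which is what
# the raw INSERT ... SELECT in the next test works around.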
def test_inspect_semi_structured_datatypes(engine_testaccount):
"""
Inspects semi-structured data type columns
"""
table_name = "test_variant2"
metadata = MetaData()
test_variant = Table(
table_name,
metadata,
Column('id', Integer, primary_key=True),
Column('va', VARIANT),
Column('ar', ARRAY))
metadata.create_all(engine_testaccount)
try:
engine_testaccount.execute("""
INSERT INTO {0}(id, va, ar)
SELECT 1,
PARSE_JSON('{{"vk1":100, "vk2":200, "vk3":300}}'),
PARSE_JSON('[
{{"k":1, "v":"str1"}},
{{"k":2, "v":"str2"}},
{{"k":3, "v":"str3"}}]'
)""".format(table_name))
inspecter = inspect(engine_testaccount)
columns = inspecter.get_columns(table_name)
assert isinstance(columns[1]['type'], VARIANT)
assert isinstance(columns[2]['type'], ARRAY)
conn = engine_testaccount.connect()
s = select([test_variant])
results = conn.execute(s)
rows = results.fetchone()
results.close()
assert rows[0] == 1
data = json.loads(rows[1])
assert data['vk1'] == 100
assert data['vk3'] == 300
assert data is not None
data = json.loads(rows[2])
assert data[1]['k'] == 2
finally:
test_variant.drop(engine_testaccount)
| 29.881818 | 74 | 0.647399 |
4a1ddfec317aa26d03a90a81c32f9d9b7bc26c40 | 19,309 | py | Python | saleor/dashboard/views.py | glosoftgroup/glosoftgroup-django-pos | b489c402939b9ebabd164c449e7da38fe849d550 | [
"BSD-3-Clause"
] | 2 | 2017-07-11T12:40:59.000Z | 2017-10-18T18:02:46.000Z | saleor/dashboard/views.py | glosoftgroup/glosoftgroup-django-pos | b489c402939b9ebabd164c449e7da38fe849d550 | [
"BSD-3-Clause"
] | 12 | 2017-06-19T07:20:41.000Z | 2022-03-15T19:03:33.000Z | saleor/dashboard/views.py | glosoftgroup/glosoftgroup-django-pos | b489c402939b9ebabd164c449e7da38fe849d550 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.admin.views.decorators import \
staff_member_required as _staff_member_required
from django.template.response import TemplateResponse
from payments import PaymentStatus
from ..order.models import Order, Payment
from ..order import OrderStatus
from ..sale.models import Sales, SoldItem
from ..product.models import Category, Stock
from ..credit.models import Credit
from django.db.models import Count, Sum
from django.core.paginator import Paginator, EmptyPage, InvalidPage, PageNotAnInteger
from .reports.hours_chart import get_item_results, get_category_results
from django.utils.dateformat import DateFormat
from decimal import Decimal
import datetime
import random
import calendar
import dateutil.relativedelta
from structlog import get_logger
logger = get_logger(__name__)
def staff_member_required(f):
return _staff_member_required(f, login_url='home')
@staff_member_required
def index(request):
month = request.GET.get('month')
year = request.GET.get('year')
period = request.GET.get('period')
try:
last_sale = Sales.objects.latest('id')
date = DateFormat(last_sale.created).format('Y-m-d')
except:
date = DateFormat(datetime.datetime.today()).format('Y-m-d')
try:
orders_to_ship = Order.objects.filter(status=OrderStatus.FULLY_PAID)
orders_to_ship = (orders_to_ship
.select_related('user')
.prefetch_related('groups', 'groups__items', 'payments'))
payments = Payment.objects.filter(
status=PaymentStatus.PREAUTH).order_by('-created')
payments = payments.select_related('order', 'order__user')
#top categories
if period:
cat = top_categories(month, year, period)
else:
cat = top_categories()
items = top_items()
low_stock_order = dashbord_get_low_stock_products()
try:
startYear = Sales.objects.all().first().created.year
startMonth = Sales.objects.all().first().created.month
except:
startYear = datetime.datetime.today().year
startMonth = datetime.datetime.today().month
ctx = {'preauthorized_payments': payments,
'orders_to_ship': orders_to_ship,
'low_stock': low_stock_order['low_stock'],
'pn':low_stock_order['pn'],
'sz': low_stock_order['sz'],
'gid': low_stock_order['gid'],
#top_cat
"sales_by_category": cat['sales_by_category'],
"categs": cat['categs'],
"avg": cat['avg'],
"labels": cat['labels'],
"default": cat['default'],
"hcateg": cat['hcateg'],
"date_total_sales": cat['date_total_sales'],
"no_of_customers": cat['no_of_customers'],
"date_period": cat['date_period'],
#items
"sales_by_item": items['sales_by_item'],
"items": items['items'],
"items_avg": items['items_avg'],
"items_labels": items['items_labels'],
"items_default": items['items_default'],
"items_hcateg": items['items_hcateg'],
"highest_item": items['highest_item'],
"lowest_item": items['lowest_item'],
'startYear': startYear,
'startMonth': startMonth,
}
if period:
return TemplateResponse(request, 'dashboard/ajax.html', ctx)
else:
return TemplateResponse(request, 'dashboard/index.html', ctx)
except BaseException as e:
if period:
return TemplateResponse(request, 'dashboard/ajax.html', {"e":e, "date":date})
else:
return TemplateResponse(request, 'dashboard/index.html', {"e":e, "date":date})
@staff_member_required
def landing_page(request):
ctx = {}
return TemplateResponse(request, 'dashboard/landing-page.html', ctx)
def top_categories(month=None, year=None, period=None):
today = datetime.datetime.now()
try:
last_sale = Sales.objects.latest('id')
date = DateFormat(last_sale.created).format('Y-m-d')
except:
date = DateFormat(datetime.datetime.today()).format('Y-m-d')
if year and month:
if len(str(month)) == 1:
m = '0' + str(month)
fdate = str(year) + '-' + m
else:
fdate = str(year) + '-' + str(month)
d = datetime.datetime.strptime(fdate, "%Y-%m")
if period == 'year':
lastyear = d - dateutil.relativedelta.relativedelta(years=1)
y = str(lastyear.strftime("%Y"))
month = str(datetime.datetime.strptime(month, "%m").strftime("%m"))
sales_by_category = SoldItem.objects.filter(sales__created__year__range=[y, year], sales__created__month__lte=month
).values('product_category'
).annotate(c=Count('product_category', distinct=True)
).annotate(Sum('total_cost')
).annotate(Sum('quantity')).order_by('-quantity__sum')[:5]
sales_customers = Sales.objects.filter(created__year__range=[y, year], created__month__lte=month).count()
credit_customers = Credit.objects.filter(created__year__range=[y, year], created__month__lte=month).count()
date_period = str(lastyear.strftime("%B"))+'/'+str(lastyear.strftime("%Y"))+' - '+str(datetime.datetime.strptime(month, "%m").strftime("%B"))+'/'+ str(year)
elif period == 'month':
sales_by_category = SoldItem.objects.filter(sales__created__year=str(d.strftime("%Y")), sales__created__month=str(d.strftime("%m"))
).values('product_category'
).annotate(c=Count('product_category', distinct=True)
).annotate(Sum('total_cost')
).annotate(Sum('quantity')).order_by('-quantity__sum')[:5]
sales_customers = Sales.objects.filter(created__year=str(d.strftime("%Y")), created__month=str(d.strftime("%m"))).count()
credit_customers = Credit.objects.filter(created__year=str(d.strftime("%Y")), created__month=str(d.strftime("%m"))).count()
date_period = str(datetime.datetime.strptime(month, "%m").strftime("%B")) + '/' + str(datetime.datetime.strptime(year, "%Y").strftime("%Y"))
elif period == 'quarter':
p = d - dateutil.relativedelta.relativedelta(months=3)
month = str(datetime.datetime.strptime(month, "%m").strftime("%m"))
sales_by_category = SoldItem.objects.filter(sales__created__year=str(p.strftime("%Y")),
sales__created__month__range=[str(p.strftime("%m")), month]
).values('product_category'
).annotate(c=Count('product_category', distinct=True)
).annotate(Sum('total_cost')
).annotate(Sum('quantity')).order_by('-quantity__sum')[:5]
sales_customers = Sales.objects.filter(created__year=str(p.strftime("%Y")),
created__month__range=[str(p.strftime("%m")), month]).count()
credit_customers = Credit.objects.filter(created__year=str(p.strftime("%Y")),
created__month__range=[str(p.strftime("%m")), month]).count()
date_period = str(p.strftime("%B")) + '/' + str(p.strftime("%Y")) + ' - ' + str(
datetime.datetime.strptime(month, "%m").strftime("%B")) + '/' + str(year)
else:
sales_by_category = SoldItem.objects.filter(sales__created__contains=date).values('product_category').annotate(
c=Count('product_category', distinct=True)).annotate(Sum('total_cost')).annotate(Sum('quantity')).order_by(
'-quantity__sum')[:5]
sales_customers = Sales.objects.filter(created__contains=date).count()
credit_customers = Credit.objects.filter(created__contains=date).count()
date_period = date
try:
sales_by_category = sales_by_category
quantity_totals = sales_by_category.aggregate(Sum('quantity__sum'))['quantity__sum__sum']
new_sales = []
for sales in sales_by_category:
color = "#%03x" % random.randint(0, 0xFFF)
sales['color'] = color
percent = (Decimal(sales['quantity__sum']) / Decimal(quantity_totals)) * 100
percentage = round(percent, 2)
sales['percentage'] = percentage
for s in range(0, sales_by_category.count(), 1):
sales['count'] = s
new_sales.append(sales)
sales['total_cost'] = int(sales['total_cost__sum'])
categs = Category.objects.all()
this_year = today.year
avg_m = Sales.objects.filter(created__year=this_year).annotate(c=Count('total_net'))
highest_category_sales = new_sales[0]['product_category']
default = []
labels = []
for i in range(1, (today.month + 1), 1):
if len(str(i)) == 1:
m = str('0' + str(i))
else:
m = str(i)
amount = get_category_results(highest_category_sales, str(today.year), m)
labels.append(calendar.month_name[int(m)][0:3])
default.append(amount)
date_total_sales = Sales.objects.filter(created__contains=date).aggregate(Sum('total_net'))[
'total_net__sum']
try:
sales_customers = sales_customers
credit_customers = credit_customers
except:
sales_customers = 0
credit_customers = 0
no_of_customers = sales_customers + credit_customers
data = {
"sales_by_category": new_sales,
"categs": categs,
"avg": avg_m,
"labels": labels,
"default": default,
"hcateg": highest_category_sales,
"date_total_sales": date_total_sales,
"no_of_customers":no_of_customers,
"date_period":date_period
}
return data
    except Exception as e:
logger.error(e)
data = {
"sales_by_category": None,
"categs": None,
"avg": None,
"labels": None,
"default": None,
"hcateg": None,
"date_total_sales": None,
"no_of_customers": None,
}
return data
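# Worked example of the window arithmetic above (hypothetical inputs):
# month='04', year='2017', period='quarter' makes d = 2017-04, so
# p = d - relativedelta(months=3) = 2017-01 and the query keeps sales with
# created__year == '2017' and created__month between '01' and '04';
# period='year' instead filters created__year in ['2016', '2017'] with
# created__month <= '04'.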
def top_items(month=None, year=None, period=None):
today = datetime.datetime.now()
try:
last_sale = Sales.objects.latest('id')
date = DateFormat(last_sale.created).format('Y-m-d')
except:
date = DateFormat(datetime.datetime.today()).format('Y-m-d')
if year and month:
if len(str(month)) == 1:
m = '0' + str(month)
fdate = str(year) + '-' + m
else:
fdate = str(year) + '-' + str(month)
d = datetime.datetime.strptime(fdate, "%Y-%m")
if period == 'year':
lastyear = d - dateutil.relativedelta.relativedelta(years=1)
y = str(lastyear.strftime("%Y"))
month = str(datetime.datetime.strptime(month, "%m").strftime("%m"))
sales_by_category = SoldItem.objects.filter(sales__created__year__range=[y, year], sales__created__month__lte=month).values(
'product_name').annotate(
c=Count('product_name', distinct=True)).annotate(
Sum('total_cost')).annotate(Sum('quantity')).order_by(
'-quantity__sum')[:5]
highest_item = SoldItem.objects.filter(sales__created__year__range=[y, year], sales__created__month__lte=month).values(
'product_name').annotate(
c=Count('product_name', distinct=False)).annotate(
Sum('total_cost')).annotate(Sum('quantity')).order_by(
'-quantity__sum')[:1]
lowest_item = SoldItem.objects.filter(sales__created__year__range=[y, year], sales__created__month__lte=month).values('product_name').annotate(
c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(Sum('quantity')).order_by(
'quantity__sum', 'total_cost__sum')[:1]
elif period == 'month':
sales_by_category = SoldItem.objects.filter(sales__created__year=str(d.strftime("%Y")), sales__created__month=str(d.strftime("%m"))).values('product_name').annotate(
c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(Sum('quantity')).order_by(
'-quantity__sum')[:5]
highest_item = SoldItem.objects.filter(sales__created__year=str(d.strftime("%Y")), sales__created__month=str(d.strftime("%m"))).values('product_name').annotate(
c=Count('product_name', distinct=False)).annotate(Sum('total_cost')).annotate(Sum('quantity')).order_by(
'-quantity__sum')[:1]
lowest_item = SoldItem.objects.filter(sales__created__year=str(d.strftime("%Y")), sales__created__month=str(d.strftime("%m"))).values('product_name').annotate(
c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(Sum('quantity')).order_by(
'quantity__sum', 'total_cost__sum')[:1]
elif period == 'quarter':
p = d - dateutil.relativedelta.relativedelta(months=3)
month = str(datetime.datetime.strptime(month, "%m").strftime("%m"))
sales_by_category = SoldItem.objects.filter(sales__created__year=str(p.strftime("%Y")),
sales__created__month__range=[str(p.strftime("%m")), month]).values('product_name').annotate(
c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(Sum('quantity')).order_by(
'-quantity__sum')[:5]
highest_item = SoldItem.objects.filter(sales__created__year=str(p.strftime("%Y")),
sales__created__month__range=[str(p.strftime("%m")), month]).values('product_name').annotate(
c=Count('product_name', distinct=False)).annotate(Sum('total_cost')).annotate(Sum('quantity')).order_by(
'-quantity__sum')[:1]
lowest_item = SoldItem.objects.filter(sales__created__year=str(p.strftime("%Y")),
sales__created__month__range=[str(p.strftime("%m")), month]).values('product_name').annotate(
c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(Sum('quantity')).order_by(
'quantity__sum', 'total_cost__sum')[:1]
else:
sales_by_category = SoldItem.objects.filter(sales__created__contains=date).values('product_name').annotate(
c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(Sum('quantity')).order_by(
'-quantity__sum')[:5]
highest_item = SoldItem.objects.filter(sales__created__contains=date).values('product_name').annotate(
c=Count('product_name', distinct=False)).annotate(Sum('total_cost')).annotate(Sum('quantity')).order_by(
'-quantity__sum')[:1]
lowest_item = SoldItem.objects.filter(sales__created__contains=date).values('product_name').annotate(
c=Count('product_name', distinct=True)).annotate(Sum('total_cost')).annotate(Sum('quantity')).order_by(
'quantity__sum', 'total_cost__sum')[:1]
try:
sales_by_category = sales_by_category
highest_item = highest_item
lowest_item = lowest_item
sales_by_category_totals = sales_by_category.aggregate(Sum('quantity__sum'))['quantity__sum__sum']
new_sales = []
for sales in sales_by_category:
color = "#%03x" % random.randint(0, 0xFFF)
sales['color'] = color
percent = (Decimal(sales['quantity__sum']) / Decimal(sales_by_category_totals)) * 100
percentage = round(percent, 2)
sales['percentage'] = percentage
for s in range(0, sales_by_category.count(), 1):
sales['count'] = s
new_sales.append(sales)
categs = SoldItem.objects.values('product_name').annotate(Count('product_name', distinct=True)).order_by()
this_year = today.year
avg_m = Sales.objects.filter(created__year=this_year).annotate(c=Count('total_net'))
highest_category_sales = new_sales[0]['product_name']
default = []
labels = []
for i in range(1, (today.month + 1), 1):
if len(str(i)) == 1:
m = str('0' + str(i))
else:
m = str(i)
amount = get_item_results(highest_category_sales, str(today.year), m)
labels.append(calendar.month_name[int(m)][0:3])
default.append(amount)
data = {
"sales_by_item": new_sales,
"items": categs,
"items_avg": avg_m,
"items_labels": labels,
"items_default": default,
"items_hcateg": highest_category_sales,
"highest_item":highest_item,
"lowest_item":lowest_item,
}
return data
except IndexError as e:
logger.error(e)
data = {
"sales_by_item": None,
"items": None,
"items_avg": None,
"items_labels": None,
"items_default": None,
"items_hcateg": None,
"highest_item": None,
"lowest_item": None,
}
return data
@staff_member_required
def styleguide(request):
return TemplateResponse(request, 'dashboard/styleguide/index.html', {})
def dashbord_get_low_stock_products():
products = Stock.objects.get_low_stock().order_by('-id')
paginator = Paginator(products, 10)
try:
low_stock = paginator.page(1)
except PageNotAnInteger:
low_stock = paginator.page(1)
except InvalidPage:
low_stock = paginator.page(1)
except EmptyPage:
low_stock = paginator.page(paginator.num_pages)
data = {'low_stock': low_stock, 'pn': paginator.num_pages, 'sz': 10, 'gid': 0}
return data
def get_low_stock_products():
products = Stock.objects.get_low_stock()
return products
| 49.383632 | 174 | 0.573411 |
4a1ddff18e3240cfbf72674fdb891adf25b249eb | 3,934 | py | Python | contrib/macdeploy/custom_dsstore.py | Benitoitedev/Benitoitecoin | 45e4762128d9c0e888ffda37b34feddc7680ca71 | ["MIT"] | null | null | null | contrib/macdeploy/custom_dsstore.py | Benitoitedev/Benitoitecoin | 45e4762128d9c0e888ffda37b34feddc7680ca71 | ["MIT"] | null | null | null | contrib/macdeploy/custom_dsstore.py | Benitoitedev/Benitoitecoin | 45e4762128d9c0e888ffda37b34feddc7680ca71 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Copyright (c) 2018 The LightCoinPay developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division, print_function, unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
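# Expected invocation (arguments are consumed positionally below), e.g.:
# python custom_dsstore.py <output .DS_Store path> <volume/package name>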
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
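# Rewrite the alias embedded in 'backgroundImageAlias' so it points at the
# renamed volume and temp DMG instead of the original build machine's paths.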
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bit' +\
'coin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + \
'.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['Benitoite-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 61.46875 | 1,817 | 0.708185 |
4a1de078fd24f0e3aefe5bcd80bbafa87f795aa1 | 19,464 | py | Python | hw/ip/otbn/dv/rig/rig/gens/straight_line_insn.py | vsukhoml/opentitan | bb0bd16b3eca0ef2dd4144b5df49b8663c59101f | ["Apache-2.0"] | 1 | 2020-05-11T05:18:20.000Z | 2020-05-11T05:18:20.000Z | hw/ip/otbn/dv/rig/rig/gens/straight_line_insn.py | vsukhoml/opentitan | bb0bd16b3eca0ef2dd4144b5df49b8663c59101f | ["Apache-2.0"] | null | null | null | hw/ip/otbn/dv/rig/rig/gens/straight_line_insn.py | vsukhoml/opentitan | bb0bd16b3eca0ef2dd4144b5df49b8663c59101f | ["Apache-2.0"] | null | null | null |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import random
from typing import List, Optional, Tuple
from shared.insn_yaml import Insn, InsnsFile
from shared.lsu_desc import LSUDesc
from shared.operand import ImmOperandType, OptionOperandType, RegOperandType
from ..config import Config
from ..program import ProgInsn, Program
from ..model import Model
from ..snippet import ProgSnippet, Snippet
from ..snippet_gen import GenCont, GenRet, SimpleGenRet, SnippetGen
class StraightLineInsn(SnippetGen):
'''A super-simple snippet consisting of a single instruction'''
def __init__(self, cfg: Config, insns_file: InsnsFile) -> None:
super().__init__()
# Find all the straight line, non-pseudo instructions in insns_file
self.insns = []
self.weights = []
seen_insns = set()
for insn in insns_file.insns:
# Skip pseudo-ops
if insn.python_pseudo_op or insn.literal_pseudo_op:
continue
seen_insns.add(insn.mnemonic)
# Skip anything that isn't straight-line
if not insn.straight_line:
continue
weight = cfg.insn_weights.get(insn.mnemonic)
if weight > 0:
self.insns.append(insn)
self.weights.append(weight)
# Check that the config's insn-weights dictionary didn't have any
# non-existent instructions. Note that we even add jumps to seen_insns:
# although weighting those won't make any difference here, this is the
# code that makes sense of what's allowed.
missing_insns = set(cfg.insn_weights.values.keys()) - seen_insns
if missing_insns:
raise ValueError('Config at {} defines weights for '
'non-existent instructions: {}'
.format(cfg.path,
', '.join(sorted(missing_insns))))
# Check that at least one instruction has a positive weight
assert len(self.insns) == len(self.weights)
if not self.insns:
raise ValueError('Config at {} defines a zero weight '
'for all instructions.'
.format(cfg.path))
def gen(self,
cont: GenCont,
model: Model,
program: Program) -> Optional[GenRet]:
return SnippetGen._unsimple_genret(self._gen(model, program))
def gen_some(self,
count: int,
model: Model,
program: Program) -> Optional[SimpleGenRet]:
'''Generate a block of count straight-line instructions'''
assert 0 < count
# Take a copy of model, so that we fail atomically
model = model.copy()
children = [] # type: List[Snippet]
for i in range(count):
gen_ret = self._gen(model, program)
if gen_ret is None:
return None
snippet, model = gen_ret
children.append(snippet)
return (Snippet.merge_list(children), model)
def _gen(self,
model: Model,
program: Program) -> Optional[SimpleGenRet]:
# Return None if this is the last instruction in the current gap
# because we need to either jump or do an ECALL to avoid getting stuck.
#
# Note that we could do this by defining pick_weight, but we don't
# expect it to happen very often so it's probably best (and cheaper)
# just to disable ourselves on the rare occasions when it does.
if program.get_insn_space_at(model.pc) <= 1:
return None
weights = self.weights
prog_insn = None
while prog_insn is None:
idx = random.choices(range(len(self.insns)), weights=weights)[0]
# At least one weight should be positive. This is guaranteed so
# long as fill_insn doesn't fail for absolutely everything. We know
# that doesn't happen, because we have some instructions (such as
# addi) where it will not fail.
assert weights[idx] > 0
# Try to fill out the instruction. On failure, clear the weight for
# this index and go around again. We take the copy here, rather
# than outside the loop, because we don't expect this to happen
# very often.
prog_insn = self.fill_insn(self.insns[idx], model)
if prog_insn is None:
weights = self.weights.copy()
weights[idx] = 0
continue
# Success! We have generated an instruction. Put it in a snippet and
# add that to the program
snippet = ProgSnippet(model.pc, [prog_insn])
snippet.insert_into_program(program)
# Then update the model with the instruction and update the model PC
model.update_for_insn(prog_insn)
model.pc += 4
return (snippet, model)
def fill_insn(self, insn: Insn, model: Model) -> Optional[ProgInsn]:
'''Try to fill out an instruction
This might fail if, for example, the model doesn't have enough
registers with architectural values. In that case, return None.
Note that there are some instructions that will never return None. For
example, addi, which can always expand to "addi x0, x0, 0" (also known
as nop).
'''
# Special-case BN load/store instructions by mnemonic. These use
# complicated indirect addressing, so it's probably more sensible to
# give them special code.
if insn.mnemonic in ['bn.lid', 'bn.sid']:
return self._fill_bn_xid(insn, model)
if insn.mnemonic == 'bn.movr':
return self._fill_bn_movr(insn, model)
# If this is not an LSU operation, or it is an LSU operation that
# operates on CSR/WSRs, we can pick operands independently.
if insn.lsu is None:
return self._fill_non_lsu_insn(insn, model)
return self._fill_lsu_insn(insn, model)
def _fill_non_lsu_insn(self,
insn: Insn,
model: Model) -> Optional[ProgInsn]:
'''Fill out an instruction with no LSU component'''
assert insn.lsu is None
# For each operand, pick a value that's allowed by the model (i.e.
# one that won't trigger any undefined behaviour)
op_vals = []
for operand in insn.operands:
op_val = model.pick_operand_value(operand.op_type)
if op_val is None:
return None
op_vals.append(op_val)
assert len(op_vals) == len(insn.operands)
return ProgInsn(insn, op_vals, None)
def _pick_gpr_for_indirect_wdr(self,
known_regs: List[Tuple[int, int]],
model: Model,
wdr_is_src: bool) -> Optional[int]:
'''Return index of a GPR pointing to a WDR with architectural value
Here, the GPR value must be known and must be at most 31. If wdr_is_src
then the WDR that it points to must also have an architectural value.
'''
arch_wdrs = set(model.regs_with_architectural_vals('wdr'))
valid_gprs = []
for gpr, gpr_val in known_regs:
if gpr_val is None or gpr_val > 31:
continue
if wdr_is_src and gpr_val not in arch_wdrs:
continue
valid_gprs.append(gpr)
if not valid_gprs:
return None
return random.choices(valid_gprs)[0]
def _pick_inc_vals(self,
idx0: int,
idx1: int,
model: Model) -> Tuple[int, int]:
'''Pick two values in 0, 1 that aren't both 1
These are appropriate to use as the increment flags for
BN.LID/BN.SID/BN.MOVR. idx0 and idx1 are the indices of the GPRs in
question.
'''
options = [(0, 0), (1, 0), (0, 1)]
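# (1, 1) is deliberately excluded: at most one GPR may increment, and a
# register the model requires to stay constant gets a zero selection weight.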
wt10 = 0.0 if model.is_const('gpr', idx0) else 1.0
wt01 = 0.0 if model.is_const('gpr', idx1) else 1.0
return random.choices(options, weights=[1.0, wt10, wt01])[0]
def _fill_bn_xid(self, insn: Insn, model: Model) -> Optional[ProgInsn]:
'''Fill out a BN.LID or BN.SID instruction'''
if insn.mnemonic == 'bn.lid':
is_load = True
# bn.lid expects the operands: grd, grs1, offset, grs1_inc, grd_inc
if len(insn.operands) != 5:
raise RuntimeError('Unexpected number of operands for bn.lid')
grd, grs1, offset, grs1_inc, grd_inc = insn.operands
exp_shape = (
# grd
isinstance(grd.op_type, RegOperandType) and
grd.op_type.reg_type == 'gpr' and
not grd.op_type.is_dest() and
# grs1
isinstance(grs1.op_type, RegOperandType) and
grs1.op_type.reg_type == 'gpr' and
not grs1.op_type.is_dest() and
# offset
isinstance(offset.op_type, ImmOperandType) and
offset.op_type.signed and
# grs1_inc
isinstance(grs1_inc.op_type, OptionOperandType) and
# grd_inc
isinstance(grd_inc.op_type, OptionOperandType)
)
else:
assert insn.mnemonic == 'bn.sid'
is_load = False
# bn.sid expects the operands: grs1, grs2, offset, grs1_inc,
# grs2_inc
if len(insn.operands) != 5:
raise RuntimeError('Unexpected number of operands for bn.sid')
grs1, grs2, offset, grs1_inc, grs2_inc = insn.operands
exp_shape = (
# grs1
isinstance(grs1.op_type, RegOperandType) and
grs1.op_type.reg_type == 'gpr' and
not grs1.op_type.is_dest() and
# grs2
isinstance(grs2.op_type, RegOperandType) and
grs2.op_type.reg_type == 'gpr' and
not grs2.op_type.is_dest() and
# offset
isinstance(offset.op_type, ImmOperandType) and
offset.op_type.signed and
# grs1_inc
isinstance(grs1_inc.op_type, OptionOperandType) and
# grs2_inc
isinstance(grs2_inc.op_type, OptionOperandType)
)
if not exp_shape:
raise RuntimeError('Unexpected shape for {}'.format(insn.mnemonic))
# Assertions to guide mypy
assert isinstance(offset.op_type, ImmOperandType)
known_regs = model.regs_with_known_vals('gpr')
wdr_gpr_idx = self._pick_gpr_for_indirect_wdr(known_regs, model,
not is_load)
if wdr_gpr_idx is None:
return None
# Now pick the source register and offset. The range for offset
# shouldn't be none (because we know the width of the underlying bit
# field).
offset_rng = offset.op_type.get_op_val_range(model.pc)
assert offset_rng is not None
op_to_known_regs = {'grs1': known_regs}
tgt = model.pick_lsu_target('dmem',
is_load,
op_to_known_regs,
offset_rng,
offset.op_type.shift,
32)
if tgt is None:
return None
addr, imm_val, reg_indices = tgt
assert offset_rng[0] <= imm_val <= offset_rng[1]
assert list(reg_indices.keys()) == ['grs1']
grs1_idx = reg_indices['grs1']
offset_val = offset.op_type.op_val_to_enc_val(imm_val, model.pc)
assert offset_val is not None
# Do we increment the GPRs? We can increment up to one of them.
grs1_inc_val, wdr_gpr_inc_val = \
self._pick_inc_vals(grs1_idx, wdr_gpr_idx, model)
# Finally, package up the operands properly for the instruction we're
# building.
if is_load:
# bn.lid: grd, grs1, offset, grs1_inc, grd_inc
enc_vals = [wdr_gpr_idx, grs1_idx, offset_val,
grs1_inc_val, wdr_gpr_inc_val]
else:
# bn.sid: grs1, grs2, offset, grs1_inc, grs2_inc
enc_vals = [grs1_idx, wdr_gpr_idx, offset_val,
grs1_inc_val, wdr_gpr_inc_val]
return ProgInsn(insn, enc_vals, ('dmem', addr))
def _fill_bn_movr(self, insn: Insn, model: Model) -> Optional[ProgInsn]:
'''Fill out a BN.MOVR instruction'''
# bn.lid expects the operands: grd, grs, grd_inc, grs_inc
if len(insn.operands) != 4:
raise RuntimeError('Unexpected number of operands for bn.movr')
grd, grs, grd_inc, grs_inc = insn.operands
exp_shape = (
# grd
isinstance(grd.op_type, RegOperandType) and
grd.op_type.reg_type == 'gpr' and
not grd.op_type.is_dest() and
# grs
isinstance(grs.op_type, RegOperandType) and
grs.op_type.reg_type == 'gpr' and
not grs.op_type.is_dest() and
# grd_inc
isinstance(grd_inc.op_type, OptionOperandType) and
# grs_inc
isinstance(grs_inc.op_type, OptionOperandType)
)
if not exp_shape:
raise RuntimeError('Unexpected shape for bn.movr')
known_regs = model.regs_with_known_vals('gpr')
grs_idx = self._pick_gpr_for_indirect_wdr(known_regs, model, True)
if grs_idx is None:
return None
grd_idx = self._pick_gpr_for_indirect_wdr(known_regs, model, False)
if grd_idx is None:
return None
grd_inc_val, grs_inc_val = self._pick_inc_vals(grd_idx, grs_idx, model)
return ProgInsn(insn,
[grd_idx, grs_idx, grd_inc_val, grs_inc_val],
None)
def _fill_lsu_insn(self, insn: Insn, model: Model) -> Optional[ProgInsn]:
'''Fill out a generic load/store instruction'''
# If this is an LSU operation, then the target address is given by the
# sum of one or more operands. For each of these operands with a
# register type, we are going to need to look in the model to figure
# out the list of different known values we can give it. At the moment,
# we only support the case when there is at most one non-register
# operand, which must be an immediate. Grab that operand's name too.
lsu_imm_op = None
lsu_reg_ops = []
lsu_reg_types = set()
imm_op_range = (0, 0)
imm_op_shift = 0
assert insn.lsu is not None
for tgt_op_name in insn.lsu.target:
tgt_op = insn.name_to_operand[tgt_op_name]
if isinstance(tgt_op.op_type, ImmOperandType):
if lsu_imm_op is not None:
raise RuntimeError('Multiple immediate operands '
'contribute to target for instruction '
'{!r}. Not currently supported.'
.format(insn.mnemonic))
lsu_imm_op = tgt_op_name
rng = tgt_op.op_type.get_op_val_range(model.pc)
if rng is None:
assert tgt_op.op_type.width is None
raise RuntimeError('The {!r} immediate operand for the '
'{!r} instruction contributes to its '
'LSU target but has no width.'
.format(tgt_op_name, insn.mnemonic))
imm_op_range = rng
imm_op_shift = tgt_op.op_type.shift
continue
if isinstance(tgt_op.op_type, RegOperandType):
reg_type = tgt_op.op_type.reg_type
lsu_reg_ops.append((tgt_op_name, reg_type))
lsu_reg_types.add(reg_type)
continue
raise RuntimeError('Unknown operand type for {!r} operand of '
'{!r} instruction: {}.'
.format(tgt_op_name, insn.mnemonic,
type(tgt_op.op_type).__name__))
# We have a list of register operands, together with their types. Get a
# list of registers with known values for each register type we've seen.
known_regs_by_type = {rtype: model.regs_with_known_vals(rtype)
for rtype in lsu_reg_types}
# And turn that into a dict keyed by operand name
op_to_known_regs = {op_name: known_regs_by_type[op_type]
for op_name, op_type in lsu_reg_ops}
# Ask the model to try to find a target we can use. If this is a load
# or a CSR operation, it will have to be an address that already has an
# architectural value. If a store, it can be any address in range.
#
# We cheat a bit for WSR stores. These don't actually load a value, but
# we still want to make sure we pick a valid WSR index, so we claim we
# loaded one anyway.
lsu_type_to_info = {
'mem-load': ('dmem', True),
'mem-store': ('dmem', False),
'csr': ('csr', True),
'wsr-load': ('wsr', True),
'wsr-store': ('wsr', True)
}
assert set(lsu_type_to_info.keys()) == set(LSUDesc.TYPES)
mem_type, loads_value = lsu_type_to_info[insn.lsu.lsu_type]
tgt = model.pick_lsu_target(mem_type,
loads_value,
op_to_known_regs,
imm_op_range,
imm_op_shift,
insn.lsu.idx_width)
if tgt is None:
return None
addr, imm_val, reg_indices = tgt
assert imm_op_range[0] <= imm_val <= imm_op_range[1]
enc_vals = []
for operand in insn.operands:
# Is this the immediate? If the immediate operand is signed then
# note that imm_op_min < 0 and we might have that imm_val < 0.
# However, we store everything as an enc_val, so we have to
# "re-encode" here.
if operand.name == lsu_imm_op:
enc_val = operand.op_type.op_val_to_enc_val(imm_val, model.pc)
assert enc_val is not None
enc_vals.append(enc_val)
continue
# Or is it a register operand contributing to the target address?
reg_val = reg_indices.get(operand.name)
if reg_val is not None:
enc_vals.append(reg_val)
continue
# Otherwise it's some other operand. Pick any old value.
val = model.pick_operand_value(operand.op_type)
if val is None:
return None
enc_vals.append(val)
assert len(enc_vals) == len(insn.operands)
return ProgInsn(insn, enc_vals, (mem_type, addr))
| 40.890756 | 80 | 0.569307 |
4a1de125d05c0c36dfe539886cadbfcc8f39cc36 | 1,067 | py | Python | test/test_account_response_body.py | mxenabled/mx-platform-python | 060dae7ddb02fdcf41fa7f7aebfa4b8a0273afac | ["MIT"] | null | null | null | test/test_account_response_body.py | mxenabled/mx-platform-python | 060dae7ddb02fdcf41fa7f7aebfa4b8a0273afac | ["MIT"] | 14 | 2021-11-30T21:56:19.000Z | 2022-02-07T18:47:10.000Z | test/test_account_response_body.py | mxenabled/mx-platform-python | 060dae7ddb02fdcf41fa7f7aebfa4b8a0273afac | ["MIT"] | 1 | 2022-01-12T14:59:39.000Z | 2022-01-12T14:59:39.000Z |
"""
MX Platform API
The MX Platform API is a powerful, fully-featured API designed to make aggregating and enhancing financial data easy and reliable. It can seamlessly connect your app or website to tens of thousands of financial institutions. # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import mx_platform_python
from mx_platform_python.model.account_response import AccountResponse
globals()['AccountResponse'] = AccountResponse
from mx_platform_python.model.account_response_body import AccountResponseBody
class TestAccountResponseBody(unittest.TestCase):
"""AccountResponseBody unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAccountResponseBody(self):
"""Test AccountResponseBody"""
# FIXME: construct object with mandatory attributes with example values
# model = AccountResponseBody() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 28.078947 | 242 | 0.737582 |
4a1de2a9adf4e69c067f73de6056ca17154fd7a4 | 1,198 | py | Python | music_site/tracks/models.py | UVG-Teams/music-space | 8f464b6b1cbe59afea3be3ab1b9ed4e25ab0b424 | ["MIT"] | null | null | null | music_site/tracks/models.py | UVG-Teams/music-space | 8f464b6b1cbe59afea3be3ab1b9ed4e25ab0b424 | ["MIT"] | null | null | null | music_site/tracks/models.py | UVG-Teams/music-space | 8f464b6b1cbe59afea3be3ab1b9ed4e25ab0b424 | ["MIT"] | null | null | null |
from django.db import models
#Track
class Track(models.Model):
id = models.IntegerField(primary_key=True, blank=False, null=False)
name = models.CharField(max_length=200, blank=False, null=False)
composer = models.CharField(max_length=220, blank=True, null=True)
milliseconds = models.IntegerField(blank=False, null=False)
bytes = models.IntegerField()
unitprice = models.DecimalField(max_digits=10, decimal_places=2)
active = models.BooleanField(default=True)
albumid = models.ForeignKey("albums.Album", on_delete=models.SET_NULL, blank=True, null=True, db_column="albumid") #albumid = models.IntegerField()
genreid = models.ForeignKey("genres.Genre", on_delete=models.SET_NULL, blank=True, null=True, db_column="genreid") #genreid = models.IntegerField()
mediatypeid = models.ForeignKey("mediaTypes.MediaType", on_delete=models.SET_NULL, blank=True, null=True, db_column="mediatypeid") #mediatypeid = models.IntegerField(blank=False, null=False)
url = models.URLField(null=True)
class Meta:
db_table = 'track'
def __str__(self):
return "{id} - {name}".format(
id = self.id,
name = self.name
)
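# Illustrative usage (hypothetical values; the FK fields may be left null):
# Track.objects.create(id=1, name='Intro', milliseconds=180000,
# bytes=3145728, unitprice=0.99)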
| 47.92 | 194 | 0.711185 |
4a1de2df1781101d2753dc9e4c4d6261390d79ae | 933 | py | Python | adventure_game/game_objects/puzzles/bathtub_dilemma.py | Def4l71diot/adventure-game-base | 6f62ecf07980beba8804114b007d6bd7a4f55cc7 | ["MIT"] | 2 | 2017-10-23T14:40:35.000Z | 2017-10-23T14:40:38.000Z | adventure_game/game_objects/puzzles/bathtub_dilemma.py | Def4l71diot/A-Day-in-the-Kremlin | 6f62ecf07980beba8804114b007d6bd7a4f55cc7 | ["MIT"] | null | null | null | adventure_game/game_objects/puzzles/bathtub_dilemma.py | Def4l71diot/A-Day-in-the-Kremlin | 6f62ecf07980beba8804114b007d6bd7a4f55cc7 | ["MIT"] | null | null | null |
from adventure_game.models import Puzzle
from adventure_game.exceptions import PlayerDeadException
NOT_ENOUGH_BUBBLES_ERROR_MESSAGE = "YOU'VE FAILED COMRADE! Stalin loves bubbles. Try with more next time. GULAG!"
FLOODED_BATH_ERROR_MESSAGE = "The whole room turns into a bathtub. Stalin rushes in with fury and sends you to GULAG!"
class BathtubDilemma(Puzzle):
def __init__(self):
super().__init__("bathtub_dilemma")
def answer_is_correct(self, answer):
correct_answer = int(self.correct_answer)
try:
answer = int(answer)
except ValueError:
return False
if answer == correct_answer:
self._is_solved = True
return True
if answer < correct_answer:
raise PlayerDeadException(NOT_ENOUGH_BUBBLES_ERROR_MESSAGE)
if answer > correct_answer:
raise PlayerDeadException(FLOODED_BATH_ERROR_MESSAGE)
| 32.172414 | 118 | 0.700965 |
4a1de3e84b2a1c3d9a27c48da319a50b1dd7bb64 | 31 | py | Python | codigoRama.py | RobertVJ/HCAP2021 | a5176d5785c85aa9f27b04ce85570f831c9210ae | ["MIT"] | null | null | null | codigoRama.py | RobertVJ/HCAP2021 | a5176d5785c85aa9f27b04ce85570f831c9210ae | ["MIT"] | null | null | null | codigoRama.py | RobertVJ/HCAP2021 | a5176d5785c85aa9f27b04ce85570f831c9210ae | ["MIT"] | null | null | null |
print("Archivo nuevo en rama")
| 15.5 | 30 | 0.741935 |
4a1de46b11cd64204597e55ead982f68e7260135 | 2,563 | py | Python | src/seller_products/migrations/0006_auto_20211007_2124.py | evis-market/web-interface-backend | f8930ff1c009ad18e522ab29680b4bcd50a6020e | ["MIT"] | 2 | 2021-08-30T22:58:32.000Z | 2021-12-12T10:47:52.000Z | src/seller_products/migrations/0006_auto_20211007_2124.py | evis-market/web-interface-backend | f8930ff1c009ad18e522ab29680b4bcd50a6020e | ["MIT"] | null | null | null | src/seller_products/migrations/0006_auto_20211007_2124.py | evis-market/web-interface-backend | f8930ff1c009ad18e522ab29680b4bcd50a6020e | ["MIT"] | 1 | 2021-08-22T19:12:44.000Z | 2021-08-22T19:12:44.000Z |
# Generated by Django 3.2.7 on 2021-10-07 21:24
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product_data_types', '0001_initial'),
('data_delivery_types', '0001_initial'),
('seller_products', '0005_auto_20211004_1808'),
]
operations = [
migrations.AlterField(
model_name='sellerproductdatasample',
name='data_delivery_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='data_delivery_types.datadeliverytype'),
),
migrations.AlterField(
model_name='sellerproductdatasample',
name='data_format',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='product_data_types.dataformat'),
),
migrations.AlterField(
model_name='sellerproductdatasamplearchive',
name='data_delivery_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='data_delivery_types.datadeliverytype'),
),
migrations.AlterField(
model_name='sellerproductdatasamplearchive',
name='data_format',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='product_data_types.dataformat'),
),
migrations.AlterField(
model_name='sellerproductdataurl',
name='data_delivery_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='data_delivery_types.datadeliverytype'),
),
migrations.AlterField(
model_name='sellerproductdataurl',
name='data_format',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='product_data_types.dataformat'),
),
migrations.AlterField(
model_name='sellerproductdataurlarchive',
name='data_delivery_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='data_delivery_types.datadeliverytype'),
),
migrations.AlterField(
model_name='sellerproductdataurlarchive',
name='data_format',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='product_data_types.dataformat'),
),
]
| 44.964912 | 147 | 0.668748 |
4a1de56e918a3de42da4510f054e9b33962de358 | 40,971 | py | Python | python/ccxt/exmo.py | gslopez/ccxt | 2ea6857dce788f1f556ee94f08a58757decf0900 | ["MIT"] | 13 | 2019-01-26T14:41:37.000Z | 2022-03-26T03:33:12.000Z | python/ccxt/exmo.py | gslopez/ccxt | 2ea6857dce788f1f556ee94f08a58757decf0900 | ["MIT"] | 17 | 2018-10-02T04:43:13.000Z | 2018-11-01T17:07:37.000Z | python/ccxt/exmo.py | gslopez/ccxt | 2ea6857dce788f1f556ee94f08a58757decf0900 | ["MIT"] | 12 | 2018-12-24T02:19:02.000Z | 2022-03-26T05:04:25.000Z |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 2
except NameError:
basestring = str # Python 3
import hashlib
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
class exmo (Exchange):
def describe(self):
return self.deep_extend(super(exmo, self).describe(), {
'id': 'exmo',
'name': 'EXMO',
'countries': ['ES', 'RU'], # Spain, Russia
'rateLimit': 350, # once every 350 ms ≈ 180 requests per minute ≈ 3 requests per second
'version': 'v1',
'has': {
'CORS': False,
'fetchClosedOrders': 'emulated',
'fetchDepositAddress': True,
'fetchOpenOrders': True,
'fetchOrder': 'emulated',
'fetchOrders': 'emulated',
'fetchOrderTrades': True,
'fetchOrderBooks': True,
'fetchMyTrades': True,
'fetchTickers': True,
'withdraw': True,
'fetchTradingFees': True,
'fetchFundingFees': True,
'fetchCurrencies': True,
'fetchTransactions': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766491-1b0ea956-5eda-11e7-9225-40d67b481b8d.jpg',
'api': {
'public': 'https://api.exmo.com',
'private': 'https://api.exmo.com',
'web': 'https://exmo.me',
},
'www': 'https://exmo.me',
'referral': 'https://exmo.me/?ref=131685',
'doc': [
'https://exmo.me/en/api_doc?ref=131685',
'https://github.com/exmo-dev/exmo_api_lib/tree/master/nodejs',
],
'fees': 'https://exmo.com/en/docs/fees',
},
'api': {
'web': {
'get': [
'ctrl/feesAndLimits',
'en/docs/fees',
],
},
'public': {
'get': [
'currency',
'order_book',
'pair_settings',
'ticker',
'trades',
],
},
'private': {
'post': [
'user_info',
'order_create',
'order_cancel',
'user_open_orders',
'user_trades',
'user_cancelled_orders',
'order_trades',
'required_amount',
'deposit_address',
'withdraw_crypt',
'withdraw_get_txid',
'excode_create',
'excode_load',
'wallet_history',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.2 / 100,
'taker': 0.2 / 100,
},
'funding': {
'tierBased': False,
'percentage': False, # fixed funding fees for crypto, see fetchFundingFees below
},
},
'exceptions': {
'40005': AuthenticationError, # Authorization error, incorrect signature
'40009': InvalidNonce, #
'40015': ExchangeError, # API function do not exist
'40016': ExchangeNotAvailable, # Maintenance work in progress
'40017': AuthenticationError, # Wrong API Key
'50052': InsufficientFunds,
'50054': InsufficientFunds,
'50304': OrderNotFound, # "Order was not found '123456789'"(fetching order trades for an order that does not have trades yet)
'50173': OrderNotFound, # "Order with id X was not found."(cancelling non-existent, closed and cancelled order)
'50319': InvalidOrder, # Price by order is less than permissible minimum for self pair
'50321': InvalidOrder, # Price by order is more than permissible maximum for self pair
},
})
def fetch_trading_fees(self, params={}):
response = None
oldParseJsonResponse = self.parseJsonResponse
try:
self.parseJsonResponse = False
response = self.webGetEnDocsFees(params)
self.parseJsonResponse = oldParseJsonResponse
except Exception as e:
# ensure parseJsonResponse is restored no matter what
self.parseJsonResponse = oldParseJsonResponse
raise e
parts = response.split('<td class="th_fees_2" colspan="2">')
numParts = len(parts)
if numParts != 2:
raise ExchangeError(self.id + ' fetchTradingFees format has changed')
rest = parts[1]
parts = rest.split('</td>')
numParts = len(parts)
if numParts < 2:
raise ExchangeError(self.id + ' fetchTradingFees format has changed')
fee = float(parts[0].replace('%', '')) * 0.01
taker = fee
maker = fee
return {
'info': response,
'maker': maker,
'taker': taker,
}
def parse_fixed_float_value(self, input):
if (input is None) or (input == '-'):
return None
isPercentage = (input.find('%') >= 0)
parts = input.split(' ')
value = parts[0].replace('%', '')
result = float(value)
if (result > 0) and isPercentage:
raise ExchangeError(self.id + ' parseFixedFloatValue detected an unsupported non-zero percentage-based fee ' + input)
return result
def fetch_funding_fees(self, params={}):
response = self.webGetCtrlFeesAndLimits(params)
#
# {success: 1,
# ctlr: "feesAndLimits",
# error: "",
# data: {limits: [{ pair: "BTC/USD",
# min_q: "0.001",
# max_q: "100",
# min_p: "1",
# max_p: "30000",
# min_a: "1",
# max_a: "200000" },
# { pair: "KICK/ETH",
# min_q: "100",
# max_q: "200000",
# min_p: "0.000001",
# max_p: "1",
# min_a: "0.0001",
# max_a: "100" } ],
# fees: [{group: "crypto",
# title: "Криптовалюта",
# items: [{prov: "BTC", dep: "0%", wd: "0.0005 BTC"},
# {prov: "LTC", dep: "0%", wd: "0.01 LTC"},
# {prov: "DOGE", dep: "0%", wd: "1 Doge"},
# {prov: "DASH", dep: "0%", wd: "0.01 DASH"},
# {prov: "ETH", dep: "0%", wd: "0.01 ETH"},
# {prov: "WAVES", dep: "0%", wd: "0.001 WAVES"},
# {prov: "ZEC", dep: "0%", wd: "0.001 ZEC"},
# {prov: "USDT", dep: "5 USDT", wd: "5 USDT"},
# {prov: "NEO", dep: "0%", wd: "0%"},
# {prov: "GAS", dep: "0%", wd: "0%"},
# {prov: "ZRX", dep: "0%", wd: "1 ZRX"},
# {prov: "GNT", dep: "0%", wd: "1 GNT"}]},
# {group: "usd",
# title: "USD",
# items: [{prov: "AdvCash", dep: "1%", wd: "3%"},
# {prov: "Perfect Money", dep: "-", wd: "1%"},
# {prov: "Neteller", dep: "3.5% + 0.29 USD, wd: "1.95%"},
# {prov: "Wire Transfer", dep: "0%", wd: "1% + 20 USD"},
# {prov: "CryptoCapital", dep: "0.5%", wd: "1.9%"},
# {prov: "Skrill", dep: "3.5% + 0.36 USD", wd: "3%"},
# {prov: "Payeer", dep: "1.95%", wd: "3.95%"},
# {prov: "Visa/MasterCard(Simplex)", dep: "6%", wd: "-"}]},
# {group: "eur",
# title: "EUR",
# items: [{prov: "CryptoCapital", dep: "0%", wd: "-"},
# {prov: "SEPA", dep: "25 EUR", wd: "1%"},
# {prov: "Perfect Money", dep: "-", wd: "1.95%"},
# {prov: "Neteller", dep: "3.5%+0.25 EUR", wd: "1.95%"},
# {prov: "Payeer", dep: "2%", wd: "1%"},
# {prov: "AdvCash", dep: "1%", wd: "3%"},
# {prov: "Skrill", dep: "3.5% + 0.29 EUR", wd: "3%"},
# {prov: "Rapid Transfer", dep: "1.5% + 0.29 EUR", wd: "-"},
# {prov: "MisterTango SEPA", dep: "5 EUR", wd: "1%"},
# {prov: "Visa/MasterCard(Simplex)", dep: "6%", wd: "-"}]},
# {group: "rub",
# title: "RUB",
# items: [{prov: "Payeer", dep: "2.45%", wd: "5.95%"},
# {prov: "Yandex Money", dep: "4.5%", wd: "-"},
# {prov: "AdvCash", dep: "1.45%", wd: "5.45%"},
# {prov: "Qiwi", dep: "4.95%", wd: "-"},
# {prov: "Visa/Mastercard", dep: "-", wd: "6.95% + 100 RUB" }]},
# {group: "pln",
# title: "PLN",
# items: [{prov: "Neteller", dep: "3.5% + 4 PLN", wd: "-"},
# {prov: "Rapid Transfer", dep: "1.5% + 1.21 PLN", wd: "-"},
# {prov: "CryptoCapital", dep: "20 PLN", wd: "-"},
# {prov: "Skrill", dep: "3.5% + 1.21 PLN", wd: "-"},
# {prov: "Visa/MasterCard(Simplex)", dep: "6%", wd: "-"}]},
# {group: "uah",
# title: "UAH",
# items: [{prov: "AdvCash", dep: "1%", wd: "6%"},
# {prov: "Visa/MasterCard", dep: "2.6%", wd: "8% + 30 UAH"}]}]} }
#
#
# the code below assumes all non-zero crypto fees are fixed(for now)
withdraw = {}
deposit = {}
groups = self.safe_value(response['data'], 'fees')
groupsByGroup = self.index_by(groups, 'group')
items = groupsByGroup['crypto']['items']
for i in range(0, len(items)):
item = items[i]
code = self.common_currency_code(self.safe_string(item, 'prov'))
withdraw[code] = self.parse_fixed_float_value(self.safe_string(item, 'wd'))
deposit[code] = self.parse_fixed_float_value(self.safe_string(item, 'dep'))
result = {
'info': response,
'withdraw': withdraw,
'deposit': deposit,
}
# cache them for later use
self.options['fundingFees'] = result
return result
def fetch_currencies(self, params={}):
fees = self.fetch_funding_fees(params)
# todo redesign the 'fee' property in currencies
ids = list(fees['withdraw'].keys())
limitsByMarketId = self.index_by(fees['info']['data']['limits'], 'pair')
marketIds = list(limitsByMarketId.keys())
minAmounts = {}
minPrices = {}
minCosts = {}
maxAmounts = {}
maxPrices = {}
maxCosts = {}
for i in range(0, len(marketIds)):
marketId = marketIds[i]
limit = limitsByMarketId[marketId]
baseId, quoteId = marketId.split('/')
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
maxAmount = self.safe_float(limit, 'max_q')
maxPrice = self.safe_float(limit, 'max_p')
maxCost = self.safe_float(limit, 'max_a')
minAmount = self.safe_float(limit, 'min_q')
minPrice = self.safe_float(limit, 'min_p')
minCost = self.safe_float(limit, 'min_a')
minAmounts[base] = min(self.safe_float(minAmounts, base, minAmount), minAmount)
maxAmounts[base] = max(self.safe_float(maxAmounts, base, maxAmount), maxAmount)
minPrices[quote] = min(self.safe_float(minPrices, quote, minPrice), minPrice)
minCosts[quote] = min(self.safe_float(minCosts, quote, minCost), minCost)
maxPrices[quote] = max(self.safe_float(maxPrices, quote, maxPrice), maxPrice)
maxCosts[quote] = max(self.safe_float(maxCosts, quote, maxCost), maxCost)
result = {}
for i in range(0, len(ids)):
id = ids[i]
code = self.common_currency_code(id)
fee = self.safe_value(fees['withdraw'], code)
active = True
result[code] = {
'id': id,
'code': code,
'name': code,
'active': active,
'fee': fee,
'precision': 8,
'limits': {
'amount': {
'min': self.safe_float(minAmounts, code),
'max': self.safe_float(maxAmounts, code),
},
'price': {
'min': self.safe_float(minPrices, code),
'max': self.safe_float(maxPrices, code),
},
'cost': {
'min': self.safe_float(minCosts, code),
'max': self.safe_float(maxCosts, code),
},
},
'info': fee,
}
return result
def fetch_markets(self):
fees = self.fetch_trading_fees()
markets = self.publicGetPairSettings()
keys = list(markets.keys())
result = []
for p in range(0, len(keys)):
id = keys[p]
market = markets[id]
symbol = id.replace('_', '/')
base, quote = symbol.split('/')
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'active': True,
'taker': fees['taker'],
'maker': fees['maker'],
'limits': {
'amount': {
'min': self.safe_float(market, 'min_quantity'),
'max': self.safe_float(market, 'max_quantity'),
},
'price': {
'min': self.safe_float(market, 'min_price'),
'max': self.safe_float(market, 'max_price'),
},
'cost': {
'min': self.safe_float(market, 'min_amount'),
'max': self.safe_float(market, 'max_amount'),
},
},
'precision': {
'amount': 8,
'price': 8,
},
'info': market,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostUserInfo(params)
result = {'info': response}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
account = self.account()
if currency in response['balances']:
account['free'] = float(response['balances'][currency])
if currency in response['reserved']:
account['used'] = float(response['reserved'][currency])
account['total'] = self.sum(account['free'], account['used'])
result[currency] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = self.extend({
'pair': market['id'],
}, params)
if limit is not None:
request['limit'] = limit
response = self.publicGetOrderBook(request)
result = response[market['id']]
return self.parse_order_book(result, None, 'bid', 'ask')
def fetch_order_books(self, symbols=None, params={}):
self.load_markets()
ids = None
if symbols is None:
ids = ','.join(self.ids)
# max URL length is 2083 symbols, including http schema, hostname, tld, etc...
if len(ids) > 2048:
numIds = len(self.ids)
raise ExchangeError(self.id + ' has ' + str(numIds) + ' symbols exceeding max URL length, you are required to specify a list of symbols in the first argument to fetchOrderBooks')
else:
ids = self.market_ids(symbols)
ids = ','.join(ids)
response = self.publicGetOrderBook(self.extend({
'pair': ids,
}, params))
result = {}
ids = list(response.keys())
for i in range(0, len(ids)):
id = ids[i]
symbol = self.find_symbol(id)
result[symbol] = self.parse_order_book(response[id], None, 'bid', 'ask')
return result
def parse_ticker(self, ticker, market=None):
timestamp = ticker['updated'] * 1000
symbol = None
if market:
symbol = market['symbol']
last = self.safe_float(ticker, 'last_trade')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy_price'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'sell_price'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': self.safe_float(ticker, 'avg'),
'baseVolume': self.safe_float(ticker, 'vol'),
'quoteVolume': self.safe_float(ticker, 'vol_curr'),
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetTicker(params)
result = {}
ids = list(response.keys())
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = response[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
response = self.publicGetTicker(params)
market = self.market(symbol)
return self.parse_ticker(response[market['id']], market)
def parse_trade(self, trade, market=None):
timestamp = trade['date'] * 1000
fee = None
symbol = None
id = self.safe_string(trade, 'trade_id')
orderId = self.safe_string(trade, 'order_id')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'quantity')
cost = self.safe_float(trade, 'amount')
side = self.safe_string(trade, 'type')
type = None
if market is not None:
symbol = market['symbol']
if market['taker'] != market['maker']:
raise ExchangeError(self.id + ' parseTrade can not deduce proper fee costs, taker and maker fees now differ')
if (side == 'buy') and(amount is not None):
fee = {
'currency': market['base'],
'cost': amount * market['taker'],
'rate': market['taker'],
}
elif (side == 'sell') and(cost is not None):
fee = {
'currency': market['quote'],
'cost': amount * market['taker'],
'rate': market['taker'],
}
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetTrades(self.extend({
'pair': market['id'],
}, params))
return self.parse_trades(response[market['id']], market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['pair'] = market['id']
response = self.privatePostUserTrades(self.extend(request, params))
if market is not None:
response = response[market['id']]
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
prefix = (type + '_') if (type == 'market') else ''
market = self.market(symbol)
if (type == 'market') and(price is None):
price = 0
request = {
'pair': market['id'],
'quantity': self.amount_to_precision(symbol, amount),
'type': prefix + side,
'price': self.price_to_precision(symbol, price),
}
response = self.privatePostOrderCreate(self.extend(request, params))
id = self.safe_string(response, 'order_id')
timestamp = self.milliseconds()
amount = float(amount)
price = float(price)
status = 'open'
order = {
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': price * amount,
'amount': amount,
'remaining': amount,
'filled': 0.0,
'fee': None,
'trades': None,
}
self.orders[id] = order
return self.extend({'info': response}, order)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
response = self.privatePostOrderCancel({'order_id': id})
if id in self.orders:
self.orders[id]['status'] = 'canceled'
return response
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
try:
response = self.privatePostOrderTrades({
'order_id': str(id),
})
return self.parse_order(response)
except Exception as e:
if isinstance(e, OrderNotFound):
if id in self.orders:
return self.orders[id]
raise OrderNotFound(self.id + ' fetchOrder order id ' + str(id) + ' not found in cache.')
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
market = None
if symbol is not None:
market = self.market(symbol)
response = self.privatePostOrderTrades({
'order_id': str(id),
})
return self.parse_trades(response, market, since, limit)
def update_cached_orders(self, openOrders, symbol):
# update local cache with open orders
for j in range(0, len(openOrders)):
id = openOrders[j]['id']
self.orders[id] = openOrders[j]
openOrdersIndexedById = self.index_by(openOrders, 'id')
cachedOrderIds = list(self.orders.keys())
for k in range(0, len(cachedOrderIds)):
# match each cached order to an order in the open orders array
# possible reasons why a cached order may be missing in the open orders array:
# - order was closed or canceled -> update cache
# - symbol mismatch(e.g. cached BTC/USDT, fetched ETH/USDT) -> skip
id = cachedOrderIds[k]
order = self.orders[id]
if not(id in list(openOrdersIndexedById.keys())):
# cached order is not in open orders array
# if we fetched orders by symbol and it doesn't match the cached order -> won't update the cached order
if symbol is not None and symbol != order['symbol']:
continue
# order is cached but not present in the list of open orders -> mark the cached order as closed
if order['status'] == 'open':
order = self.extend(order, {
'status': 'closed', # likewise it might have been canceled externally(unnoticed by "us")
'cost': None,
'filled': order['amount'],
'remaining': 0.0,
})
if order['cost'] is None:
if order['filled'] is not None:
order['cost'] = order['filled'] * order['price']
self.orders[id] = order
return self.to_array(self.orders)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
response = self.privatePostUserOpenOrders(params)
marketIds = list(response.keys())
orders = []
for i in range(0, len(marketIds)):
marketId = marketIds[i]
market = None
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
parsedOrders = self.parse_orders(response[marketId], market)
orders = self.array_concat(orders, parsedOrders)
self.update_cached_orders(orders, symbol)
return self.filter_by_symbol_since_limit(self.to_array(self.orders), symbol, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.fetch_orders(symbol, since, limit, params)
orders = self.filter_by(self.orders, 'status', 'open')
return self.filter_by_symbol_since_limit(orders, symbol, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.fetch_orders(symbol, since, limit, params)
orders = self.filter_by(self.orders, 'status', 'closed')
return self.filter_by_symbol_since_limit(orders, symbol, since, limit)
def parse_order(self, order, market=None):
id = self.safe_string(order, 'order_id')
timestamp = self.safe_integer(order, 'created')
if timestamp is not None:
timestamp *= 1000
symbol = None
side = self.safe_string(order, 'type')
if market is None:
marketId = None
if 'pair' in order:
marketId = order['pair']
elif ('in_currency' in list(order.keys())) and('out_currency' in list(order.keys())):
if side == 'buy':
marketId = order['in_currency'] + '_' + order['out_currency']
else:
marketId = order['out_currency'] + '_' + order['in_currency']
if (marketId is not None) and(marketId in list(self.markets_by_id.keys())):
market = self.markets_by_id[marketId]
amount = self.safe_float(order, 'quantity')
if amount is None:
amountField = 'in_amount' if (side == 'buy') else 'out_amount'
amount = self.safe_float(order, amountField)
price = self.safe_float(order, 'price')
cost = self.safe_float(order, 'amount')
filled = 0.0
trades = []
transactions = self.safe_value(order, 'trades')
feeCost = None
if transactions is not None:
if isinstance(transactions, list):
for i in range(0, len(transactions)):
trade = self.parse_trade(transactions[i], market)
if id is None:
id = trade['order']
if timestamp is None:
timestamp = trade['timestamp']
if timestamp > trade['timestamp']:
timestamp = trade['timestamp']
filled += trade['amount']
if feeCost is None:
feeCost = 0.0
feeCost += trade['fee']['cost']
if cost is None:
cost = 0.0
cost += trade['cost']
trades.append(trade)
remaining = None
if amount is not None:
remaining = amount - filled
status = self.safe_string(order, 'status') # in case we need to redefine it for canceled orders
if filled >= amount:
status = 'closed'
else:
status = 'open'
if market is None:
market = self.get_market_from_trades(trades)
feeCurrency = None
if market is not None:
symbol = market['symbol']
feeCurrency = market['quote']
if cost is None:
if price is not None:
cost = price * filled
elif price is None:
if filled > 0:
price = cost / filled
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
return {
'id': id,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': trades,
'fee': fee,
'info': order,
}
def fetch_deposit_address(self, code, params={}):
self.load_markets()
response = self.privatePostDepositAddress(params)
depositAddress = self.safe_string(response, code)
address = None
tag = None
if depositAddress:
addressAndTag = depositAddress.split(',')
address = addressAndTag[0]
numParts = len(addressAndTag)
if numParts > 1:
tag = addressAndTag[1]
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
def get_market_from_trades(self, trades):
tradesBySymbol = self.index_by(trades, 'pair')
symbols = list(tradesBySymbol.keys())
numSymbols = len(symbols)
if numSymbols == 1:
return self.markets[symbols[0]]
return None
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * rate))
key = 'quote'
if side == 'sell':
cost *= price
else:
key = 'base'
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(self.fee_to_precision(symbol, cost)),
}
def withdraw(self, currency, amount, address, tag=None, params={}):
self.load_markets()
request = {
'amount': amount,
'currency': currency,
'address': address,
}
if tag is not None:
request['invoice'] = tag
result = self.privatePostWithdrawCrypt(self.extend(request, params))
return {
'info': result,
'id': result['task_id'],
}
def parse_transaction_status(self, status):
statuses = {
'transferred': 'ok',
'paid': 'ok',
'pending': 'pending',
'processing': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchTransactions
#
# {
# "dt": 1461841192,
# "type": "deposit",
# "curr": "RUB",
# "status": "processing",
# "provider": "Qiwi(LA) [12345]",
# "amount": "1",
# "account": "",
# "txid": "ec46f784ad976fd7f7539089d1a129fe46...",
# }
#
timestamp = self.safe_float(transaction, 'dt')
if timestamp is not None:
timestamp = timestamp * 1000
amount = self.safe_float(transaction, 'amount')
if amount is not None:
amount = abs(amount)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
txid = self.safe_string(transaction, 'txid')
type = self.safe_string(transaction, 'type')
code = self.safe_string(transaction, 'curr')
if currency is None:
currency = self.safe_value(self.currencies_by_id, code)
if currency is not None:
code = currency['code']
else:
code = self.common_currency_code(code)
address = self.safe_string(transaction, 'account')
if address is not None:
parts = address.split(':')
numParts = len(parts)
if numParts == 2:
address = parts[1]
fee = None
# fixed funding fees only(for now)
if not self.fees['funding']['percentage']:
key = 'withdraw' if (type == 'withdrawal') else 'deposit'
feeCost = self.safe_float(self.options['fundingFees'][key], code)
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
'rate': None,
}
return {
'info': transaction,
'id': None,
'currency': code,
'amount': amount,
'address': address,
'tag': None, # refix it properly
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
if since is not None:
request['date'] = int(since / 1000)
currency = None
if code is not None:
currency = self.currency(code)
response = self.privatePostWalletHistory(self.extend(request, params))
#
# {
# "result": True,
# "error": "",
# "begin": "1493942400",
# "end": "1494028800",
# "history": [
# {
# "dt": 1461841192,
# "type": "deposit",
# "curr": "RUB",
# "status": "processing",
# "provider": "Qiwi(LA) [12345]",
# "amount": "1",
# "account": "",
# "txid": "ec46f784ad976fd7f7539089d1a129fe46...",
# },
# {
# "dt": 1463414785,
# "type": "withdrawal",
# "curr": "USD",
# "status": "paid",
# "provider": "EXCODE",
# "amount": "-1",
# "account": "EX-CODE_19371_USDda...",
# "txid": "",
# },
# ],
# }
#
return self.parseTransactions(response['history'], currency, since, limit)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/'
if api != 'web':
url += self.version + '/'
url += path
if (api == 'public') or (api == 'web'):
if params:
url += '?' + self.urlencode(params)
elif api == 'private':
self.check_required_credentials()
nonce = self.nonce()
body = self.urlencode(self.extend({'nonce': nonce}, params))
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Key': self.apiKey,
'Sign': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def nonce(self):
return self.milliseconds()
def handle_errors(self, httpCode, reason, url, method, headers, body):
if not isinstance(body, basestring):
return # fallback to default error handler
if len(body) < 2:
return # fallback to default error handler
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
if 'result' in response:
#
# {"result":false,"error":"Error 50052: Insufficient funds"}
#
success = self.safe_value(response, 'result', False)
if isinstance(success, basestring):
if (success == 'true') or (success == '1'):
success = True
else:
success = False
if not success:
code = None
message = self.safe_string(response, 'error')
errorParts = message.split(':')
numParts = len(errorParts)
if numParts > 1:
errorSubParts = errorParts[0].split(' ')
numSubParts = len(errorSubParts)
code = errorSubParts[1] if (numSubParts > 1) else errorSubParts[0]
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if code in exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
| 42.369183 | 194 | 0.464499 |