max_stars_repo_path (string, 3–269 chars) | max_stars_repo_name (string, 4–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.05M chars) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
---|---|---|---|---|---|---|
27. Remove Element/solution2.py | sunshot/LeetCode | 0 | 3600 | <gh_stars>0
from typing import List
class Solution:
def removeElement(self, nums: List[int], val: int) -> int:
if not nums:
return 0
curr = 0
n = len(nums)
while curr < n:
if nums[curr] == val:
nums[curr] = nums[n-1]
n -= 1
else:
curr += 1
return n
if __name__ == '__main__':
solution = Solution()
nums = [3,2,2,3]
val = 3
ans = solution.removeElement(nums, val)
# print(ans)
print(nums[:ans]) | 3.71875 | 4 |
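A quick sanity check for the swap-with-last solution above (illustrative only, not part of the original file): `removeElement` returns the new logical length and may reorder the surviving elements, so the assertions below compare element counts rather than order.

from collections import Counter

def check(nums, val):
    expected = Counter(x for x in nums if x != val)
    k = Solution().removeElement(nums, val)   # mutates nums in place
    assert Counter(nums[:k]) == expected      # same elements kept, order not guaranteed
    assert val not in nums[:k]

check([3, 2, 2, 3], 3)               # k == 2
check([0, 1, 2, 2, 3, 0, 4, 2], 2)   # k == 5
check([], 1)                         # empty input -> 0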
platformio/commands/home/run.py | Granjow/platformio-core | 4,744 | 3601 | # Copyright (c) 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from urllib.parse import urlparse
import click
import uvicorn
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.responses import PlainTextResponse
from starlette.routing import Mount, Route, WebSocketRoute
from starlette.staticfiles import StaticFiles
from starlette.status import HTTP_403_FORBIDDEN
from platformio.commands.home.rpc.handlers.account import AccountRPC
from platformio.commands.home.rpc.handlers.app import AppRPC
from platformio.commands.home.rpc.handlers.ide import IDERPC
from platformio.commands.home.rpc.handlers.misc import MiscRPC
from platformio.commands.home.rpc.handlers.os import OSRPC
from platformio.commands.home.rpc.handlers.piocore import PIOCoreRPC
from platformio.commands.home.rpc.handlers.project import ProjectRPC
from platformio.commands.home.rpc.server import WebSocketJSONRPCServerFactory
from platformio.compat import aio_get_running_loop
from platformio.exception import PlatformioException
from platformio.package.manager.core import get_core_package_dir
from platformio.proc import force_exit
class ShutdownMiddleware:
def __init__(self, app):
self.app = app
async def __call__(self, scope, receive, send):
if scope["type"] == "http" and b"__shutdown__" in scope.get("query_string", {}):
await shutdown_server()
await self.app(scope, receive, send)
async def shutdown_server(_=None):
aio_get_running_loop().call_later(0.5, force_exit)
return PlainTextResponse("Server has been shutdown!")
async def protected_page(_):
return PlainTextResponse(
"Protected PlatformIO Home session", status_code=HTTP_403_FORBIDDEN
)
def run_server(host, port, no_open, shutdown_timeout, home_url):
contrib_dir = get_core_package_dir("contrib-piohome")
if not os.path.isdir(contrib_dir):
raise PlatformioException("Invalid path to PIO Home Contrib")
ws_rpc_factory = WebSocketJSONRPCServerFactory(shutdown_timeout)
ws_rpc_factory.addObjectHandler(AccountRPC(), namespace="account")
ws_rpc_factory.addObjectHandler(AppRPC(), namespace="app")
ws_rpc_factory.addObjectHandler(IDERPC(), namespace="ide")
ws_rpc_factory.addObjectHandler(MiscRPC(), namespace="misc")
ws_rpc_factory.addObjectHandler(OSRPC(), namespace="os")
ws_rpc_factory.addObjectHandler(PIOCoreRPC(), namespace="core")
ws_rpc_factory.addObjectHandler(ProjectRPC(), namespace="project")
path = urlparse(home_url).path
routes = [
WebSocketRoute(path + "wsrpc", ws_rpc_factory, name="wsrpc"),
Route(path + "__shutdown__", shutdown_server, methods=["POST"]),
Mount(path, StaticFiles(directory=contrib_dir, html=True), name="static"),
]
if path != "/":
routes.append(Route("/", protected_page))
uvicorn.run(
Starlette(
middleware=[Middleware(ShutdownMiddleware)],
routes=routes,
on_startup=[
lambda: click.echo(
"PIO Home has been started. Press Ctrl+C to shutdown."
),
lambda: None if no_open else click.launch(home_url),
],
),
host=host,
port=port,
log_level="warning",
)
| 1.523438 | 2 |
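For orientation, a minimal sketch of starting this server programmatically using the `run_server` signature above; the host, port, and URL are placeholder assumptions, not PlatformIO defaults (normally the `pio home` command supplies these from CLI options).

from platformio.commands.home.run import run_server

# Illustrative values only -- not PlatformIO defaults.
run_server(
    host="127.0.0.1",
    port=8008,
    no_open=True,                       # skip click.launch(home_url)
    shutdown_timeout=0,
    home_url="http://127.0.0.1:8008/",  # path "/" mounts the static files at the root
)
# A POST to http://127.0.0.1:8008/__shutdown__ then hits the shutdown route registered above.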
appengine/components/components/machine_provider/rpc_messages.py | stefb965/luci-py | 1 | 3602 | # Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Messages for the Machine Provider API."""
# pylint: disable=unused-wildcard-import, wildcard-import
from protorpc import messages
from components.machine_provider.dimensions import *
from components.machine_provider.instructions import *
from components.machine_provider.policies import *
class CatalogMachineRetrievalRequest(messages.Message):
"""Represents a request to retrieve a machine from the catalog."""
# Hostname of the machine to retrieve.
hostname = messages.StringField(1, required=True)
# Backend which added the machine.
backend = messages.EnumField(Backend, 2)
class CatalogMachineRetrievalResponse(messages.Message):
"""Represents a response to a catalog machine retrieval request."""
# Dimensions instance specifying what sort of machine this is.
dimensions = messages.MessageField(Dimensions, 1)
# Policies governing this machine.
policies = messages.MessageField(Policies, 2)
# State of the CatalogMachineEntry.
state = messages.StringField(3)
# Cloud Pub/Sub subscription the machine must listen to for instructions.
pubsub_subscription = messages.StringField(4)
# Project the Cloud Pub/Sub subscription exists in.
pubsub_subscription_project = messages.StringField(5)
# Cloud Pub/Sub topic the machine must be subscribed to.
pubsub_topic = messages.StringField(6)
# Project the Cloud Pub/Sub topic exists in.
pubsub_topic_project = messages.StringField(7)
# Timestamp indicating lease expiration seconds from epoch in UTC.
lease_expiration_ts = messages.IntegerField(8)
class CatalogMachineAdditionRequest(messages.Message):
"""Represents a request to add a machine to the catalog.
dimensions.backend must be specified.
dimensions.hostname must be unique per backend.
"""
# Dimensions instance specifying what sort of machine this is.
dimensions = messages.MessageField(Dimensions, 1, required=True)
# Policies instance specifying machine-specific configuration.
policies = messages.MessageField(Policies, 2, required=True)
class CatalogMachineBatchAdditionRequest(messages.Message):
"""Represents a batched set of CatalogMachineAdditionRequests.
dimensions.backend must be specified in each CatalogMachineAdditionRequest.
dimensions.hostname must be unique per backend.
"""
# CatalogMachineAdditionRequest instances to batch together.
requests = messages.MessageField(
CatalogMachineAdditionRequest, 1, repeated=True)
class CatalogMachineDeletionRequest(messages.Message):
"""Represents a request to delete a machine in the catalog."""
# Dimensions instance specifying what sort of machine this is.
dimensions = messages.MessageField(Dimensions, 1, required=True)
class CatalogManipulationRequestError(messages.Enum):
"""Represents an error in a catalog manipulation request."""
# Per backend, hostnames must be unique in the catalog.
HOSTNAME_REUSE = 1
# Tried to lookup an entry that didn't exist.
ENTRY_NOT_FOUND = 2
# Didn't specify a backend.
UNSPECIFIED_BACKEND = 3
# Specified backend didn't match the backend originating the request.
MISMATCHED_BACKEND = 4
# Didn't specify a hostname.
UNSPECIFIED_HOSTNAME = 5
# Proposed Cloud Pub/Sub topic was invalid.
INVALID_TOPIC = 6
# Proposed Cloud Pub/Sub project was invalid.
INVALID_PROJECT = 7
# Didn't specify a Cloud Pub/Sub topic.
UNSPECIFIED_TOPIC = 8
# Attempted to delete a leased machine.
LEASED = 9
class CatalogManipulationResponse(messages.Message):
"""Represents a response to a catalog manipulation request."""
# CatalogManipulationRequestError instance indicating an error with the
# request, or None if there is no error.
error = messages.EnumField(CatalogManipulationRequestError, 1)
# CatalogMachineAdditionRequest this response is in reference to.
machine_addition_request = messages.MessageField(
CatalogMachineAdditionRequest, 2)
# CatalogMachineDeletionRequest this response is in reference to.
machine_deletion_request = messages.MessageField(
CatalogMachineDeletionRequest, 3)
class CatalogBatchManipulationResponse(messages.Message):
"""Represents a response to a batched catalog manipulation request."""
responses = messages.MessageField(
CatalogManipulationResponse, 1, repeated=True)
class LeaseRequest(messages.Message):
"""Represents a request for a lease on a machine."""
# Per-user unique ID used to deduplicate requests.
request_id = messages.StringField(1, required=True)
# Dimensions instance specifying what sort of machine to lease.
dimensions = messages.MessageField(Dimensions, 2, required=True)
# Desired length of the lease in seconds.
duration = messages.IntegerField(3)
# Cloud Pub/Sub topic name to communicate on regarding this request.
pubsub_topic = messages.StringField(4)
# Cloud Pub/Sub project name to communicate on regarding this request.
pubsub_project = messages.StringField(5)
# Instructions to give the machine once it's been leased.
on_lease = messages.MessageField(Instruction, 6)
# UTC seconds from epoch when lease should expire.
lease_expiration_ts = messages.IntegerField(7)
class BatchedLeaseRequest(messages.Message):
"""Represents a batched set of LeaseRequests."""
# LeaseRequest instances to batch together.
requests = messages.MessageField(LeaseRequest, 1, repeated=True)
class LeaseRequestError(messages.Enum):
"""Represents an error in a LeaseRequest."""
# Request IDs are intended to be unique.
# Reusing a request ID in a different request is an error.
REQUEST_ID_REUSE = 1
# Proposed Cloud Pub/Sub topic was invalid.
INVALID_TOPIC = 2
# Proposed Cloud Pub/Sub project was invalid.
INVALID_PROJECT = 3
# Didn't specify a Cloud Pub/Sub topic.
UNSPECIFIED_TOPIC = 4
# Request couldn't be processed in time.
DEADLINE_EXCEEDED = 5
# Miscellaneous transient error.
TRANSIENT_ERROR = 6
# Mutually exclusive duration and lease_expiration_ts both specified.
MUTUAL_EXCLUSION_ERROR = 7
# Proposed duration was zero or negative.
NONPOSITIVE_DEADLINE = 8
# Proposed expiration time is not in the future.
LEASE_EXPIRATION_TS_ERROR = 9
# Neither duration nor lease_expiration_ts were specified.
LEASE_LENGTH_UNSPECIFIED = 10
# Requested lease duration is too long.
LEASE_TOO_LONG = 11
class LeaseRequestState(messages.Enum):
"""Represents the state of a LeaseRequest."""
# LeaseRequest has been received, but not processed yet.
UNTRIAGED = 0
# LeaseRequest is pending provisioning of additional capacity.
PENDING = 1
# LeaseRequest has been fulfilled.
FULFILLED = 2
# LeaseRequest has been denied.
DENIED = 3
class LeaseResponse(messages.Message):
"""Represents a response to a LeaseRequest."""
# SHA-1 identifying the LeaseRequest this response refers to.
request_hash = messages.StringField(1)
# LeaseRequestError instance indicating an error with the request, or None
# if there is no error.
error = messages.EnumField(LeaseRequestError, 2)
# Request ID used by the client to generate the LeaseRequest.
client_request_id = messages.StringField(3, required=True)
# State of the LeaseRequest.
state = messages.EnumField(LeaseRequestState, 4)
# Hostname of the machine available for this request.
hostname = messages.StringField(5)
# Timestamp indicating lease expiration seconds from epoch in UTC.
lease_expiration_ts = messages.IntegerField(6)
class BatchedLeaseResponse(messages.Message):
"""Represents a response to a batched lease request."""
responses = messages.MessageField(LeaseResponse, 1, repeated=True)
class LeaseReleaseRequest(messages.Message):
"""Represents a request to voluntarily cancel a LeaseRequest."""
# Per-user unique ID used to identify the LeaseRequest.
request_id = messages.StringField(1, required=True)
class BatchedLeaseReleaseRequest(messages.Message):
"""Represents a batched set of lease release requests."""
requests = messages.MessageField(LeaseReleaseRequest, 1, repeated=True)
class LeaseReleaseRequestError(messages.Enum):
"""Represents an error in a LeaseReleaseRequest."""
# Request ID referred to non-existent request for this user.
NOT_FOUND = 1
# Request ID referred to an unfulfilled request.
NOT_FULFILLED = 2
# Request ID referred to a fulfilled request whose machine was
# already reclaimed.
ALREADY_RECLAIMED = 3
# Request couldn't be processed in time.
DEADLINE_EXCEEDED = 4
# Miscellaneous transient error.
TRANSIENT_ERROR = 5
class LeaseReleaseResponse(messages.Message):
"""Represents a response to a LeaseReleaseRequest."""
# SHA-1 identifying the LeaseRequest this response refers to.
request_hash = messages.StringField(1)
# LeaseReleaseRequestError indicating an error with the request, or None
# if there is no error.
error = messages.EnumField(LeaseReleaseRequestError, 2)
# Request ID used by the client to generate the LeaseRequest
# referred to by the LeaseReleaseRequest.
client_request_id = messages.StringField(3, required=True)
class BatchedLeaseReleaseResponse(messages.Message):
"""Represents responses to a batched set of lease release requests."""
responses = messages.MessageField(LeaseReleaseResponse, 1, repeated=True)
class MachineInstructionRequest(messages.Message):
"""Represents a request to send an instruction to a leased machine."""
# Request ID for the fulfilled LeaseRequest whose machine should be
# instructed.
request_id = messages.StringField(1, required=True)
# Instruction to send the leased machine.
instruction = messages.MessageField(Instruction, 2)
class MachineInstructionError(messages.Enum):
"""Represents an error in a MachineInstructionRequest."""
# Request ID referred to an unfulfilled request.
NOT_FULFILLED = 1
# Request ID referred to a fulfilled request whose machine was
# already reclaimed.
ALREADY_RECLAIMED = 2
# Invalid instruction for the machine.
INVALID_INSTRUCTION = 3
class MachineInstructionResponse(messages.Message):
"""Represents a response to a MachineInstructionRequest."""
# Request ID used by the client to generate the LeaseRequest for the
# machine being instructed.
client_request_id = messages.StringField(1, required=True)
# MachineInstructionError indicating an error with the request, or None
# if there is no error.
error = messages.EnumField(MachineInstructionError, 2)
class PollRequest(messages.Message):
"""Represents a request to poll for instructions given to a machine."""
# Hostname of the machine whose instructions to retrieve.
hostname = messages.StringField(1, required=True)
# Backend the machine belongs to. Generally required.
backend = messages.EnumField(Backend, 2)
class PollResponse(messages.Message):
"""Represents a response to a request for instructions given to a machine."""
# Instruction given to the machine.
instruction = messages.MessageField(Instruction, 1)
# State of the instruction.
state = messages.StringField(2)
class AckRequest(messages.Message):
"""Represents a request to ack an instruction received by a machine."""
# Hostname of the machine whose instruction to ack.
hostname = messages.StringField(1, required=True)
# Backend the machine belongs to.
backend = messages.EnumField(Backend, 2)
| 1.90625 | 2 |
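A small, hypothetical illustration of how these protorpc messages are instantiated; it only uses fields declared in this module and assumes the wildcard imports above provide `Backend`, `Dimensions`, and friends as usual.

# Sketch only -- not part of the original module.
req = CatalogMachineRetrievalRequest(hostname='swarm-bot-0001')
assert req.is_initialized()            # the required hostname field is set

batch = BatchedLeaseReleaseRequest(requests=[
    LeaseReleaseRequest(request_id='lease-1'),
    LeaseReleaseRequest(request_id='lease-2'),
])
assert len(batch.requests) == 2

assert LeaseRequestError.DEADLINE_EXCEEDED.number == 5   # matches the enum declaration above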
webscraping.py | carvalho-fdec/DesafioDSA | 0 | 3603 | <reponame>carvalho-fdec/DesafioDSA
# webscraping test
import urllib.request
from bs4 import BeautifulSoup
with urllib.request.urlopen('http://www.netvasco.com.br') as url:
page = url.read()
#print(page)
print(url.geturl())
print(url.info())
print(url.getcode())
# Parse the HTML stored in 'page' and keep it as a BeautifulSoup object
soup = BeautifulSoup(page, 'html.parser')
#print(soup.prettify())
print(soup.title)
print(soup.title.string)
print(soup.title.name)
soup_a = soup.find_all('a')[:10]
for a in soup_a:
print(a.get('href'))
print(a.get_text())
| 3.296875 | 3 |
tensorboard/backend/event_processing/data_provider_test.py | hongxu-jia/tensorboard | 1 | 3604 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for `tensorboard.backend.event_processing.data_provider`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorboard import context
from tensorboard.backend.event_processing import data_provider
from tensorboard.backend.event_processing import (
plugin_event_multiplexer as event_multiplexer,
)
from tensorboard.compat.proto import summary_pb2
from tensorboard.data import provider as base_provider
from tensorboard.plugins.graph import metadata as graph_metadata
from tensorboard.plugins.histogram import metadata as histogram_metadata
from tensorboard.plugins.histogram import summary_v2 as histogram_summary
from tensorboard.plugins.scalar import metadata as scalar_metadata
from tensorboard.plugins.scalar import summary_v2 as scalar_summary
from tensorboard.plugins.image import metadata as image_metadata
from tensorboard.plugins.image import summary_v2 as image_summary
from tensorboard.util import tensor_util
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
tf1.enable_eager_execution()
class MultiplexerDataProviderTest(tf.test.TestCase):
def setUp(self):
super(MultiplexerDataProviderTest, self).setUp()
self.logdir = self.get_temp_dir()
self.ctx = context.RequestContext()
logdir = os.path.join(self.logdir, "polynomials")
with tf.summary.create_file_writer(logdir).as_default():
for i in xrange(10):
scalar_summary.scalar(
"square", i ** 2, step=2 * i, description="boxen"
)
scalar_summary.scalar("cube", i ** 3, step=3 * i)
logdir = os.path.join(self.logdir, "waves")
with tf.summary.create_file_writer(logdir).as_default():
for i in xrange(10):
scalar_summary.scalar("sine", tf.sin(float(i)), step=i)
scalar_summary.scalar(
"square", tf.sign(tf.sin(float(i))), step=i
)
# Summary with rank-0 data but not owned by the scalars plugin.
metadata = summary_pb2.SummaryMetadata()
metadata.plugin_data.plugin_name = "marigraphs"
metadata.data_class = summary_pb2.DATA_CLASS_SCALAR
tf.summary.write(
"high_tide", tensor=i, step=i, metadata=metadata
)
# Summary with rank-1 data of scalar data class (bad!).
metadata = summary_pb2.SummaryMetadata()
metadata.plugin_data.plugin_name = "greetings"
metadata.data_class = summary_pb2.DATA_CLASS_SCALAR
tf.summary.write(
"bad", tensor=[i, i], step=i, metadata=metadata
)
logdir = os.path.join(self.logdir, "lebesgue")
with tf.summary.create_file_writer(logdir).as_default():
data = [
("very smooth", (0.0, 0.25, 0.5, 0.75, 1.0), "uniform"),
("very smoothn't", (0.0, 0.01, 0.99, 1.0), "bimodal"),
]
for (description, distribution, name) in data:
tensor = tf.constant([distribution], dtype=tf.float64)
for i in xrange(1, 11):
histogram_summary.histogram(
name, tensor * i, step=i, description=description
)
logdir = os.path.join(self.logdir, "mondrian")
with tf.summary.create_file_writer(logdir).as_default():
data = [
("red", (221, 28, 38), "top-right"),
("blue", (1, 91, 158), "bottom-left"),
("yellow", (239, 220, 111), "bottom-right"),
]
for (name, color, description) in data:
image_1x1 = tf.constant([[[color]]], dtype=tf.uint8)
for i in xrange(1, 11):
# Use a non-monotonic sequence of sample sizes to
# test `max_length` calculation.
k = 6 - abs(6 - i) # 1, .., 6, .., 2
# a `k`-sample image summary of `i`-by-`i` images
image = tf.tile(image_1x1, [k, i, i, 1])
image_summary.image(
name,
image,
step=i,
description=description,
max_outputs=99,
)
def create_multiplexer(self):
multiplexer = event_multiplexer.EventMultiplexer()
multiplexer.AddRunsFromDirectory(self.logdir)
multiplexer.Reload()
return multiplexer
def create_provider(self):
multiplexer = self.create_multiplexer()
return data_provider.MultiplexerDataProvider(multiplexer, self.logdir)
def test_data_location(self):
provider = self.create_provider()
result = provider.data_location(self.ctx, experiment_id="unused")
self.assertEqual(result, self.logdir)
def test_list_plugins_with_no_graph(self):
provider = self.create_provider()
result = provider.list_plugins(self.ctx, experiment_id="unused")
self.assertItemsEqual(
result,
[
"greetings",
"marigraphs",
histogram_metadata.PLUGIN_NAME,
image_metadata.PLUGIN_NAME,
scalar_metadata.PLUGIN_NAME,
],
)
def test_list_plugins_with_graph(self):
with tf.compat.v1.Graph().as_default() as graph:
writer = tf.compat.v1.summary.FileWriter(self.logdir)
writer.add_graph(graph)
writer.flush()
provider = self.create_provider()
result = provider.list_plugins(self.ctx, experiment_id="unused")
self.assertItemsEqual(
result,
[
"greetings",
"marigraphs",
graph_metadata.PLUGIN_NAME,
histogram_metadata.PLUGIN_NAME,
image_metadata.PLUGIN_NAME,
scalar_metadata.PLUGIN_NAME,
],
)
def test_list_runs(self):
# We can't control the timestamps of events written to disk (without
# manually reading the tfrecords, modifying the data, and writing
# them back out), so we provide a fake multiplexer instead.
start_times = {
"second_2": 2.0,
"first": 1.5,
"no_time": None,
"second_1": 2.0,
}
class FakeMultiplexer(object):
def Runs(multiplexer):
result = ["second_2", "first", "no_time", "second_1"]
self.assertItemsEqual(result, start_times)
return result
def FirstEventTimestamp(multiplexer, run):
self.assertIn(run, start_times)
result = start_times[run]
if result is None:
raise ValueError("No event timestep could be found")
else:
return result
multiplexer = FakeMultiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, "fake_logdir"
)
result = provider.list_runs(self.ctx, experiment_id="unused")
self.assertItemsEqual(
result,
[
base_provider.Run(
run_id=run, run_name=run, start_time=start_time
)
for (run, start_time) in six.iteritems(start_times)
],
)
def test_list_scalars_all(self):
provider = self.create_provider()
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=None,
)
self.assertItemsEqual(result.keys(), ["polynomials", "waves"])
self.assertItemsEqual(result["polynomials"].keys(), ["square", "cube"])
self.assertItemsEqual(result["waves"].keys(), ["square", "sine"])
sample = result["polynomials"]["square"]
self.assertIsInstance(sample, base_provider.ScalarTimeSeries)
self.assertEqual(sample.max_step, 18)
# nothing to test for wall time, as it can't be mocked out
self.assertEqual(sample.plugin_content, b"")
self.assertEqual(
sample.display_name, ""
) # not written by V2 summary ops
self.assertEqual(sample.description, "boxen")
def test_list_scalars_filters(self):
provider = self.create_provider()
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(["waves"], ["square"]),
)
self.assertItemsEqual(result.keys(), ["waves"])
self.assertItemsEqual(result["waves"].keys(), ["square"])
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
tags=["square", "quartic"]
),
)
self.assertItemsEqual(result.keys(), ["polynomials", "waves"])
self.assertItemsEqual(result["polynomials"].keys(), ["square"])
self.assertItemsEqual(result["waves"].keys(), ["square"])
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(runs=["waves", "hugs"]),
)
self.assertItemsEqual(result.keys(), ["waves"])
self.assertItemsEqual(result["waves"].keys(), ["sine", "square"])
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(["un"], ["likely"]),
)
self.assertEqual(result, {})
def test_read_scalars(self):
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
run_tag_filter = base_provider.RunTagFilter(
runs=["waves", "polynomials", "unicorns"],
tags=["sine", "square", "cube", "iridescence"],
)
result = provider.read_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=run_tag_filter,
downsample=100,
)
self.assertItemsEqual(result.keys(), ["polynomials", "waves"])
self.assertItemsEqual(result["polynomials"].keys(), ["square", "cube"])
self.assertItemsEqual(result["waves"].keys(), ["square", "sine"])
for run in result:
for tag in result[run]:
tensor_events = multiplexer.Tensors(run, tag)
self.assertLen(result[run][tag], len(tensor_events))
for (datum, event) in zip(result[run][tag], tensor_events):
self.assertEqual(datum.step, event.step)
self.assertEqual(datum.wall_time, event.wall_time)
self.assertEqual(
datum.value,
tensor_util.make_ndarray(event.tensor_proto).item(),
)
def test_read_scalars_downsamples(self):
# TODO(@wchargin): Verify that this always includes the most
# recent datum, as specified by the interface.
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
result = provider.read_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
downsample=3,
)
self.assertLen(result["waves"]["sine"], 3)
def test_read_scalars_but_not_rank_0(self):
provider = self.create_provider()
run_tag_filter = base_provider.RunTagFilter(["waves"], ["bad"])
# No explicit checks yet.
with six.assertRaisesRegex(
self,
ValueError,
"can only convert an array of size 1 to a Python scalar",
):
provider.read_scalars(
self.ctx,
experiment_id="unused",
plugin_name="greetings",
run_tag_filter=run_tag_filter,
downsample=100,
)
def test_list_tensors_all(self):
provider = self.create_provider()
result = provider.list_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
run_tag_filter=None,
)
self.assertItemsEqual(result.keys(), ["lebesgue"])
self.assertItemsEqual(result["lebesgue"].keys(), ["uniform", "bimodal"])
sample = result["lebesgue"]["uniform"]
self.assertIsInstance(sample, base_provider.TensorTimeSeries)
self.assertEqual(sample.max_step, 10)
# nothing to test for wall time, as it can't be mocked out
self.assertEqual(sample.plugin_content, b"")
self.assertEqual(
sample.display_name, ""
) # not written by V2 summary ops
self.assertEqual(sample.description, "very smooth")
def test_list_tensors_filters(self):
provider = self.create_provider()
# Quick check only, as scalars and tensors use the same underlying
# filtering implementation.
result = provider.list_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
["lebesgue"], ["uniform"]
),
)
self.assertItemsEqual(result.keys(), ["lebesgue"])
self.assertItemsEqual(result["lebesgue"].keys(), ["uniform"])
def test_read_tensors(self):
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
run_tag_filter = base_provider.RunTagFilter(
runs=["lebesgue"],
tags=["uniform", "bimodal"],
)
result = provider.read_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
run_tag_filter=run_tag_filter,
downsample=100,
)
self.assertItemsEqual(result.keys(), ["lebesgue"])
self.assertItemsEqual(result["lebesgue"].keys(), ["uniform", "bimodal"])
for run in result:
for tag in result[run]:
tensor_events = multiplexer.Tensors(run, tag)
self.assertLen(result[run][tag], len(tensor_events))
for (datum, event) in zip(result[run][tag], tensor_events):
self.assertEqual(datum.step, event.step)
self.assertEqual(datum.wall_time, event.wall_time)
np.testing.assert_equal(
datum.numpy,
tensor_util.make_ndarray(event.tensor_proto),
)
def test_read_tensors_downsamples(self):
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
result = provider.read_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
downsample=3,
)
self.assertLen(result["lebesgue"]["uniform"], 3)
def test_list_blob_sequences(self):
provider = self.create_provider()
with self.subTest("finds all time series for a plugin"):
result = provider.list_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(
result["mondrian"].keys(), ["red", "blue", "yellow"]
)
sample = result["mondrian"]["blue"]
self.assertIsInstance(sample, base_provider.BlobSequenceTimeSeries)
self.assertEqual(sample.max_step, 10)
# nothing to test for wall time, as it can't be mocked out
self.assertEqual(sample.plugin_content, b"")
self.assertEqual(sample.max_length, 6 + 2)
self.assertEqual(sample.description, "bottom-left")
self.assertEqual(sample.display_name, "")
with self.subTest("filters by run/tag"):
result = provider.list_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
runs=["mondrian", "picasso"], tags=["yellow", "green't"]
),
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(result["mondrian"].keys(), ["yellow"])
self.assertIsInstance(
result["mondrian"]["yellow"],
base_provider.BlobSequenceTimeSeries,
)
def test_read_blob_sequences_and_read_blob(self):
provider = self.create_provider()
with self.subTest("reads all time series for a plugin"):
result = provider.read_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
downsample=4,
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(
result["mondrian"].keys(), ["red", "blue", "yellow"]
)
sample = result["mondrian"]["blue"]
self.assertLen(sample, 4) # downsampled from 10
last = sample[-1]
self.assertIsInstance(last, base_provider.BlobSequenceDatum)
self.assertEqual(last.step, 10)
self.assertLen(last.values, 2 + 2)
blobs = [
provider.read_blob(self.ctx, blob_key=v.blob_key)
for v in last.values
]
self.assertEqual(blobs[0], b"10")
self.assertEqual(blobs[1], b"10")
self.assertStartsWith(blobs[2], b"\x89PNG")
self.assertStartsWith(blobs[3], b"\x89PNG")
blue1 = blobs[2]
blue2 = blobs[3]
red1 = provider.read_blob(
self.ctx,
blob_key=result["mondrian"]["red"][-1].values[2].blob_key,
)
self.assertEqual(blue1, blue2)
self.assertNotEqual(blue1, red1)
with self.subTest("filters by run/tag"):
result = provider.read_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
runs=["mondrian", "picasso"], tags=["yellow", "green't"]
),
downsample=1,
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(result["mondrian"].keys(), ["yellow"])
self.assertIsInstance(
result["mondrian"]["yellow"][0],
base_provider.BlobSequenceDatum,
)
class DownsampleTest(tf.test.TestCase):
"""Tests for the `_downsample` private helper function."""
def test_deterministic(self):
xs = "abcdefg"
expected = data_provider._downsample(xs, k=4)
for _ in range(100):
actual = data_provider._downsample(xs, k=4)
self.assertEqual(actual, expected)
def test_underlong_ok(self):
xs = list("abcdefg")
actual = data_provider._downsample(xs, k=10)
expected = list("abcdefg")
self.assertIsNot(actual, xs)
self.assertEqual(actual, expected)
def test_inorder(self):
xs = list(range(10000))
actual = data_provider._downsample(xs, k=100)
self.assertEqual(actual, sorted(actual))
def test_zero(self):
xs = "abcdefg"
actual = data_provider._downsample(xs, k=0)
self.assertEqual(actual, [])
if __name__ == "__main__":
tf.test.main()
| 1.421875 | 1 |
extras/amld/cloud/quickdraw_rnn/task.py | luyang1210/tensorflow | 1 | 3605 | <filename>extras/amld/cloud/quickdraw_rnn/task.py
"""Experiment wrapper for training on Cloud ML."""
import argparse, glob, os
import tensorflow as tf
# From this package.
import model
def generate_experiment_fn(data_dir, train_batch_size, eval_batch_size,
train_steps, eval_steps, cell_size, hidden,
**experiment_args):
"""Returns experiment_fn for a RNN classifier.
Args:
data_dir: Where {train,eval}-* tf.train.Example datasets can be found.
train_batch_size: Batch size during training.
    eval_batch_size: Batch size during evaluation.
train_steps: Number of training steps.
eval_steps: Number of evaluation steps.
cell_size: LSTM cell size.
hidden: Number of units in hidden layers (note that None means "use default"
      which is equivalent to [] -- see code in model).
experiment_args: Additional arguments when `tf.contrib.learn.Experiment`
is instantiated.
"""
classes = tf.gfile.Open('%s/labels.txt' % data_dir).read().splitlines()
n_classes = len(classes)
params = tf.contrib.training.HParams(
cell_size=cell_size,
hidden=hidden or None, # Default is empty list.
)
config = tf.contrib.learn.RunConfig()
def _experiment_fn(output_dir):
return tf.contrib.learn.Experiment(
model.build_estimator(output_dir, n_classes, params, config),
train_input_fn=model.make_input_fn_stroke(
files_pattern=os.path.join(data_dir, 'train-*'),
batch_size=train_batch_size),
eval_input_fn=model.make_input_fn_stroke(
files_pattern=os.path.join(data_dir, 'eval-*'),
batch_size=eval_batch_size),
export_strategies=[
tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy(
model.serving_input_fn,
exports_to_keep=1)
],
train_steps=train_steps,
eval_steps=eval_steps,
**experiment_args
)
return _experiment_fn
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_dir',
help='GCS or local path to training data',
required=True
)
parser.add_argument(
'--train_batch_size',
help='Batch size for training steps',
type=int,
default=100
)
parser.add_argument(
'--eval_batch_size',
help='Batch size for evaluation steps',
type=int,
default=100
)
parser.add_argument(
'--train_steps',
help='Steps to run the training job for.',
type=int,
default=10000
)
parser.add_argument(
'--eval_steps',
      help='Number of steps to run evaluation for at each checkpoint',
default=100,
type=int
)
parser.add_argument(
'--output_dir',
help='GCS location to write checkpoints and export models',
required=True
)
parser.add_argument(
'--job-dir',
help='this model ignores this field, but it is required by gcloud',
default='junk'
)
parser.add_argument(
'--eval_delay_secs',
help='How long to wait before running first evaluation',
default=10,
type=int
)
parser.add_argument(
'--min_eval_frequency',
help='Minimum number of training steps between evaluations',
default=1,
type=int
)
# Hyper parameters.
parser.add_argument(
'--cell_size',
help='LSTM cell size.',
default=256,
type=int
)
parser.add_argument(
'--hidden',
help='Units in hidden layers.',
default=(),
nargs='+',
type=int
)
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
output_dir = arguments.pop('output_dir')
# Run the training job
tf.contrib.learn.learn_runner.run(
generate_experiment_fn(**arguments), output_dir)
| 2.484375 | 2 |
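For context, a hedged sketch of driving the wrapper above directly instead of through argparse; the GCS paths and hidden-layer sizes are placeholders, and `model` must be importable exactly as in this script.

# Illustrative only -- mirrors what the __main__ block assembles from CLI flags.
experiment_fn = generate_experiment_fn(
    data_dir='gs://my-bucket/quickdraw',   # must contain labels.txt plus train-*/eval-* files
    train_batch_size=100,
    eval_batch_size=100,
    train_steps=10000,
    eval_steps=100,
    cell_size=256,
    hidden=[128, 64],
    eval_delay_secs=10,                    # forwarded to tf.contrib.learn.Experiment
    min_eval_frequency=1,
)
tf.contrib.learn.learn_runner.run(experiment_fn, 'gs://my-bucket/output')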
A/116A.py | johnggo/Codeforces-Solutions | 1 | 3606 | <filename>A/116A.py
# Time: 310 ms
# Memory: 1664 KB
n = int(input())
e = 0
s = 0
for i in range(n):
    s = s - eval(input().replace(' ', '-'))  # "a b" -> eval("a-b"); a passengers exit, b enter, so s becomes s - a + b
e = max(e, s)
print(e)
| 2.890625 | 3 |
tests/test_serialize.py | aferrall/redner | 1,146 | 3607 | import pyredner
import numpy as np
import torch
cam = pyredner.Camera(position = torch.tensor([0.0, 0.0, -5.0]),
look_at = torch.tensor([0.0, 0.0, 0.0]),
up = torch.tensor([0.0, 1.0, 0.0]),
fov = torch.tensor([45.0]), # in degree
clip_near = 1e-2, # needs to > 0
resolution = (256, 256),
fisheye = False)
mat_grey = pyredner.Material(\
diffuse_reflectance = \
torch.tensor([0.5, 0.5, 0.5], device = pyredner.get_device()))
materials = [mat_grey]
shape_triangle = pyredner.Shape(\
vertices = torch.tensor([[-1.7, 1.0, 0.0], [1.0, 1.0, 0.0], [-0.5, -1.0, 0.0]],
device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2]], dtype = torch.int32,
device = pyredner.get_device()),
uvs = None,
normals = None,
material_id = 0)
shape_light = pyredner.Shape(\
vertices = torch.tensor([[-1.0, -1.0, -7.0],
[ 1.0, -1.0, -7.0],
[-1.0, 1.0, -7.0],
[ 1.0, 1.0, -7.0]], device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2],[1, 3, 2]],
dtype = torch.int32, device = pyredner.get_device()),
uvs = None,
normals = None,
material_id = 0)
shapes = [shape_triangle, shape_light]
light = pyredner.AreaLight(shape_id = 1,
intensity = torch.tensor([20.0,20.0,20.0]))
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
scene_state_dict = scene.state_dict()
scene = pyredner.Scene.load_state_dict(scene_state_dict)
scene_args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 16,
max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_serialize/img.exr')
| 1.953125 | 2 |
src/zope/publisher/tests/test_requestdataproperty.py | Shoobx/zope.publisher | 3 | 3608 | <reponame>Shoobx/zope.publisher
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Request Data-Property Tests
"""
from unittest import TestCase, makeSuite
from zope.interface.common.tests.basemapping \
import testIEnumerableMapping, testIReadMapping
from zope.publisher.base \
import RequestDataProperty, RequestDataGetter, RequestDataMapper
class TestDataGettr(RequestDataGetter):
_gettrname = 'getSomething'
class TestDataMapper(RequestDataMapper):
_mapname = '_data'
_marker = object()
class Data(object):
def getSomething(self, name, default=_marker):
if name.startswith('Z'):
return "something %s" % name
if default is not _marker:
return default
raise KeyError(name)
something = RequestDataProperty(TestDataGettr)
somedata = RequestDataProperty(TestDataMapper)
class Test(TestCase):
def testRequestDataGettr(self):
testIReadMapping(self, Data().something,
{"Zope": "something Zope"}, ["spam"])
def testRequestDataMapper(self):
data = Data()
sample = {'foo': 'Foo', 'bar': 'Bar'}
data._data = sample
inst = data.somedata
testIReadMapping(self, inst, sample, ["spam"])
testIEnumerableMapping(self, inst, sample)
def testNoAssign(self):
data = Data()
try:
data.something = {}
except AttributeError:
pass
else:
raise AssertionError("Shouldn't be able to assign")
try:
data.somedata = {}
except AttributeError:
pass
else:
raise AssertionError("Shouldn't be able to assign")
def test_suite():
return makeSuite(Test)
| 2.125 | 2 |
tools/scoring/dimensions/__init__.py | ahemphill/digitalbuildings | 0 | 3609 | <reponame>ahemphill/digitalbuildings
""" Enable import """
from os import path
import sys
sys.path.append(
path.abspath(path.join('tools', 'validators', 'instance_validator')))
| 1.117188 | 1 |
src/thornfield/caches/cache_compression_decorator.py | drorvinkler/thornfield | 2 | 3610 | from typing import Callable, AnyStr, Optional
from zlib import compress as default_compress, decompress as default_decompress
from .cache import Cache
from ..constants import NOT_FOUND
class CacheCompressionDecorator(Cache):
def __init__(
self,
cache: Cache,
compress: Optional[Callable[[str], AnyStr]] = ...,
decompress: Optional[Callable[[AnyStr], str]] = ...,
) -> None:
super().__init__()
self._cache = cache
if compress is None:
self._compress = self._noop
elif compress is ...:
self._compress = self._default_compress
else:
self._compress = compress
if decompress is None:
self._decompress = self._noop
elif decompress is ...:
self._decompress = self._default_decompress
else:
self._decompress = decompress
def get(self, key):
value = self._cache.get(key)
return value if value is NOT_FOUND else self._decompress(value)
def set(self, key, value, expiration: int) -> None:
self._cache.set(key, self._compress(value), expiration)
@staticmethod
def _noop(x):
return x
@staticmethod
def _default_compress(obj: str) -> bytes:
return default_compress(obj.encode("UTF-8"))
@staticmethod
def _default_decompress(data: bytes) -> str:
return default_decompress(data).decode("UTF-8")
| 2.609375 | 3 |
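A minimal usage sketch for the decorator above. `DictCache` is an invented in-memory stand-in (the real project supplies its own `Cache` backends); it only needs `get`/`set`, which is all the decorator calls.

# Sketch only -- DictCache is hypothetical, not part of thornfield.
class DictCache:
    def __init__(self):
        self._store = {}

    def get(self, key):
        return self._store.get(key, NOT_FOUND)

    def set(self, key, value, expiration: int) -> None:
        self._store[key] = value   # expiration ignored in this toy backend

cache = CacheCompressionDecorator(DictCache())
cache.set('greeting', 'hello world', expiration=60)
assert cache.get('greeting') == 'hello world'   # stored zlib-compressed, returned as str
assert cache.get('missing') is NOT_FOUND        # the miss sentinel passes through untouched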
manim/mobject/vector_field.py | kdkasad/manim | 2 | 3611 | """Mobjects representing vector fields."""
__all__ = [
"VectorField",
"ArrowVectorField",
"StreamLines",
]
import itertools as it
import random
from math import ceil, floor
from typing import Callable, Iterable, Optional, Sequence, Tuple, Type
import numpy as np
from colour import Color
from PIL import Image
from .. import config
from ..animation.composition import AnimationGroup, Succession
from ..animation.creation import Create
from ..animation.indication import ShowPassingFlash
from ..animation.update import UpdateFromAlphaFunc
from ..constants import OUT, RIGHT, UP
from ..mobject.geometry import Vector
from ..mobject.mobject import Mobject
from ..mobject.types.vectorized_mobject import VGroup, VMobject
from ..utils.bezier import interpolate, inverse_interpolate
from ..utils.color import BLUE_E, GREEN, RED, YELLOW, color_to_rgb, rgb_to_color
from ..utils.deprecation import deprecated_params
from ..utils.rate_functions import ease_out_sine, linear
from ..utils.simple_functions import sigmoid
from .types.opengl_vectorized_mobject import OpenGLVMobject
DEFAULT_SCALAR_FIELD_COLORS: list = [BLUE_E, GREEN, YELLOW, RED]
class VectorField(VGroup):
"""A vector field.
Vector fields are based on a function defining a vector at every position.
    By default, this class does not include any visible elements, but it provides
    methods to move other :class:`~.Mobject` s along the vector field.
Parameters
----------
func
The function defining the rate of change at every position of the `VectorField`.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
kwargs : Any
Additional arguments to be passed to the :class:`~.VGroup` constructor
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
**kwargs
):
super().__init__(**kwargs)
self.func = func
if color is None:
self.single_color = False
if color_scheme is None:
def color_scheme(p):
return np.linalg.norm(p)
self.color_scheme = color_scheme # TODO maybe other default for direction?
self.rgbs = np.array(list(map(color_to_rgb, colors)))
def pos_to_rgb(pos: np.ndarray) -> Tuple[float, float, float, float]:
vec = self.func(pos)
color_value = np.clip(
self.color_scheme(vec),
min_color_scheme_value,
max_color_scheme_value,
)
alpha = inverse_interpolate(
min_color_scheme_value,
max_color_scheme_value,
color_value,
)
alpha *= len(self.rgbs) - 1
c1 = self.rgbs[int(alpha)]
c2 = self.rgbs[min(int(alpha + 1), len(self.rgbs) - 1)]
alpha %= 1
return interpolate(c1, c2, alpha)
self.pos_to_rgb = pos_to_rgb
self.pos_to_color = lambda pos: rgb_to_color(self.pos_to_rgb(pos))
else:
self.single_color = True
self.color = color
self.submob_movement_updater = None
@staticmethod
def shift_func(
func: Callable[[np.ndarray], np.ndarray],
shift_vector: np.ndarray,
) -> Callable[[np.ndarray], np.ndarray]:
"""Shift a vector field function.
Parameters
----------
func
The function defining a vector field.
shift_vector
The shift to be applied to the vector field.
Returns
-------
`Callable[[np.ndarray], np.ndarray]`
The shifted vector field function.
"""
return lambda p: func(p - shift_vector)
@staticmethod
def scale_func(
func: Callable[[np.ndarray], np.ndarray],
scalar: float,
) -> Callable[[np.ndarray], np.ndarray]:
"""Scale a vector field function.
Parameters
----------
func
The function defining a vector field.
shift_vector
The scalar to be applied to the vector field.
Examples
--------
.. manim:: ScaleVectorFieldFunction
class ScaleVectorFieldFunction(Scene):
def construct(self):
func = lambda pos: np.sin(pos[1]) * RIGHT + np.cos(pos[0]) * UP
vector_field = ArrowVectorField(func)
self.add(vector_field)
self.wait()
func = VectorField.scale_func(func, 0.5)
self.play(vector_field.animate.become(ArrowVectorField(func)))
self.wait()
Returns
-------
`Callable[[np.ndarray], np.ndarray]`
The scaled vector field function.
"""
return lambda p: func(p * scalar)
def nudge(
self,
mob: Mobject,
dt: float = 1,
substeps: int = 1,
pointwise: bool = False,
) -> "VectorField":
"""Nudge a :class:`~.Mobject` along the vector field.
Parameters
----------
mob
The mobject to move along the vector field
dt
A scalar to the amount the mobject is moved along the vector field.
The actual distance is based on the magnitude of the vector field.
substeps
The amount of steps the whole nudge is divided into. Higher values
give more accurate approximations.
pointwise
Whether to move the mobject along the vector field. If `False` the
vector field takes effect on the center of the given
:class:`~.Mobject`. If `True` the vector field takes effect on the
points of the individual points of the :class:`~.Mobject`,
potentially distorting it.
Returns
-------
VectorField
This vector field.
Examples
--------
.. manim:: Nudging
class Nudging(Scene):
def construct(self):
func = lambda pos: np.sin(pos[1] / 2) * RIGHT + np.cos(pos[0] / 2) * UP
vector_field = ArrowVectorField(
func, x_range=[-7, 7, 1], y_range=[-4, 4, 1], length_func=lambda x: x / 2
)
self.add(vector_field)
circle = Circle(radius=2).shift(LEFT)
self.add(circle.copy().set_color(GRAY))
dot = Dot().move_to(circle)
vector_field.nudge(circle, -2, 60, True)
vector_field.nudge(dot, -2, 60)
circle.add_updater(vector_field.get_nudge_updater(pointwise=True))
dot.add_updater(vector_field.get_nudge_updater())
self.add(circle, dot)
self.wait(6)
"""
def runge_kutta(self, p: Sequence[float], step_size: float) -> float:
"""Returns the change in position of a point along a vector field.
Parameters
----------
p
The position of each point being moved along the vector field.
step_size
A scalar that is used to determine how much a point is shifted in a single step.
Returns
-------
float
How much the point is shifted.
"""
k_1 = self.func(p)
k_2 = self.func(p + step_size * (k_1 * 0.5))
k_3 = self.func(p + step_size * (k_2 * 0.5))
k_4 = self.func(p + step_size * k_3)
return step_size / 6.0 * (k_1 + 2.0 * k_2 + 2.0 * k_3 + k_4)
step_size = dt / substeps
for _ in range(substeps):
if pointwise:
mob.apply_function(lambda p: p + runge_kutta(self, p, step_size))
else:
mob.shift(runge_kutta(self, mob.get_center(), step_size))
return self
def nudge_submobjects(
self,
dt: float = 1,
substeps: int = 1,
pointwise: bool = False,
) -> "VectorField":
"""Apply a nudge along the vector field to all submobjects.
Parameters
----------
dt
A scalar to the amount the mobject is moved along the vector field.
The actual distance is based on the magnitude of the vector field.
substeps
The amount of steps the whole nudge is divided into. Higher values
give more accurate approximations.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
VectorField
This vector field.
"""
for mob in self.submobjects:
self.nudge(mob, dt, substeps, pointwise)
return self
def get_nudge_updater(
self,
speed: float = 1,
pointwise: bool = False,
) -> Callable[[Mobject, float], Mobject]:
"""Get an update function to move a :class:`~.Mobject` along the vector field.
When used with :meth:`~.Mobject.add_updater`, the mobject will move along the vector field, where its speed is determined by the magnitude of the vector field.
Parameters
----------
speed
At `speed=1` the distance a mobject moves per second is equal to the magnitude of the vector field along its path. The speed value scales the speed of such a mobject.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
Callable[[Mobject, float], Mobject]
The update function.
"""
return lambda mob, dt: self.nudge(mob, dt * speed, pointwise=pointwise)
def start_submobject_movement(
self,
speed: float = 1,
pointwise: bool = False,
) -> "VectorField":
"""Start continuously moving all submobjects along the vector field.
Calling this method multiple times will result in removing the previous updater created by this method.
Parameters
----------
speed
The speed at which to move the submobjects. See :meth:`get_nudge_updater` for details.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
VectorField
This vector field.
"""
self.stop_submobject_movement()
self.submob_movement_updater = lambda mob, dt: mob.nudge_submobjects(
dt * speed,
pointwise=pointwise,
)
self.add_updater(self.submob_movement_updater)
return self
def stop_submobject_movement(self) -> "VectorField":
"""Stops the continuous movement started using :meth:`start_submobject_movement`.
Returns
-------
VectorField
This vector field.
"""
self.remove_updater(self.submob_movement_updater)
self.submob_movement_updater = None
return self
def get_colored_background_image(self, sampling_rate: int = 5) -> Image.Image:
"""Generate an image that displays the vector field.
        The color at each position is calculated by passing the position through a
series of steps:
Calculate the vector field function at that position, map that vector to a
single value using `self.color_scheme` and finally generate a color from
that value using the color gradient.
Parameters
----------
sampling_rate
The stepsize at which pixels get included in the image. Lower values give
more accurate results, but may take a long time to compute.
Returns
-------
        Image.Image
The vector field image.
"""
if self.single_color:
raise ValueError(
"There is no point in generating an image if the vector field uses a single color.",
)
ph = int(config["pixel_height"] / sampling_rate)
pw = int(config["pixel_width"] / sampling_rate)
fw = config["frame_width"]
fh = config["frame_height"]
points_array = np.zeros((ph, pw, 3))
x_array = np.linspace(-fw / 2, fw / 2, pw)
y_array = np.linspace(fh / 2, -fh / 2, ph)
x_array = x_array.reshape((1, len(x_array)))
y_array = y_array.reshape((len(y_array), 1))
x_array = x_array.repeat(ph, axis=0)
y_array.repeat(pw, axis=1) # TODO why not y_array = y_array.repeat(...)?
points_array[:, :, 0] = x_array
points_array[:, :, 1] = y_array
rgbs = np.apply_along_axis(self.pos_to_rgb, 2, points_array)
return Image.fromarray((rgbs * 255).astype("uint8"))
def get_vectorized_rgba_gradient_function(
self,
start: float,
end: float,
colors: Iterable,
):
"""
Generates a gradient of rgbas as a numpy array
Parameters
----------
start
start value used for inverse interpolation at :func:`~.inverse_interpolate`
end
end value used for inverse interpolation at :func:`~.inverse_interpolate`
colors
list of colors to generate the gradient
Returns
-------
function to generate the gradients as numpy arrays representing rgba values
"""
rgbs = np.array([color_to_rgb(c) for c in colors])
def func(values, opacity=1):
alphas = inverse_interpolate(start, end, np.array(values))
alphas = np.clip(alphas, 0, 1)
scaled_alphas = alphas * (len(rgbs) - 1)
indices = scaled_alphas.astype(int)
next_indices = np.clip(indices + 1, 0, len(rgbs) - 1)
inter_alphas = scaled_alphas % 1
inter_alphas = inter_alphas.repeat(3).reshape((len(indices), 3))
result = interpolate(rgbs[indices], rgbs[next_indices], inter_alphas)
result = np.concatenate(
(result, np.full([len(result), 1], opacity)),
axis=1,
)
return result
return func
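# A quick, hypothetical check of the gradient helper above (not part of manim):
#     field = ArrowVectorField(lambda pos: pos)
#     to_rgba = field.get_vectorized_rgba_gradient_function(0, 2, [BLUE_E, YELLOW])
#     to_rgba([0.0, 1.0, 2.0], opacity=0.5)
# returns a (3, 4) array of RGBA rows; inputs outside [0, 2] are clamped before interpolation.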
class ArrowVectorField(VectorField):
"""A :class:`VectorField` represented by a set of change vectors.
Vector fields are always based on a function defining the :class:`~.Vector` at every position.
The values of this functions is displayed as a grid of vectors.
By default the color of each vector is determined by it's magnitude.
Other color schemes can be used however.
Parameters
----------
func
The function defining the rate of change at every position of the vector field.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
x_range
A sequence of x_min, x_max, delta_x
y_range
A sequence of y_min, y_max, delta_y
z_range
A sequence of z_min, z_max, delta_z
three_dimensions
Enables three_dimensions. Default set to False, automatically turns True if
z_range is not None.
length_func
The function determining the displayed size of the vectors. The actual size
        of the vector is passed in; the returned value will be used as the display size for the
vector. By default this is used to cap the displayed size of vectors to reduce the clutter.
opacity
The opacity of the arrows.
vector_config
Additional arguments to be passed to the :class:`~.Vector` constructor
kwargs : Any
Additional arguments to be passed to the :class:`~.VGroup` constructor
Examples
--------
.. manim:: BasicUsage
:save_last_frame:
class BasicUsage(Scene):
def construct(self):
func = lambda pos: ((pos[0] * UR + pos[1] * LEFT) - pos) / 3
self.add(ArrowVectorField(func))
.. manim:: SizingAndSpacing
class SizingAndSpacing(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
vf = ArrowVectorField(func, x_range=[-7, 7, 1])
self.add(vf)
self.wait()
length_func = lambda x: x / 3
vf2 = ArrowVectorField(func, x_range=[-7, 7, 1], length_func=length_func)
self.play(vf.animate.become(vf2))
self.wait()
.. manim:: Coloring
:save_last_frame:
class Coloring(Scene):
def construct(self):
func = lambda pos: pos - LEFT * 5
colors = [RED, YELLOW, BLUE, DARK_GRAY]
min_radius = Circle(radius=2, color=colors[0]).shift(LEFT * 5)
max_radius = Circle(radius=10, color=colors[-1]).shift(LEFT * 5)
vf = ArrowVectorField(
func, min_color_scheme_value=2, max_color_scheme_value=10, colors=colors
)
self.add(vf, min_radius, max_radius)
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
# Determining Vector positions:
x_range: Sequence[float] = None,
y_range: Sequence[float] = None,
z_range: Sequence[float] = None,
three_dimensions: bool = False, # Automatically True if z_range is set
# Takes in actual norm, spits out displayed norm
length_func: Callable[[float], float] = lambda norm: 0.45 * sigmoid(norm),
opacity: float = 1.0,
vector_config: Optional[dict] = None,
**kwargs
):
self.x_range = x_range or [
floor(-config["frame_width"] / 2),
ceil(config["frame_width"] / 2),
]
self.y_range = y_range or [
floor(-config["frame_height"] / 2),
ceil(config["frame_height"] / 2),
]
self.ranges = [self.x_range, self.y_range]
if three_dimensions or z_range:
self.z_range = z_range or self.y_range.copy()
self.ranges += [self.z_range]
else:
self.ranges += [[0, 0]]
for i in range(len(self.ranges)):
if len(self.ranges[i]) == 2:
self.ranges[i] += [0.5]
self.ranges[i][1] += self.ranges[i][2]
self.x_range, self.y_range, self.z_range = self.ranges
super().__init__(
func,
color,
color_scheme,
min_color_scheme_value,
max_color_scheme_value,
colors,
**kwargs,
)
self.length_func = length_func
self.opacity = opacity
if vector_config is None:
vector_config = {}
self.vector_config = vector_config
self.func = func
x_range = np.arange(*self.x_range)
y_range = np.arange(*self.y_range)
z_range = np.arange(*self.z_range)
for x, y, z in it.product(x_range, y_range, z_range):
self.add(self.get_vector(x * RIGHT + y * UP + z * OUT))
self.set_opacity(self.opacity)
def get_vector(self, point: np.ndarray):
"""Creates a vector in the vector field.
The created vector is based on the function of the vector field and is
rooted in the given point. Color and length fit the specifications of
this vector field.
Parameters
----------
point
The root point of the vector.
kwargs : Any
Additional arguments to be passed to the :class:`~.Vector` constructor
"""
output = np.array(self.func(point))
norm = np.linalg.norm(output)
if norm != 0:
output *= self.length_func(norm) / norm
vect = Vector(output, **self.vector_config)
vect.shift(point)
if self.single_color:
vect.set_color(self.color)
else:
vect.set_color(self.pos_to_color(point))
return vect
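    # Usage sketch (illustrative; assumes the usual manim constants such as RIGHT and UP):
    #   field = ArrowVectorField(lambda pos: pos / 3)
    #   arrow = field.get_vector(2 * RIGHT + UP)  # Vector mobject rooted at (2, 1, 0)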
class StreamLines(VectorField):
"""StreamLines represent the flow of a :class:`VectorField` using the trace of moving agents.
Vector fields are always based on a function defining the vector at every position.
    The values of this function are displayed by moving many agents along the vector field
and showing their trace.
Parameters
----------
func
The function defining the rate of change at every position of the vector field.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
x_range
A sequence of x_min, x_max, delta_x
y_range
A sequence of y_min, y_max, delta_y
z_range
A sequence of z_min, z_max, delta_z
three_dimensions
        Enables rendering in three dimensions. Defaults to False; automatically becomes True if
        z_range is not None.
noise_factor
The amount by which the starting position of each agent is altered along each axis. Defaults to :code:`delta_y / 2` if not defined.
n_repeats
The number of agents generated at each starting point.
dt
The factor by which the distance an agent moves per step is stretched. Lower values result in a better approximation of the trajectories in the vector field.
virtual_time
The time the agents get to move in the vector field. Higher values therefore result in longer stream lines. However, this whole time gets simulated upon creation.
max_anchors_per_line
The maximum number of anchors per line. Lines with more anchors get reduced in complexity, not in length.
padding
The distance agents can move out of the generation area before being terminated.
stroke_width
        The stroke width of the stream lines.
opacity
The opacity of the stream lines.
Examples
--------
.. manim:: BasicUsage
:save_last_frame:
class BasicUsage(Scene):
def construct(self):
func = lambda pos: ((pos[0] * UR + pos[1] * LEFT) - pos) / 3
self.add(StreamLines(func))
.. manim:: SpawningAndFlowingArea
:save_last_frame:
class SpawningAndFlowingArea(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0]) * UR + np.cos(pos[1]) * LEFT + pos / 5
stream_lines = StreamLines(
func, x_range=[-3, 3, 0.2], y_range=[-2, 2, 0.2], padding=1
)
spawning_area = Rectangle(width=6, height=4)
flowing_area = Rectangle(width=8, height=6)
labels = [Tex("Spawning Area"), Tex("Flowing Area").shift(DOWN * 2.5)]
for lbl in labels:
lbl.add_background_rectangle(opacity=0.6, buff=0.05)
self.add(stream_lines, spawning_area, flowing_area, *labels)
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
# Determining stream line starting positions:
x_range: Sequence[float] = None,
y_range: Sequence[float] = None,
z_range: Sequence[float] = None,
three_dimensions: bool = False,
noise_factor: Optional[float] = None,
n_repeats=1,
# Determining how lines are drawn
dt=0.05,
virtual_time=3,
max_anchors_per_line=100,
padding=3,
# Determining stream line appearance:
stroke_width=1,
opacity=1,
**kwargs
):
self.x_range = x_range or [
floor(-config["frame_width"] / 2),
ceil(config["frame_width"] / 2),
]
self.y_range = y_range or [
floor(-config["frame_height"] / 2),
ceil(config["frame_height"] / 2),
]
self.ranges = [self.x_range, self.y_range]
if three_dimensions or z_range:
self.z_range = z_range or self.y_range.copy()
self.ranges += [self.z_range]
else:
self.ranges += [[0, 0]]
for i in range(len(self.ranges)):
if len(self.ranges[i]) == 2:
self.ranges[i] += [0.5]
self.ranges[i][1] += self.ranges[i][2]
self.x_range, self.y_range, self.z_range = self.ranges
super().__init__(
func,
color,
color_scheme,
min_color_scheme_value,
max_color_scheme_value,
colors,
**kwargs,
)
self.noise_factor = (
noise_factor if noise_factor is not None else self.y_range[2] / 2
)
self.n_repeats = n_repeats
self.virtual_time = virtual_time
self.max_anchors_per_line = max_anchors_per_line
self.padding = padding
self.stroke_width = stroke_width
half_noise = self.noise_factor / 2
np.random.seed(0)
start_points = np.array(
[
(x - half_noise) * RIGHT
+ (y - half_noise) * UP
+ (z - half_noise) * OUT
+ self.noise_factor * np.random.random(3)
for n in range(self.n_repeats)
for x in np.arange(*self.x_range)
for y in np.arange(*self.y_range)
for z in np.arange(*self.z_range)
],
)
def outside_box(p):
return (
p[0] < self.x_range[0] - self.padding
or p[0] > self.x_range[1] + self.padding - self.x_range[2]
or p[1] < self.y_range[0] - self.padding
or p[1] > self.y_range[1] + self.padding - self.y_range[2]
or p[2] < self.z_range[0] - self.padding
or p[2] > self.z_range[1] + self.padding - self.z_range[2]
)
max_steps = ceil(virtual_time / dt) + 1
if not self.single_color:
self.background_img = self.get_colored_background_image()
if config["renderer"] == "opengl":
self.values_to_rgbas = self.get_vectorized_rgba_gradient_function(
min_color_scheme_value,
max_color_scheme_value,
colors,
)
for point in start_points:
points = [point]
for _ in range(max_steps):
last_point = points[-1]
new_point = last_point + dt * func(last_point)
if outside_box(new_point):
break
points.append(new_point)
step = max_steps
if not step:
continue
if config["renderer"] == "opengl":
line = OpenGLVMobject()
else:
line = VMobject()
line.duration = step * dt
step = max(1, int(len(points) / self.max_anchors_per_line))
line.set_points_smoothly(points[::step])
if self.single_color:
line.set_stroke(self.color)
else:
if config["renderer"] == "opengl":
# scaled for compatibility with cairo
line.set_stroke(width=self.stroke_width / 4.0)
norms = np.array(
[np.linalg.norm(self.func(point)) for point in line.points],
)
line.set_rgba_array_direct(
self.values_to_rgbas(norms, opacity),
name="stroke_rgba",
)
else:
if np.any(self.z_range != np.array([0, 0.5, 0.5])):
line.set_stroke(
[self.pos_to_color(p) for p in line.get_anchors()],
)
else:
line.color_using_background_image(self.background_img)
line.set_stroke(width=self.stroke_width, opacity=opacity)
self.add(line)
self.stream_lines = [*self.submobjects]
def create(
self,
lag_ratio: Optional[float] = None,
run_time: Optional[Callable[[float], float]] = None,
**kwargs
) -> AnimationGroup:
"""The creation animation of the stream lines.
The stream lines appear in random order.
Parameters
----------
lag_ratio
The lag ratio of the animation.
If undefined, it will be selected so that the total animation length is 1.5 times the run time of each stream line creation.
run_time
The run time of every single stream line creation. The runtime of the whole animation might be longer due to the `lag_ratio`.
If undefined, the virtual time of the stream lines is used as run time.
Returns
-------
:class:`~.AnimationGroup`
The creation animation of the stream lines.
Examples
--------
.. manim:: StreamLineCreation
class StreamLineCreation(Scene):
def construct(self):
func = lambda pos: (pos[0] * UR + pos[1] * LEFT) - pos
stream_lines = StreamLines(
func,
color=YELLOW,
x_range=[-7, 7, 1],
y_range=[-4, 4, 1],
stroke_width=3,
virtual_time=1, # use shorter lines
max_anchors_per_line=5, # better performance with fewer anchors
)
self.play(stream_lines.create()) # uses virtual_time as run_time
self.wait()
"""
if run_time is None:
run_time = self.virtual_time
if lag_ratio is None:
lag_ratio = run_time / 2 / len(self.submobjects)
animations = [
Create(line, run_time=run_time, **kwargs) for line in self.stream_lines
]
random.shuffle(animations)
return AnimationGroup(*animations, lag_ratio=lag_ratio)
def start_animation(
self,
warm_up=True,
flow_speed: float = 1,
time_width: float = 0.3,
rate_func: Callable[[float], float] = linear,
line_animation_class: Type[ShowPassingFlash] = ShowPassingFlash,
**kwargs
) -> None:
"""Animates the stream lines using an updater.
        The stream lines will flow continuously.
Parameters
----------
warm_up : bool, optional
If `True` the animation is initialized line by line. Otherwise it starts with all lines shown.
flow_speed
At `flow_speed=1` the distance the flow moves per second is equal to the magnitude of the vector field along its path. The speed value scales the speed of this flow.
time_width
The proportion of the stream line shown while being animated
rate_func
The rate function of each stream line flashing
line_animation_class
The animation class being used
Examples
--------
.. manim:: ContinuousMotion
class ContinuousMotion(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
stream_lines = StreamLines(func, stroke_width=3, max_anchors_per_line=30)
self.add(stream_lines)
stream_lines.start_animation(warm_up=False, flow_speed=1.5)
self.wait(stream_lines.virtual_time / stream_lines.flow_speed)
"""
for line in self.stream_lines:
run_time = line.duration / flow_speed
line.anim = line_animation_class(
line,
run_time=run_time,
rate_func=rate_func,
time_width=time_width,
**kwargs,
)
line.anim.begin()
line.time = random.random() * self.virtual_time
if warm_up:
line.time *= -1
self.add(line.anim.mobject)
def updater(mob, dt):
for line in mob.stream_lines:
line.time += dt * flow_speed
if line.time >= self.virtual_time:
line.time -= self.virtual_time
line.anim.interpolate(np.clip(line.time / line.anim.run_time, 0, 1))
self.add_updater(updater)
self.flow_animation = updater
self.flow_speed = flow_speed
self.time_width = time_width
def end_animation(self) -> AnimationGroup:
"""End the stream line animation smoothly.
Returns an animation resulting in fully displayed stream lines without a noticeable cut.
Returns
-------
:class:`~.AnimationGroup`
The animation fading out the running stream animation.
Raises
------
ValueError
if no stream line animation is running
Examples
--------
.. manim:: EndAnimation
class EndAnimation(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
stream_lines = StreamLines(
func, stroke_width=3, max_anchors_per_line=5, virtual_time=1, color=BLUE
)
self.add(stream_lines)
stream_lines.start_animation(warm_up=False, flow_speed=1.5, time_width=0.5)
self.wait(1)
self.play(stream_lines.end_animation())
"""
if self.flow_animation is None:
raise ValueError("You have to start the animation before fading it out.")
def hide_and_wait(mob, alpha):
if alpha == 0:
mob.set_stroke(opacity=0)
elif alpha == 1:
mob.set_stroke(opacity=1)
def finish_updater_cycle(line, alpha):
line.time += dt * self.flow_speed
line.anim.interpolate(min(line.time / line.anim.run_time, 1))
if alpha == 1:
self.remove(line.anim.mobject)
line.anim.finish()
max_run_time = self.virtual_time / self.flow_speed
creation_rate_func = ease_out_sine
creation_staring_speed = creation_rate_func(0.001) * 1000
creation_run_time = (
max_run_time / (1 + self.time_width) * creation_staring_speed
)
# creation_run_time is calculated so that the creation animation starts at the same speed
# as the regular line flash animation but eases out.
dt = 1 / config["frame_rate"]
animations = []
self.remove_updater(self.flow_animation)
self.flow_animation = None
for line in self.stream_lines:
create = Create(
line,
run_time=creation_run_time,
rate_func=creation_rate_func,
)
if line.time <= 0:
animations.append(
Succession(
UpdateFromAlphaFunc(
line,
hide_and_wait,
run_time=-line.time / self.flow_speed,
),
create,
),
)
self.remove(line.anim.mobject)
line.anim.finish()
else:
remaining_time = max_run_time - line.time / self.flow_speed
animations.append(
Succession(
UpdateFromAlphaFunc(
line,
finish_updater_cycle,
run_time=remaining_time,
),
create,
),
)
return AnimationGroup(*animations)
# TODO: Variant of StreamLines that is able to respond to changes in the vector field function
| 3.03125 | 3 |
marshmallow_dataclass/__init__.py | dan-starkware/marshmallow_dataclass | 0 | 3612 | """
This library allows the conversion of python 3.7's :mod:`dataclasses`
to :mod:`marshmallow` schemas.
It takes a python class, and generates a marshmallow schema for it.
Simple example::
from marshmallow import Schema
from marshmallow_dataclass import dataclass
@dataclass
class Point:
x:float
y:float
point = Point(x=0, y=0)
point_json = Point.Schema().dumps(point)
Full example::
from marshmallow import Schema
from dataclasses import field
from marshmallow_dataclass import dataclass
import datetime
@dataclass
class User:
birth: datetime.date = field(metadata= {
"required": True # A parameter to pass to marshmallow's field
})
website:str = field(metadata = {
"marshmallow_field": marshmallow.fields.Url() # Custom marshmallow field
})
Schema: ClassVar[Type[Schema]] = Schema # For the type checker
"""
import inspect
from enum import EnumMeta
from functools import lru_cache
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
import dataclasses
import marshmallow
import typing_inspect
__all__ = ["dataclass", "add_schema", "class_schema", "field_for_schema", "NewType"]
NoneType = type(None)
_U = TypeVar("_U")
# Whitelist of dataclass members that will be copied to generated schema.
MEMBERS_WHITELIST: Set[str] = {"Meta"}
# Max number of generated schemas that class_schema keeps in its cache. Avoids regenerating duplicates.
MAX_CLASS_SCHEMA_CACHE_SIZE = 1024
# _cls should never be specified by keyword, so start it with an
# underscore. The presence of _cls is used to detect if this
# decorator is being called with parameters or not.
def dataclass(
_cls: Type[_U] = None,
*,
repr: bool = True,
eq: bool = True,
order: bool = False,
unsafe_hash: bool = False,
frozen: bool = False,
base_schema: Optional[Type[marshmallow.Schema]] = None,
):
"""
This decorator does the same as dataclasses.dataclass, but also applies :func:`add_schema`.
It adds a `.Schema` attribute to the class object
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
>>> @dataclass
... class Artist:
... name: str
>>> Artist.Schema
<class 'marshmallow.schema.Artist'>
>>> from typing import ClassVar
>>> from marshmallow import Schema
>>> @dataclass(order=True) # preserve field order
... class Point:
... x:float
... y:float
... Schema: ClassVar[Type[Schema]] = Schema # For the type checker
...
>>> Point.Schema().load({'x':0, 'y':0}) # This line can be statically type checked
Point(x=0.0, y=0.0)
"""
# dataclass's typing doesn't expect it to be called as a function, so ignore type check
dc = dataclasses.dataclass( # type: ignore
_cls, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen
)
if _cls is None:
return lambda cls: add_schema(dc(cls), base_schema)
return add_schema(dc, base_schema)
@overload
def add_schema(_cls: Type[_U]) -> Type[_U]:
...
@overload
def add_schema(
base_schema: Type[marshmallow.Schema] = None,
) -> Callable[[Type[_U]], Type[_U]]:
...
@overload
def add_schema(
_cls: Type[_U], base_schema: Type[marshmallow.Schema] = None
) -> Type[_U]:
...
def add_schema(_cls=None, base_schema=None):
"""
This decorator adds a marshmallow schema as the 'Schema' attribute in a dataclass.
It uses :func:`class_schema` internally.
:param type cls: The dataclass to which a Schema should be added
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
>>> class BaseSchema(marshmallow.Schema):
... def on_bind_field(self, field_name, field_obj):
... field_obj.data_key = (field_obj.data_key or field_name).upper()
>>> @add_schema(base_schema=BaseSchema)
... @dataclasses.dataclass
... class Artist:
... names: Tuple[str, str]
>>> artist = Artist.Schema().loads('{"NAMES": ["Martin", "Ramirez"]}')
>>> artist
Artist(names=('Martin', 'Ramirez'))
"""
def decorator(clazz: Type[_U]) -> Type[_U]:
clazz.Schema = class_schema(clazz, base_schema) # type: ignore
return clazz
return decorator(_cls) if _cls else decorator
def class_schema(
clazz: type, base_schema: Optional[Type[marshmallow.Schema]] = None
) -> Type[marshmallow.Schema]:
"""
Convert a class to a marshmallow schema
:param clazz: A python class (may be a dataclass)
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
:return: A marshmallow Schema corresponding to the dataclass
.. note::
All the arguments supported by marshmallow field classes can
be passed in the `metadata` dictionary of a field.
If you want to use a custom marshmallow field
(one that has no equivalent python type), you can pass it as the
``marshmallow_field`` key in the metadata dictionary.
>>> import typing
>>> Meters = typing.NewType('Meters', float)
>>> @dataclasses.dataclass()
... class Building:
... height: Optional[Meters]
... name: str = dataclasses.field(default="anonymous")
... class Meta:
... ordered = True
...
>>> class_schema(Building) # Returns a marshmallow schema class (not an instance)
<class 'marshmallow.schema.Building'>
>>> @dataclasses.dataclass()
... class City:
... name: str = dataclasses.field(metadata={'required':True})
... best_building: Building # Reference to another dataclasses. A schema will be created for it too.
... other_buildings: List[Building] = dataclasses.field(default_factory=lambda: [])
...
>>> citySchema = class_schema(City)()
>>> city = citySchema.load({"name":"Paris", "best_building": {"name": "Eiffel Tower"}})
>>> city
City(name='Paris', best_building=Building(height=None, name='Eiffel Tower'), other_buildings=[])
>>> citySchema.load({"name":"Paris"})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'best_building': ['Missing data for required field.']}
>>> city_json = citySchema.dump(city)
>>> city_json['best_building'] # We get an OrderedDict because we specified order = True in the Meta class
OrderedDict([('height', None), ('name', 'Eiffel Tower')])
>>> @dataclasses.dataclass()
... class Person:
... name: str = dataclasses.field(default="Anonymous")
... friends: List['Person'] = dataclasses.field(default_factory=lambda:[]) # Recursive field
...
>>> person = class_schema(Person)().load({
... "friends": [{"name": "<NAME>"}]
... })
>>> person
Person(name='Anonymous', friends=[Person(name='<NAME>', friends=[])])
>>> @dataclasses.dataclass()
... class C:
... important: int = dataclasses.field(init=True, default=0)
... # Only fields that are in the __init__ method will be added:
... unimportant: int = dataclasses.field(init=False, default=0)
...
>>> c = class_schema(C)().load({
... "important": 9, # This field will be imported
... "unimportant": 9 # This field will NOT be imported
... }, unknown=marshmallow.EXCLUDE)
>>> c
C(important=9, unimportant=0)
>>> @dataclasses.dataclass
... class Website:
... url:str = dataclasses.field(metadata = {
... "marshmallow_field": marshmallow.fields.Url() # Custom marshmallow field
... })
...
>>> class_schema(Website)().load({"url": "I am not a good URL !"})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'url': ['Not a valid URL.']}
>>> @dataclasses.dataclass
... class NeverValid:
... @marshmallow.validates_schema
... def validate(self, data, **_):
... raise marshmallow.ValidationError('never valid')
...
>>> class_schema(NeverValid)().load({})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'_schema': ['never valid']}
>>> # noinspection PyTypeChecker
>>> class_schema(None) # unsupported type
Traceback (most recent call last):
...
TypeError: None is not a dataclass and cannot be turned into one.
>>> @dataclasses.dataclass
... class Anything:
... name: str
... @marshmallow.validates('name')
... def validates(self, value):
... if len(value) > 5: raise marshmallow.ValidationError("Name too long")
>>> class_schema(Anything)().load({"name": "aaaaaargh"})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'name': ['Name too long']}
"""
return _proxied_class_schema(clazz, base_schema)
@lru_cache(maxsize=MAX_CLASS_SCHEMA_CACHE_SIZE)
def _proxied_class_schema(
clazz: type, base_schema: Optional[Type[marshmallow.Schema]] = None
) -> Type[marshmallow.Schema]:
try:
# noinspection PyDataclass
fields: Tuple[dataclasses.Field, ...] = dataclasses.fields(clazz)
except TypeError: # Not a dataclass
try:
return class_schema(dataclasses.dataclass(clazz), base_schema)
except Exception:
raise TypeError(
f"{getattr(clazz, '__name__', repr(clazz))} is not a dataclass and cannot be turned into one."
)
# Copy all marshmallow hooks and whitelisted members of the dataclass to the schema.
attributes = {
k: v
for k, v in inspect.getmembers(clazz)
if hasattr(v, "__marshmallow_hook__") or k in MEMBERS_WHITELIST
}
# Update the schema members to contain marshmallow fields instead of dataclass fields
attributes.update(
(
field.name,
field_for_schema(
field.type, _get_field_default(field), field.metadata, base_schema
),
)
for field in fields
if field.init
)
schema_class = type(clazz.__name__, (_base_schema(clazz, base_schema),), attributes)
return cast(Type[marshmallow.Schema], schema_class)
def _field_by_type(
typ: Union[type, Any], base_schema: Optional[Type[marshmallow.Schema]]
) -> Optional[Type[marshmallow.fields.Field]]:
return (
base_schema and base_schema.TYPE_MAPPING.get(typ)
) or marshmallow.Schema.TYPE_MAPPING.get(typ)
def _field_by_supertype(
typ: Type,
default: marshmallow.missing,
newtype_supertype: Type,
metadata: dict,
base_schema: Optional[Type[marshmallow.Schema]],
) -> marshmallow.fields.Field:
"""
Return a new field for fields based on a super field. (Usually spawned from NewType)
"""
# Add the information coming our custom NewType implementation
typ_args = getattr(typ, "_marshmallow_args", {})
# Handle multiple validators from both `typ` and `metadata`.
# See https://github.com/lovasoa/marshmallow_dataclass/issues/91
new_validators: List[Callable] = []
for meta_dict in (typ_args, metadata):
if "validate" in meta_dict:
if marshmallow.utils.is_iterable_but_not_string(meta_dict["validate"]):
new_validators.extend(meta_dict["validate"])
elif callable(meta_dict["validate"]):
new_validators.append(meta_dict["validate"])
metadata["validate"] = new_validators if new_validators else None
metadata = {"description": typ.__name__, **typ_args, **metadata}
field = getattr(typ, "_marshmallow_field", None)
if field:
return field(**metadata)
else:
return field_for_schema(
newtype_supertype,
metadata=metadata,
default=default,
base_schema=base_schema,
)
def field_for_schema(
typ: type,
default=marshmallow.missing,
metadata: Mapping[str, Any] = None,
base_schema: Optional[Type[marshmallow.Schema]] = None,
) -> marshmallow.fields.Field:
"""
Get a marshmallow Field corresponding to the given python type.
The metadata of the dataclass field is used as arguments to the marshmallow Field.
:param typ: The type for which a field should be generated
:param default: value to use for (de)serialization when the field is missing
:param metadata: Additional parameters to pass to the marshmallow field constructor
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
>>> int_field = field_for_schema(int, default=9, metadata=dict(required=True))
>>> int_field.__class__
<class 'marshmallow.fields.Integer'>
>>> int_field.default
9
>>> field_for_schema(str, metadata={"marshmallow_field": marshmallow.fields.Url()}).__class__
<class 'marshmallow.fields.Url'>
"""
metadata = {} if metadata is None else dict(metadata)
if default is not marshmallow.missing:
metadata.setdefault("default", default)
# 'missing' must not be set for required fields.
if not metadata.get("required"):
metadata.setdefault("missing", default)
else:
metadata.setdefault("required", True)
# If the field was already defined by the user
predefined_field = metadata.get("marshmallow_field")
if predefined_field:
return predefined_field
# Generic types specified without type arguments
if typ is list:
typ = List[Any]
elif typ is dict:
typ = Dict[Any, Any]
# Base types
field = _field_by_type(typ, base_schema)
if field:
return field(**metadata)
if typ is Any:
metadata.setdefault("allow_none", True)
return marshmallow.fields.Raw(**metadata)
# Generic types
origin = typing_inspect.get_origin(typ)
if origin:
arguments = typing_inspect.get_args(typ, True)
# Override base_schema.TYPE_MAPPING to change the class used for generic types below
type_mapping = base_schema.TYPE_MAPPING if base_schema else {}
if origin in (list, List):
child_type = field_for_schema(arguments[0], base_schema=base_schema)
list_type = type_mapping.get(List, marshmallow.fields.List)
return list_type(child_type, **metadata)
if origin in (tuple, Tuple):
children = tuple(
field_for_schema(arg, base_schema=base_schema) for arg in arguments
)
tuple_type = type_mapping.get(Tuple, marshmallow.fields.Tuple)
return tuple_type(children, **metadata)
elif origin in (dict, Dict):
dict_type = type_mapping.get(Dict, marshmallow.fields.Dict)
return dict_type(
keys=field_for_schema(arguments[0], base_schema=base_schema),
values=field_for_schema(arguments[1], base_schema=base_schema),
**metadata,
)
elif typing_inspect.is_optional_type(typ):
subtyp = next(t for t in arguments if t is not NoneType) # type: ignore
# Treat optional types as types with a None default
metadata["default"] = metadata.get("default", None)
metadata["missing"] = metadata.get("missing", None)
metadata["required"] = False
return field_for_schema(subtyp, metadata=metadata, base_schema=base_schema)
elif typing_inspect.is_union_type(typ):
from . import union_field
return union_field.Union(
[
(
subtyp,
field_for_schema(
subtyp, metadata=metadata, base_schema=base_schema
),
)
for subtyp in arguments
],
**metadata,
)
# typing.NewType returns a function with a __supertype__ attribute
newtype_supertype = getattr(typ, "__supertype__", None)
if newtype_supertype and inspect.isfunction(typ):
return _field_by_supertype(
typ=typ,
default=default,
newtype_supertype=newtype_supertype,
metadata=metadata,
base_schema=base_schema,
)
# enumerations
if isinstance(typ, EnumMeta):
import marshmallow_enum
return marshmallow_enum.EnumField(typ, **metadata)
# Nested marshmallow dataclass
nested_schema = getattr(typ, "Schema", None)
# Nested dataclasses
forward_reference = getattr(typ, "__forward_arg__", None)
nested = (
nested_schema or forward_reference or class_schema(typ, base_schema=base_schema)
)
return marshmallow.fields.Nested(nested, **metadata)
def _base_schema(
clazz: type, base_schema: Optional[Type[marshmallow.Schema]] = None
) -> Type[marshmallow.Schema]:
"""
Base schema factory that creates a schema for `clazz` derived either from `base_schema`
or `BaseSchema`
"""
# Remove `type: ignore` when mypy handles dynamic base classes
# https://github.com/python/mypy/issues/2813
class BaseSchema(base_schema or marshmallow.Schema): # type: ignore
def load(self, data: Mapping, *, many: bool = None, **kwargs):
all_loaded = super().load(data, many=many, **kwargs)
many = self.many if many is None else bool(many)
if many:
return [clazz(**loaded) for loaded in all_loaded]
else:
return clazz(**all_loaded)
return BaseSchema
def _get_field_default(field: dataclasses.Field):
"""
Return a marshmallow default value given a dataclass default value
>>> _get_field_default(dataclasses.field())
<marshmallow.missing>
"""
# Remove `type: ignore` when https://github.com/python/mypy/issues/6910 is fixed
default_factory = field.default_factory # type: ignore
if default_factory is not dataclasses.MISSING:
return default_factory
elif field.default is dataclasses.MISSING:
return marshmallow.missing
return field.default
def NewType(
name: str,
typ: Type[_U],
field: Optional[Type[marshmallow.fields.Field]] = None,
**kwargs,
) -> Callable[[_U], _U]:
"""NewType creates simple unique types
to which you can attach custom marshmallow attributes.
All the keyword arguments passed to this function will be transmitted
to the marshmallow field constructor.
>>> import marshmallow.validate
>>> IPv4 = NewType('IPv4', str, validate=marshmallow.validate.Regexp(r'^([0-9]{1,3}\\.){3}[0-9]{1,3}$'))
>>> @dataclass
... class MyIps:
... ips: List[IPv4]
>>> MyIps.Schema().load({"ips": ["0.0.0.0", "grumble grumble"]})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'ips': {1: ['String does not match expected pattern.']}}
>>> MyIps.Schema().load({"ips": ["127.0.0.1"]})
MyIps(ips=['127.0.0.1'])
>>> Email = NewType('Email', str, field=marshmallow.fields.Email)
>>> @dataclass
... class ContactInfo:
... mail: Email = dataclasses.field(default="<EMAIL>")
>>> ContactInfo.Schema().load({})
ContactInfo(mail='<EMAIL>')
>>> ContactInfo.Schema().load({"mail": "grumble grumble"})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'mail': ['Not a valid email address.']}
"""
def new_type(x: _U):
return x
new_type.__name__ = name
new_type.__supertype__ = typ # type: ignore
new_type._marshmallow_field = field # type: ignore
new_type._marshmallow_args = kwargs # type: ignore
return new_type
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 3.09375 | 3 |
electrum_trc/scripts/txradar.py | TheSin-/electrum-trc | 1 | 3613 | <reponame>TheSin-/electrum-trc
#!/usr/bin/env python3
import sys
import asyncio
from electrum_trc.network import filter_protocol, Network
from electrum_trc.util import create_and_start_event_loop, log_exceptions
try:
txid = sys.argv[1]
except:
print("usage: txradar txid")
sys.exit(1)
loop, stopping_fut, loop_thread = create_and_start_event_loop()
network = Network()
network.start()
@log_exceptions
async def f():
try:
peers = await network.get_peers()
peers = filter_protocol(peers, 's')
results = await network.send_multiple_requests(peers, 'blockchain.transaction.get', [txid])
r1, r2 = [], []
for k, v in results.items():
(r1 if not isinstance(v, Exception) else r2).append(k)
print(f"Received {len(results)} answers")
try: propagation = len(r1) * 100. / (len(r1) + len(r2))
except ZeroDivisionError: propagation = 0
print(f"Propagation rate: {propagation:.1f} percent")
finally:
stopping_fut.set_result(1)
asyncio.run_coroutine_threadsafe(f(), loop)
| 2.1875 | 2 |
jp.atcoder/dp/dp_g/24586988.py | kagemeka/atcoder-submissions | 1 | 3614 | import sys
import typing
import numpy as np
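# AtCoder DP contest, problem G (longest path in a DAG): solve() consumes the edge
# list with Kahn's topological ordering while propagating dist[v], the length of the
# longest path ending at v, and prints dist.max().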
def solve(
n: int,
g: np.array,
) -> typing.NoReturn:
indeg = np.zeros(
n,
dtype=np.int64,
)
for v in g[:, 1]:
indeg[v] += 1
g = g[g[:, 0].argsort()]
i = np.searchsorted(
g[:, 0],
np.arange(n + 1)
)
q = [
v for v in range(n)
if not indeg[v]
]
dist = np.zeros(
n,
dtype=np.int64,
)
for u in q:
for j in range(
i[u], i[u + 1],
):
v = g[j, 1]
indeg[v] -= 1
dist[v] = max(
dist[v],
dist[u] + 1,
)
if indeg[v]: continue
q.append(v)
print(dist.max())
def main() -> typing.NoReturn:
n, m = map(
int, input().split(),
)
g = np.array(
sys.stdin.read().split(),
dtype=np.int64,
).reshape(m, 2) - 1
solve(n, g)
OJ = 'ONLINE_JUDGE'
if sys.argv[-1] == OJ:
from numba import i8, njit
from numba.pycc import CC
cc = CC('my_module')
fn = solve
signature = (i8, i8[:, :])
cc.export(
fn.__name__,
signature,
)(fn)
cc.compile()
exit(0)
from my_module import solve
main()
| 2.34375 | 2 |
starteMessung.py | jkerpe/TroubleBubble | 0 | 3615 | <filename>starteMessung.py
from datetime import datetime
from pypylon import pylon
import nimmAuf
import smbus2
import os
import argparse
import bestimmeVolumen
from threading import Thread
import time
programmstart = time.time()
# Parse arguments (e.g. run 'starteMessung.py -n 100' in the terminal)
ap = argparse.ArgumentParser(description="""Script for capturing images of the test section and
determining the volume of air bubbles""")
ap.add_argument("-n", "--number", default=400, type=int, help="Number of frames to capture. Default: 400 images")
ap.add_argument("-fr", "--framerate", default=100, type=int, help="Frame rate in fps. Guide values: flow <3 ml/s: 50 fps, 3-6 ml/s: 100 fps, >6 ml/s: 200 fps; Default: 100 fps")
args = vars(ap.parse_args())
# Extract the arguments from the parser
numberOfImagesToGrab = args['number']
framerate = args['framerate']
if __name__ == '__main__':
startzeit = time.time()
    # Check whether a camera is connected
devices = pylon.TlFactory.GetInstance().EnumerateDevices()
if len(devices) == 0:
print("Keine Kamera angeschlossen oder Kamera woanders geöffnet.")
        exit()
    # Check whether the pressure sensor is connected
try:
bus = smbus2.SMBus(0)
        bus.read_i2c_block_data(0x40, 0, 2)  # receive 2 bytes
except OSError:
print("Kein Drucksensor angeschlossen")
exit()
    # Generate an individual folder name from the current time and the parameters
dirname = f'{datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}'
    os.mkdir(dirname)  # create the folder
print(f"Ordnername: {dirname}")
beginn = time.time()-programmstart
    # Start the threads for capturing and processing
t_aufnahme = Thread(target=nimmAuf.starte, args=(dirname, numberOfImagesToGrab, framerate, startzeit))
t_tracke = Thread(target=bestimmeVolumen.tracke, args=(dirname, numberOfImagesToGrab))
t_aufnahme.start()
t_tracke.start()
t_aufnahme.join()
t_tracke.join()
| 2.625 | 3 |
application/services/decart.py | Sapfir0/web-premier-eye | 0 | 3616 | import os
import tempfile
def hasOnePointInside(bigRect, minRect):  # at least one corner point lies inside
minY, minX, maxY, maxX = bigRect
y1, x1, y2, x2 = minRect
a = (minY <= y1 <= maxY)
b = (minX <= x1 <= maxX)
c = (minY <= y2 <= maxY)
d = (minX <= x2 <= maxX)
return a or b or c or d
def isCompletelyInside(bigRect, minRect):  # the object is completely inside the rectangle
y1, x1, y2, x2 = bigRect
minX = x1
    minY = y1  # seems right
maxX = x2
maxY = y2
y1, x1, y2, x2 = minRect
a = (minY <= y1 <= maxY)
b = (minX <= x1 <= maxX)
c = (minY <= y2 <= maxY)
d = (minX <= x2 <= maxX)
    return a and b and c and d  # if True, the object is completely inside the big rectangle
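# Usage sketch (illustrative values; rectangles follow the [y1, x1, y2, x2] unpacking above):
#   hasOnePointInside([0, 0, 10, 10], [2, 2, 4, 4])    # True
#   isCompletelyInside([0, 0, 10, 10], [2, 2, 14, 4])  # False, one corner falls outside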
def isPartiallyInside(bigRect, minRect, innerPercent=0.5):  # the object is partially inside the rectangle
bigLUy, bigLUx, bigRDy, bigRDx = bigRect
minLUy, minLUx, minRDy, minRDx = minRect
    fullSquare = (minLUy - minRDy) * (minRDx - minLUx)  # not sure this is right
    # Not sure about the ifs
if bigLUy < minLUy:
minLUy = bigLUy
if bigRDy < minRDy:
minRDy = bigRDy
if bigLUx > minLUx:
minLUx = bigLUx
if bigRDx > minRDx:
minRDx = bigRDx
inObjSquare = (minLUy - minRDy) * (minRDx - minLUx)
return inObjSquare / fullSquare >= innerPercent
def createGraphic(imagePath: str, searchRect: list, objectsListRect: list):
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import matplotlib.patches as patches
im = np.array(Image.open(imagePath), dtype=np.uint8)
fig, ax = plt.subplots(1)
ax.imshow(im)
bigRect = Rectangle(searchRect)
minRects = [Rectangle(i) for i in objectsListRect]
rect = patches.Rectangle(*bigRect.getMTparam(), linewidth=1, edgecolor='g', facecolor='None')
ax.add_patch(rect)
for i in minRects:
rect = patches.Rectangle(*i.getMTparam(), linewidth=1, edgecolor='r', facecolor='none')
ax.add_patch(rect)
temp = tempfile.NamedTemporaryFile()
path = os.path.join(os.getcwd(), temp.name)
plt.savefig(path)
return os.path.split(temp.name + ".png")
class Rectangle:
LDx = 0
LDy = 0
RUx = 0
RUy = 0
def __init__(self, coordinates: list):
if len(coordinates) != 4:
raise ValueError("Нужно подавать координаты(х,у) двух противоложных вершин")
if coordinates[0] >= coordinates[2] or coordinates[1] >= coordinates[3]:
raise ValueError(
"Неверно заданы вершины, сначала подаются 2 координаты нижнего левого угла, потом верхнего правого")
self.LDx, self.LDy, self.RUx, self.RUy = coordinates
def getWidth(self):
return self.RUx - self.LDx
def getHeight(self):
return self.RUy - self.LDy
def getLUx(self):
return self.LDx
def getLUy(self):
return self.RUy
def getMTparam(self):
        return ((self.getLUy(), self.getLUx()),  # why the minus? I don't know
                -self.getHeight(), self.getWidth())  # everything is in a completely different order than it should be? what is going on
def getCenterOfDown(self):
return [(self.LDx + self.RUx) / 2, self.LDy]
| 2.875 | 3 |
goopylib/objects/_BBox.py | BhavyeMathur/goopylib | 25 | 3617 | from goopylib.objects.GraphicsObject import GraphicsObject
from goopylib.styles import *
class BBox(GraphicsObject):
# Internal base class for objects represented by bounding box
# (opposite corners) Line segment is a degenerate case.
resizing_objects = []
def __init__(self, p1, p2, bounds=None, fill=None, outline=None, outline_width=None, cursor="arrow", layer=0,
tag=None):
self.p1 = p1
self.p2 = p2
# These make sure that the p2 is 'after' p1, ie the x & y value of p2 is greater than that of p1
if self.p1[0] > self.p2[0]: # Checking if p1's x value is greater than p2's. If so, then swap the values
self.p1[0], self.p2[0] = self.p2[0], self.p1[0]
if self.p1[1] > self.p2[1]: # Checking if p1's y value is greater than p2's. If so, then swap the values
self.p1[1], self.p2[1] = self.p2[1], self.p1[1]
self.anchor = [(self.p1[0] + self.p2[0]) // 2, (self.p1[1] + self.p2[1]) // 2]
GraphicsObject.__init__(self, options=(), cursor=cursor, layer=layer, bounds=bounds, tag=tag)
# abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value
self.width = self.p2[0] - self.p1[0]
self.height = self.p2[1] - self.p1[1]
self.min_width = None
self.min_height = None
self.max_width = None
self.max_height = None
self.resizing_bounds = {}
self.is_resizing = {}
self.bounds_thickness = 0
if fill is None:
self.fill = STYLES["default"]["fill"]
elif isinstance(fill, Colour): # Checking if the option is a colour
self.fill = fill
else: # If not, raise an error
raise GraphicsError(f"\n\nGraphicsError: The Rectangle fill must be a Colour object , not {fill}")
if outline is None:
self.outline = STYLES["default"]["outline"]
elif isinstance(outline, Colour): # Checking if the option is a colour
self.outline = outline
else: # If not, raise an error
raise GraphicsError(f"\n\nGraphicsError: The rectangle outline must be a Colour object , not {outline}")
if outline_width is None:
self.outline_width = STYLES["default"]["width"]
elif isinstance(outline_width, int): # Checking if the option is an integer
self.outline_width = outline_width
else: # If not, raise an error
raise GraphicsError(f"\n\nGraphicsError: The rectangle outline width must be an integer, not {outline_width}")
def __repr__(self):
return "_BBox"
def _set_resizable(self, resizables, top_bounds=None, bottom_bounds=None, left_bounds=None, right_bounds=None,
thickness=10):
"""Override in subclasses"""
pass
def _move(self, dx, dy):
self.p1[0] += dx
self.p1[1] += dy
self.p2[0] += dx
self.p2[1] += dy
self.anchor[0] += dx
self.anchor[1] += dy
def is_clicked(self, mouse_pos):
if self.bounds is None:
if mouse_pos is None:
return False
else:
if (self.p1[0] < mouse_pos[0] < self.p2[0] or self.p1[0] > mouse_pos[0] > self.p2[0]) and \
(self.p1[1] < mouse_pos[1] < self.p2[1] or self.p1[1] > mouse_pos[1] > self.p2[1]):
return True
else:
return False
else:
return self.bounds.is_clicked(mouse_pos)
def get_p1(self):
return self.p1.copy()
def get_p2(self):
return self.p2.copy()
def get_top_right(self):
return self.p1.copy()
def get_top_left(self):
return [self.p2[0], self.p1[1]]
def get_bottom_left(self):
return [self.p1[0], self.p2[1]]
def get_bottom_right(self):
return self.p2.copy()
def get_top(self):
return [(self.p2[0] + self.p1[0]) / 2, self.p1[1]]
def get_bottom(self):
return [(self.p2[0] + self.p1[0]) / 2, self.p2[1]]
def get_left(self):
return [self.p1[0], (self.p1[1] + self.p2[1]) / 2]
def get_right(self):
return [self.p2[0], (self.p1[1] + self.p2[1]) / 2]
def get_width(self):
return self.width
def get_height(self):
return self.height
def get_fill(self):
return self.fill
def get_outline(self):
return self.outline
def get_outline_width(self):
return self.outline_width
def get_anchor(self):
return self.anchor
def set_dimensions(self, width, height, horizontal_align="center", vertical_align="center"):
self.set_width(width, horizontal_align)
self.set_height(height, vertical_align)
return self
def set_resizable(self, top=False, left=False, bottom=False, right=False, min_width=40, min_height=40,
bounds_width=10, top_bounds=None, bottom_bounds=None, left_bounds=None, right_bounds=None):
if min_width < 1 or min_height < 1:
raise GraphicsError(f"\n\nGraphicsError: Minimum height and width of resizable object must be greater than "
f"or equal to 1. Right now, min_width={min_width} & min_height={min_height}")
self.min_width = min_width
self.min_height = min_height
self.is_resizing = {"top": top, "left": left, "bottom": bottom, "right": right}
self._set_resizable([top, bottom, left, right], top_bounds=top_bounds, bottom_bounds=bottom_bounds,
left_bounds=left_bounds, right_bounds=right_bounds, thickness=bounds_width)
if top is False and bottom is False and left is False and right is False:
if self in GraphicsObject.resizing_objects:
GraphicsObject.resizing_objects.remove(self)
elif self not in GraphicsObject.resizing_objects:
GraphicsObject.resizing_objects.add(self)
self.bounds_thickness = bounds_width
return self
def set_coords(self, p1, p2):
self.p1 = p1.copy()
self.p2 = p2.copy()
# These make sure that the p2 is 'after' p1, ie the x & y value of p2 is greater than that of p1
if self.p1[0] > self.p2[0]: # Checking if p1's x value is greater than p2's. If so, then swap the values
self.p1[0], self.p2[0] = self.p2[0], self.p1[0]
if self.p1[1] > self.p2[1]: # Checking if p1's y value is greater than p2's. If so, then swap the values
self.p1[1], self.p2[1] = self.p2[1], self.p1[1]
# abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value
self.width = self.p2[0] - self.p1[0]
self.height = self.p2[1] - self.p1[1]
width_scale = (p2[0] - p1[0]) / self.width
height_scale = (p2[1] - p1[1]) / self.height
# abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value
self.width = p2[0] - p1[0]
self.height = p2[1] - p1[1]
self.anchor = [(self.p1[0] + self.p2[0]) // 2, (self.p1[1] + self.p2[1]) // 2]
self._update_layer()
return self
def set_width(self, width, center="center"):
if center not in {"center", "right", "left"}:
raise GraphicsError(
"\n\nThe center argument for resizing the object (set_outline_width) needs to be one of "
f'{["center", "right", "left"]}')
if center == "left":
self.set_coords(self.p1, self.p2.add_x(width - self.width))
elif center == "right":
self.set_coords(self.p1.add_x(-(width - self.width)), self.p2)
else:
self.set_coords(self.p1.add_x(-(width / 2 - self.width)), self.p2.add_x(width / 2 - self.width))
return self
def set_height(self, height, center="center"):
if center not in {"center", "top", "bottom"}:
raise GraphicsError(
"\n\nThe center argument for resizing the object (set_height) needs to be one of "
f'{["center", "top", "bottom"]}')
if center == "top":
self.set_coords(self.p1, self.p2.add_y(height - self.height))
elif center == "bottom":
self.set_coords(self.p1.add_y(-(height - self.height)), self.p2)
else:
self.set_coords(self.p1.add_y(-(height / 2 - self.height)), self.p2.add_y(height / 2 - self.height))
return self
def set_fill(self, fill):
if fill is None:
self.fill = STYLES["default"]["fill"]
elif isinstance(fill, Colour): # Checking if the option is a colour
self.fill = fill
else: # If not, raise an error
raise GraphicsError(f"\n\nGraphicsError: The Rectangle fill must be a Colour object , not {fill}")
self._update_layer()
return self
def set_outline(self, outline):
if outline is None:
self.outline = STYLES["default"]["outline"]
elif isinstance(outline, Colour): # Checking if the option is a colour
self.outline = outline
else: # If not, raise an error
raise GraphicsError(f"\n\nGraphicsError: The rectangle outline must be a Colour object , not {outline}")
self._update_layer()
return self
def set_outline_width(self, outline_width):
if outline_width is None:
self.outline_width = STYLES["default"]["width"]
elif isinstance(outline_width, int): # Checking if the option is an integer
self.outline_width = outline_width
else: # If not, raise an error
raise GraphicsError(
f"\n\nGraphicsError: The rectangle outline width must be an integer, not {outline_width}")
self._update_layer()
return self
| 2.8125 | 3 |
Graph/DFS&BFS.py | Mayner0220/Programmers | 1 | 3618 | # https://www.acmicpc.net/problem/1260
n, m, v = map(int, input().split())
graph = [[0] * (n+1) for _ in range(n+1)]
visit = [False] * (n+1)
for _ in range(m):
R, C = map(int, input().split())
graph[R][C] = 1
graph[C][R] = 1
def dfs(v):
visit[v] = True
print(v, end=" ")
for i in range(1, n+1):
if not visit[i] and graph[v][i]==1:
dfs(i)
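# bfs() reuses the visit list left behind by dfs(): every vertex reachable from v is
# True at this point, and bfs flips entries back to False as it enqueues them.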
def bfs(v):
queue = [v]
visit[v] = False
while queue:
v = queue.pop(0)
print(v, end=" ")
for i in range(1, n+1):
if visit[i] and graph[v][i]==1:
queue.append(i)
visit[i] = False
dfs(v)
print()
bfs(v) | 3.171875 | 3 |
coding_intereview/1576. Replace All ?'s to Avoid Consecutive Repeating Characters.py | Jahidul007/Python-Bootcamp | 2 | 3619 | class Solution:
def modifyString(self, s: str) -> str:
s = list(s)
for i in range(len(s)):
if s[i] == "?":
for c in "abc":
if (i == 0 or s[i-1] != c) and (i+1 == len(s) or s[i+1] != c):
s[i] = c
break
return "".join(s)
| 3.4375 | 3 |
pysteps/tests/helpers.py | Fangyh09/pysteps | 6 | 3620 | <reponame>Fangyh09/pysteps
"""
Testing helper functions
=======================
Collection of helper functions for the testing suite.
"""
from datetime import datetime
import numpy as np
import pytest
import pysteps as stp
from pysteps import io, rcparams
def get_precipitation_fields(num_prev_files=0):
"""Get a precipitation field from the archive to be used as reference."""
# Selected case
date = datetime.strptime("201505151630", "%Y%m%d%H%M")
data_source = rcparams.data_sources["mch"]
root_path = data_source["root_path"]
path_fmt = data_source["path_fmt"]
fn_pattern = data_source["fn_pattern"]
fn_ext = data_source["fn_ext"]
importer_name = data_source["importer"]
importer_kwargs = data_source["importer_kwargs"]
# Find the input files from the archive
fns = io.archive.find_by_date(date, root_path, path_fmt, fn_pattern, fn_ext,
timestep=5, num_prev_files=num_prev_files)
# Read the radar composites
importer = io.get_method(importer_name, "importer")
reference_field, quality, metadata = io.read_timeseries(fns, importer,
**importer_kwargs)
del quality # Not used
if num_prev_files == 0:
reference_field = np.squeeze(reference_field) # Remove time dimension
# Convert to mm/h
reference_field, metadata = stp.utils.to_rainrate(reference_field, metadata)
# Mask invalid values
reference_field = np.ma.masked_invalid(reference_field)
# Log-transform the data [dBR]
reference_field, metadata = stp.utils.dB_transform(reference_field,
metadata,
threshold=0.1,
zerovalue=-15.0)
return reference_field
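# Usage sketch (assumes the "mch" sample data source is configured in pystepsrc):
#   reference = get_precipitation_fields(num_prev_files=1)  # masked (2, m, n) array in dBR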
def smart_assert(actual_value, expected, tolerance=None):
"""
Assert by equality for non-numeric values, or by approximation otherwise.
If the precision keyword is None, assert by equality.
When the precision is not None, assert that two numeric values
(or two sets of numbers) are equal to each other within the tolerance.
"""
if tolerance is None:
assert actual_value == expected
else:
        # Compare numbers up to the given tolerance
        assert actual_value == pytest.approx(expected, tolerance)
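# Usage sketch:
#   smart_assert("mch", "mch")                    # compared with ==
#   smart_assert(0.3000001, 0.3, tolerance=1e-6)  # compared with pytest.approx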
| 2.25 | 2 |
modules/courses/courses.py | ehiller/mobilecsp-v18 | 0 | 3621 | <reponame>ehiller/mobilecsp-v18<filename>modules/courses/courses.py
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Courses module."""
__author__ = '<NAME> (<EMAIL>)'
from common import resource
from controllers import assessments
from controllers import lessons
from controllers import utils
from models import content
from models import resources_display
from models import custom_modules
from models import roles
from tools import verify
All_LOCALES_PERMISSION = 'can_pick_all_locales'
All_LOCALES_DESCRIPTION = 'Can pick all locales, including unavailable ones.'
SEE_DRAFTS_PERMISSION = 'can_see_draft_content'
SEE_DRAFTS_DESCRIPTION = 'Can see lessons and assessments with draft status.'
custom_module = None
def can_pick_all_locales(app_context):
return roles.Roles.is_user_allowed(
app_context, custom_module, All_LOCALES_PERMISSION)
def can_see_drafts(app_context):
return roles.Roles.is_user_allowed(
app_context, custom_module, SEE_DRAFTS_PERMISSION)
def register_module():
"""Registers this module in the registry."""
def on_module_enabled():
roles.Roles.register_permissions(custom_module, permissions_callback)
resource.Registry.register(resources_display.ResourceCourseSettings)
resource.Registry.register(resources_display.ResourceUnit)
resource.Registry.register(resources_display.ResourceAssessment)
resource.Registry.register(resources_display.ResourceLink)
resource.Registry.register(resources_display.ResourceLesson)
resource.Registry.register(utils.ResourceHtmlHook)
def permissions_callback(unused_app_context):
return [
roles.Permission(All_LOCALES_PERMISSION, All_LOCALES_DESCRIPTION),
roles.Permission(SEE_DRAFTS_PERMISSION, SEE_DRAFTS_DESCRIPTION)
]
# provide parser to verify
verify.parse_content = content.parse_string_in_scope
# setup routes
courses_routes = [
('/', lessons.CourseHandler),
('/activity', lessons.UnitHandler),
('/answer', assessments.AnswerHandler),
('/assessment', lessons.AssessmentHandler),
('/course', lessons.CourseHandler),
('/forum', utils.ForumHandler),
('/preview', utils.PreviewHandler),
('/register', utils.RegisterHandler),
('/resources', utils.ResourcesHandler),
('/rest/locale', utils.StudentLocaleRESTHandler),
('/review', lessons.ReviewHandler),
('/reviewdashboard', lessons.ReviewDashboardHandler),
('/student/editstudent', utils.StudentEditStudentHandler),
('/student/settracks', utils.StudentSetTracksHandler),
('/student/home', utils.StudentProfileHandler),
('/student/unenroll', utils.StudentUnenrollHandler),
('/unit', lessons.UnitHandler)]
global custom_module # pylint: disable=global-statement
custom_module = custom_modules.Module(
'Course',
'A set of pages for delivering an online course.',
[], courses_routes,
notify_module_enabled=on_module_enabled)
return custom_module
| 1.78125 | 2 |
packages/merlin/protocols/PrefixLayout.py | pyre/pyre | 25 | 3622 | <gh_stars>10-100
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
# (c) 1998-2021 all rights reserved
# support
import merlin
# the manager of intermediate and final build products
class PrefixLayout(merlin.protocol, family="merlin.layouts.prefix"):
"""
    The manager of all the build products, both final and intermediate disposables
"""
# required state
bin = merlin.properties.path()
bin.doc = "the location of executables"
config = merlin.properties.path()
config.doc = "global package configuration files"
doc = merlin.properties.path()
doc.doc = "package documentation"
etc = merlin.properties.path()
etc.doc = "host specific files"
include = merlin.properties.path()
include.doc = "library header files"
lib = merlin.properties.path()
lib.doc = "libraries"
libexec = merlin.properties.path()
libexec.doc = "binaries that are meant to be used by other packages"
share = merlin.properties.path()
share.doc = "architecture independent package files"
var = merlin.properties.path()
var.doc = "runtime files"
# framework hooks
@classmethod
def pyre_default(cls, **kwds):
"""
Specify the default implementation
"""
# choose the default implementer
return merlin.components.fhs
# end of file
| 1.921875 | 2 |
test/IECoreMaya/ImageConverterTest.py | bradleyhenke/cortex | 386 | 3623 | <reponame>bradleyhenke/cortex
##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds
import IECore
import IECoreImage
import IECoreMaya
class ImageConverterTest( IECoreMaya.TestCase ) :
def test( self ) :
imageA = IECore.Reader.create( "test/IECoreImage/data/exr/colorBarsWithAlpha.exr" ).read()
toMaya = IECoreMaya.ToMayaImageConverter( imageA )
mImage = maya.OpenMaya.MImage()
toMaya.convert( mImage )
fromMaya = IECoreMaya.FromMayaImageConverter( mImage )
imageB = fromMaya.convert()
self.assertFalse(
IECoreImage.ImageDiffOp()( imageA=imageA, imageB=imageB, maxError=1.0/256 ).value
)
if __name__ == "__main__":
IECoreMaya.TestProgram()
| 0.96875 | 1 |
tests/core_ptl/check_for_ranks.py | PatrykNeubauer/NeMo | 2 | 3624 | <filename>tests/core_ptl/check_for_ranks.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import torch
from omegaconf import OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.utilities.distributed import rank_zero_only
from nemo.core import ModelPT
from nemo.utils import logging
from nemo.utils.exp_manager import ExpManagerConfig, exp_manager
class OnesDataset(torch.utils.data.Dataset):
def __init__(self, dataset_len):
super().__init__()
self.__dataset_len = dataset_len
def __getitem__(self, *args):
return torch.ones(2)
def __len__(self):
return self.__dataset_len
class ExampleModel(ModelPT):
def __init__(self, *args, **kwargs):
cfg = OmegaConf.structured({})
super().__init__(cfg, trainer=kwargs.get('trainer', None))
# dummy parameter in order to allow DDP to execute
self.l1 = torch.nn.modules.Linear(in_features=2, out_features=1)
def train_dataloader(self):
return None
def val_dataloader(self):
return None
def predict_dataloader(self):
dataset = OnesDataset(2)
return torch.utils.data.DataLoader(dataset, batch_size=2)
def forward(self, batch):
return batch.mean()
def validation_step(self, batch, batch_idx):
return self(batch)
def training_step(self, batch, batch_idx):
return self(batch)
def list_available_models(self):
pass
def setup_training_data(self):
pass
def setup_validation_data(self):
pass
def validation_epoch_end(self, loss):
self.log("val_loss", torch.stack(loss).mean())
def instantiate_multinode_ddp_if_possible():
num_gpus = torch.cuda.device_count()
trainer = Trainer(gpus=num_gpus, accelerator='ddp', logger=None, checkpoint_callback=None)
exp_manager_cfg = ExpManagerConfig(exp_dir='./ddp_check/', use_datetime_version=False, version="")
exp_manager(trainer, cfg=OmegaConf.structured(exp_manager_cfg))
return trainer
def setup_model(trainer: Trainer):
model = ExampleModel(trainer=trainer)
logging.info(f"M.Global Rank:{model.global_rank}")
logging.info(f"M.Local Rank:{model.local_rank}")
logging.info(f"M.World Size:{model.trainer.world_size}")
trainer.predict(model)
return model
def get_rank_info(texts: list, rank_key: str) -> int:
for line in texts:
if rank_key in line:
rank_value = line.split(":")[-1]
rank_value = int(rank_value)
return rank_value
print("Could not find the correct rank key !")
exit(1)
@rank_zero_only
def check_model_ranks(model: ExampleModel):
basedir = os.path.join('./ddp_check/', 'default', 'version_0')
file_template = "nemo_log_globalrank-{rank}_localrank-{rank}.txt"
world_size = torch.cuda.device_count()
for rank in range(world_size):
filename = file_template.format(rank=rank)
filepath = os.path.join(basedir, filename)
with open(filepath, 'r') as f:
texts = f.readlines()
texts = [t.replace("\n", "") for t in texts]
log_global_rank = get_rank_info(texts, rank_key='M.Global Rank')
log_world_size = get_rank_info(texts, rank_key='M.World Size')
if log_global_rank != rank:
print("Logged global rank is not equal to trainer.global_rank !")
exit(1)
if log_world_size != world_size:
            print("Logged world size is not equal to trainer.world_size !")
exit(1)
@rank_zero_only
def cleanup():
if os.path.exists('./ddp_check'):
shutil.rmtree('./ddp_check', ignore_errors=True)
def run_checks():
cleanup()
trainer = instantiate_multinode_ddp_if_possible()
model = setup_model(trainer)
check_model_ranks(model)
print("DDP checks passed !")
cleanup()
if __name__ == '__main__':
run_checks()
| 2.15625 | 2 |
helpers/json_manager.py | Lofi-Lemonade/Python-Discord-Bot-Template | 0 | 3625 | <gh_stars>0
""""
Copyright © Krypton 2022 - https://github.com/kkrypt0nn (https://krypton.ninja)
Description:
This is a template to create your own discord bot in python.
Version: 4.1
"""
import json
def add_user_to_blacklist(user_id: int) -> None:
"""
This function will add a user based on its ID in the blacklist.json file.
:param user_id: The ID of the user that should be added into the blacklist.json file.
"""
with open("blacklist.json", "r+") as file:
file_data = json.load(file)
file_data["ids"].append(user_id)
with open("blacklist.json", "w") as file:
file.seek(0)
json.dump(file_data, file, indent=4)
def remove_user_from_blacklist(user_id: int) -> None:
"""
This function will remove a user based on its ID from the blacklist.json file.
:param user_id: The ID of the user that should be removed from the blacklist.json file.
"""
with open("blacklist.json", "r") as file:
file_data = json.load(file)
file_data["ids"].remove(user_id)
with open("blacklist.json", "w") as file:
file.seek(0)
json.dump(file_data, file, indent=4)
| 3.0625 | 3 |
tests/test_common.py | ColinKennedy/ways | 2 | 3626 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Make sure that generic functions work exactly as we expect.'''
# IMPORT STANDARD LIBRARIES
import unittest
# IMPORT WAYS LIBRARIES
from ways import common
class ParseTestCase(unittest.TestCase):
'''Test generic parsing-related functions.'''
def test_working_0001(self):
'''Test that correct input for expand_string works as expected.'''
pattern = '/jobs/{JOB}/some_kind/{THING}/real_folders'
text = '/jobs/some_job_here/some_kind/of/real_folders'
expected_output = {'JOB': 'some_job_here', 'THING': 'of'}
self.assertEqual(expected_output, common.expand_string(pattern, text))
def test_working_0002(self):
'''Test that correct input for expand_string works as expected.'''
shot = 'NAME_010'
format_string = '{SHOT}_{ID}'
expected_output = {'SHOT': 'NAME', 'ID': '010'}
self.assertEqual(expected_output, common.expand_string(format_string, shot))
def test_expand_string_failure_0001(self):
        '''Force expand_string to fail rather than allow a bad match to occur.'''
text = '/jobs/some_job/some_kind/of/real_folders'
pattern = '/jobs/{JOB}/some_kind/of/real_folders/inner'
self.assertFalse(common.expand_string(pattern, text))
def test_expand_string_failure_0002(self):
        '''Force expand_string to fail rather than allow a bad match to occur.'''
text = '/jobs/some_job/some_kind/of/real_folders'
pattern = '/jobs/{JOB}/some_kind/{SHOTNAME}/real_folders/inner'
self.assertFalse(common.expand_string(pattern, text))
| 2.96875 | 3 |
setup.py | glibin/natasha | 1 | 3627 | <reponame>glibin/natasha<gh_stars>1-10
from setuptools import setup, find_packages
setup(
name='natasha',
version='0.2.0',
description='Named-entity recognition for russian language',
url='https://github.com/bureaucratic-labs/natasha',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='natural language processing, russian morphology, named entity recognition, tomita',
packages=find_packages(),
install_requires=[
'yargy==0.3.0'
],
extras_require={
'web': [
'ujson',
'aiohttp',
],
},
)
| 1.25 | 1 |
GeneratePassword/generate_password_v2.py | OneScreenfulOfPython/screenfuls | 2 | 3628 | import os, sys
import random
import string
try:
# Make Python2 work like Python3
input = raw_input
except NameError:
# On Python3; already using input
pass
letters = string.ascii_letters
numbers = string.digits
punctuation = string.punctuation
def generate(password_length, at_least_one_letter, at_least_one_number, at_least_one_punctuation):
    """Generate a password by including enough random
    characters to meet the password length restriction.
    In addition, the user can specify that at least one
    of each of the classes of characters be used.
"""
#
# Any combination of characters is valid
#
valid_characters = ""
if at_least_one_letter:
valid_characters += letters
if at_least_one_number:
valid_characters += numbers
if at_least_one_punctuation:
valid_characters += punctuation
#
# Start with a blank password and then go round enough
# times to make a password of the required length.
#
password = ""
for i in range(password_length):
#
# Each time around, ensure that one of each of the selected
# groups is chosen, and then just choose randomly from all
# groups.
#
if at_least_one_letter:
character = random.choice(letters)
at_least_one_letter = False
elif at_least_one_number:
character = random.choice(numbers)
at_least_one_number = False
elif at_least_one_punctuation:
character = random.choice(punctuation)
at_least_one_punctuation = False
else:
character = random.choice(valid_characters)
password += character
#
# Finally, shuffle the password so we don't always get a
# letter at the beginning, with a number after and some
# punctuation.
#
characters = list(password)
#
# random.shuffle shuffles a list *in place*
#
random.shuffle(characters)
#
# X.join(...) means: return all the strings in (...) joined by X
# ", ".join(['Eggs', 'Bacon', 'Beans']) => "Eggs, Bacon, Beans"
# But if you want to generate *real* .csv files, use the csv module
# because there are lots of corner-cases.
#
password = "".join(characters)
return password
if __name__ == '__main__':
password_length = int(input("How many letters? "))
at_least_one_letter = "Y" == (input("At least one letter [Y/n]? ").upper() or "Y")
at_least_one_number = "Y" == (input("At least one number [Y/n]? ").upper() or "Y")
at_least_one_punctuation = "Y" == (input("At least one punctuation [Y/n]? ").upper() or "Y")
password = generate(password_length, at_least_one_letter, at_least_one_number, at_least_one_punctuation)
print("Your password is: {}".format(password))
| 4.15625 | 4 |
bot/jobs/thorchain_node_jobs.py | block42-blockchain-company/thornode-telegram-bot | 15 | 3629 | <reponame>block42-blockchain-company/thornode-telegram-bot
from constants.messages import get_node_health_warning_message, get_node_healthy_again_message
from handlers.chat_helpers import try_message_with_home_menu, try_message_to_all_users
from packaging import version
from service.utils import *
def check_thornodes(context):
chat_id = context.job.context['chat_id']
chat_data = context.job.context['chat_data']
inactive_nodes = []
for node_address, local_node in chat_data.get('nodes', {}).items():
try:
remote_node = get_thornode_object_or_none(address=node_address)
except HTTPError as e:
logger.exception(e)
continue
if remote_node is None:
text = 'THORNode ' + local_node['alias'] + ' is not active anymore! 💀' + '\n' + \
'Address: ' + node_address + '\n\n' + \
'Please enter another THORNode address.'
inactive_nodes.append(node_address)
try_message_with_home_menu(context=context,
chat_id=chat_id,
text=text)
continue
is_not_blocked = float(local_node['last_notification_timestamp']) < \
datetime.timestamp(
datetime.now() - timedelta(seconds=local_node['notification_timeout_in_seconds']))
if is_not_blocked:
message = build_notification_message_for_active_node(local_node, remote_node, context)
if message:
# Update data
local_node['status'] = remote_node['status']
local_node['bond'] = remote_node['bond']
local_node['slash_points'] = remote_node['slash_points']
local_node['ip_address'] = remote_node['ip_address']
local_node['last_notification_timestamp'] = datetime.timestamp(datetime.now())
local_node['notification_timeout_in_seconds'] *= NOTIFICATION_TIMEOUT_MULTIPLIER
try_message_with_home_menu(context=context,
chat_id=chat_id,
text=message)
else:
local_node['notification_timeout_in_seconds'] = INITIAL_NOTIFICATION_TIMEOUT
if local_node['status'].upper() in MONITORED_STATUSES and is_thornode_healthy(context, node_address):
check_thorchain_block_height(context, node_address=node_address)
check_thorchain_catch_up_status(context, node_address=node_address)
check_thorchain_midgard_api(context, node_address=node_address)
for node_address in inactive_nodes:
del chat_data['nodes'][node_address]
def build_notification_message_for_active_node(local_node, remote_node, context) -> [str, None]:
changed_fields = [
field for field in ['status', 'bond', 'slash_points']
if local_node[field] != remote_node[field]
]
threshold = get_slash_points_threshold(context)
slash_point_change = abs(int(local_node['slash_points']) - int(remote_node['slash_points']))
if (len(changed_fields) <= 1) and ('slash_points' in changed_fields) and (slash_point_change <= threshold):
return None
if len(changed_fields) > 0:
text = f"THORNode: {local_node['alias']}\n" \
f"Address: {local_node['node_address']}\n" \
f"Status: {local_node['status'].capitalize()}"
if 'status' in changed_fields:
text += f' ➡️ {remote_node["status"].capitalize()}'
text += f"\nBond: {tor_to_rune(int(local_node['bond']))}"
if 'bond' in changed_fields:
text += f" ➡️ {tor_to_rune(int(remote_node['bond']))}"
text += '\nSlash Points: ' + '{:,}'.format(int(local_node['slash_points']))
if 'slash_points' in changed_fields:
text += ' ➡️ ' + '{:,}'.format(int(remote_node['slash_points']))
return text
else:
return None
def check_versions_status(context):
chat_data = context.job.context['chat_data']
try:
node_accounts = get_node_accounts()
except Exception as e:
logger.exception(e)
logger.error("I couldn't get the node accounts while checking version status.")
return
highest_version = max(map(lambda n: n['version'], node_accounts),
key=lambda v: version.parse(v))
last_newest_version = chat_data.get('newest_software_version', None)
if last_newest_version is None or version.parse(
highest_version) > version.parse(last_newest_version):
chat_data['newest_software_version'] = highest_version
for node in chat_data.get('nodes', {}).values():
if version.parse(node['version']) < version.parse(highest_version):
message = f"Consider updating the software on your node: *{node['alias']}* ‼️\n" \
f"Your software version is *{node['version']}* " \
f"but one of the nodes already runs on *{highest_version}*"
try_message_with_home_menu(
context,
chat_id=context.job.context['chat_id'],
text=message)
def check_churning(context):
try:
validators = get_node_accounts()
except Exception as e:
logger.exception(e)
logger.error("I couldn't get the node accounts while checking if churning occurred.")
return
if 'node_statuses' not in context.bot_data:
context.bot_data['node_statuses'] = {}
for validator in validators:
context.bot_data['node_statuses'][
validator['node_address']] = validator['status']
return
local_node_statuses = context.bot_data['node_statuses']
churned_in = []
churned_out = []
highest_churn_status_since = 0
for validator in validators:
if did_churn_happen(validator, local_node_statuses, highest_churn_status_since):
highest_churn_status_since = int(validator['status_since'])
for validator in validators:
remote_status = validator['status']
local_status = local_node_statuses[
validator['node_address']] if validator[
'node_address'] in local_node_statuses else "unknown"
if remote_status != local_status:
if 'active' == remote_status:
churned_in.append({
"address": validator['node_address'],
"bond": validator['bond']
})
elif 'active' == local_status:
churned_out.append({
"address": validator['node_address'],
"bond": validator['bond']
})
if len(churned_in) or len(churned_out):
text = "🔄 CHURN SUMMARY\n" \
"THORChain has successfully churned:\n\n"
text += "Nodes Added:\n" if len(churned_in) else ""
for node in churned_in:
text += f"*{node['address']}*\nBond: *{tor_to_rune(node['bond'])}*\n"
text += "\nNodes Removed:\n" if len(churned_out) else ""
for node in churned_out:
text += f"*{node['address']}*\nBond: *{tor_to_rune(node['bond'])}*\n"
text += "\nSystem:\n"
try:
network = get_network_data()
text += f"📡 Network Security: *{network_security_ratio_to_string(get_network_security_ratio(network))}*\n\n" \
f"💚 Total Active Bond: *{tor_to_rune(network['bondMetrics']['totalActiveBond'])}* (total)\n\n" \
"⚖️ Bonded/Staked Ratio: *" + '{:.2f}'.format(
int(get_network_security_ratio(network) * 100)) + " %*\n\n" \
"↩️ Bonding ROI: *" + '{:.2f}'.format(
float(network['bondingAPY']) * 100) + " %* APY\n\n" \
"↩️ Liquidity ROI: *" + '{:.2f}'.format(
float(network['liquidityAPY']) * 100) + " %* APY"
context.bot_data.setdefault("vault_addresses", {})
current_chains = get_pool_addresses_from_any_node()
for chain in current_chains:
if chain['chain'] in context.bot_data['vault_addresses']:
if chain['address'] != context.bot_data['vault_addresses'][chain['chain']]:
text += f"\n\n🔐 Vault Addresses:" if "Vault Addresses" not in text else ""
text += f"\n*{chain['chain']}*: \n" \
f"Old Vault address: {context.bot_data['vault_addresses'][chain['chain']]}\n"\
f"⬇️\n" \
f"New Vault address: {chain['address']}\n"
else:
text += "\n\n⚠️ 🚨 CHURNING BUT THE VAULT ADDRESSES DID NOT CHANGE 🚨\n"
context.bot_data['vault_addresses'][chain['chain']] = chain['address']
except Exception as e:
logger.exception(e)
try_message_to_all_users(context, text=text)
for validator in validators:
context.bot_data['node_statuses'][
validator['node_address']] = validator['status']
def did_churn_happen(validator, local_node_statuses, highest_churn_status_since) -> bool:
remote_status = validator['status']
local_status = local_node_statuses[validator['node_address']] if validator[
'node_address'] in local_node_statuses else "unknown"
if int(validator['status_since']) > highest_churn_status_since and \
((local_status == 'ready' and remote_status == 'active') or (
local_status == 'active' and remote_status == 'standby')):
return True
return False
def is_thornode_healthy(context, node_address) -> bool:
chat_id = context.job.context['chat_id']
node_data = context.job.context['chat_data']['nodes'][node_address]
    # If not initialized, assume the node was healthy.
if "healthy" not in context.job.context['chat_data']['nodes'][node_address]:
context.job.context['chat_data']['nodes'][node_address]["healthy"] = True
was_healthy = node_data["healthy"]
try:
# Check whether node answers. If it doesn't we get an Exception.
get_latest_block_height(node_data['ip_address'])
if not was_healthy:
try_message_with_home_menu(context=context, chat_id=chat_id, text=get_node_healthy_again_message(node_data))
context.job.context['chat_data']['nodes'][node_address]["healthy"] = True
return True
except (Timeout, ConnectionError, BadStatusException, Exception):
if was_healthy:
try_message_with_home_menu(context=context, chat_id=chat_id, text=get_node_health_warning_message(node_data))
context.job.context['chat_data']['nodes'][node_address]["healthy"] = False
return False
def check_thorchain_block_height(context, node_address):
chat_id = context.job.context['chat_id']
node_data = context.job.context['chat_data']['nodes'][node_address]
try:
block_height = get_latest_block_height(node_data['ip_address'])
except (Timeout, ConnectionError):
logger.warning(f"Timeout or Connection error with {node_data['ip_address']}")
return
is_stuck = block_height <= node_data.setdefault('block_height', 0)
block_height_stuck_count = node_data.setdefault("block_height_stuck_count", 0)
if is_stuck:
block_height_stuck_count += 1
if block_height_stuck_count == 1:
text = 'Block height is not increasing anymore! 💀' + '\n' + \
'IP: ' + node_data['ip_address'] + '\n' + \
'THORNode: ' + node_data['alias'] + '\n' + \
'Node address: ' + node_address + '\n' + \
'Block height stuck at: ' + block_height + '\n\n' + \
'Please check your Thornode immediately!'
try_message_with_home_menu(context=context, chat_id=chat_id, text=text)
else:
if block_height_stuck_count >= 1:
text = f"Block height is increasing again! 👌\n" + \
f"IP: {node_data['ip_address']}\n" + \
f"THORNode: {node_data['alias']}\n" + \
f"Node address: {node_address}\n" + \
f"Block height now at: {block_height}\n"
try_message_with_home_menu(context=context, chat_id=chat_id, text=text)
block_height_stuck_count = 0
node_data['block_height'] = block_height
node_data["block_height_stuck_count"] = block_height_stuck_count
def check_solvency_job(context):
message = check_solvency(context)
if message:
try_message_to_all_users(context, text=message)
def check_solvency(context) -> [str, None]:
try:
asgard_solvency = asgard_solvency_check()
yggdrasil_solvency = yggdrasil_solvency_check()
except (Timeout, ConnectionError):
logger.warning(f"Timeout or Connection error while querying Asgard and Yggdrasil.")
return None
except Exception as e:
logger.exception(e)
return None
is_solvent = asgard_solvency['is_solvent'] and yggdrasil_solvency['is_solvent']
insolvency_count = context.bot_data.setdefault("insolvency_count", 0)
message = None
if not is_solvent:
insolvency_count += 1
if insolvency_count == MISSING_FUNDS_THRESHOLD:
message = 'THORChain is *missing funds*! 💀\n\n'
message += get_insolvent_balances_message(asgard_solvency, yggdrasil_solvency)
else:
if insolvency_count >= MISSING_FUNDS_THRESHOLD:
message = 'THORChain is *100% solvent* again! 👌\n'
insolvency_count = 0
context.bot_data["insolvency_count"] = insolvency_count
return message
def check_thorchain_catch_up_status(context, node_address):
"""
    Check if the node is some blocks behind, based on its catch-up status
"""
chat_id = context.job.context['chat_id']
node_data = context.job.context['chat_data']['nodes'][node_address]
if 'is_catching_up' not in node_data:
node_data['is_catching_up'] = False
try:
is_currently_catching_up = is_thorchain_catching_up(
node_data['ip_address'])
except (Timeout, ConnectionError):
logger.warning(f"Timeout or Connection error with {node_data['ip_address']}")
return
if node_data['is_catching_up'] != is_currently_catching_up:
try:
block_height = get_latest_block_height(node_data['ip_address'])
except (Timeout, ConnectionError):
logger.warning(f"Timeout or Connection error with {node_data['ip_address']}")
block_height = "currently unavailable"
if is_currently_catching_up:
node_data['is_catching_up'] = True
text = 'The Node is behind the latest block height and catching up! 💀 ' + '\n' + \
'IP: ' + node_data['ip_address'] + '\n' + \
'THORNode: ' + node_data['alias'] + '\n' + \
'Node address: ' + node_address + '\n' + \
'Current block height: ' + block_height + '\n\n' + \
'Please check your Thornode immediately!'
else:
node_data['is_catching_up'] = False
text = 'The node caught up to the latest block height again! 👌' + '\n' + \
'IP: ' + node_data['ip_address'] + '\n' + \
'THORNode: ' + node_data['alias'] + '\n' + \
'Node address: ' + node_address + '\n' + \
'Current block height: ' + block_height
try_message_with_home_menu(context=context, chat_id=chat_id, text=text)
def check_thorchain_midgard_api(context, node_address):
"""
Check that Midgard API is ok
"""
chat_id = context.job.context['chat_id']
node_data = context.job.context['chat_data']['nodes'][node_address]
was_healthy = node_data.setdefault('is_midgard_healthy', True)
is_midgard_healthy = is_midgard_api_healthy(node_data['ip_address'])
if was_healthy != is_midgard_healthy:
if is_midgard_healthy:
text = 'Midgard API is healthy again! 👌' + '\n' + \
'IP: ' + node_data['ip_address'] + '\n' + \
'THORNode: ' + node_data['alias'] + '\n' + \
'Node address: ' + node_address
try_message_with_home_menu(context, chat_id=chat_id, text=text)
else:
text = 'Midgard API is not healthy anymore! 💀' + '\n' + \
'IP: ' + node_data['ip_address'] + '\n' + \
'THORNode: ' + node_data['alias'] + '\n' + \
'Node address: ' + node_address + '\n\n' + \
'Please check your Thornode immediately!'
try_message_with_home_menu(context, chat_id=chat_id, text=text)
node_data['is_midgard_healthy'] = is_midgard_healthy
| 2.296875 | 2 |
hard-gists/7578539/snippet.py | jjhenkel/dockerizeme | 21 | 3630 | from pylab import *
from numpy import *
from numpy.linalg import solve
from scipy.integrate import odeint
from scipy.stats import norm, uniform, beta
from scipy.special import jacobi
a = 0.0
b = 3.0
theta=1.0
sigma=sqrt(theta/(2*(a+b+2)))
tscale = 0.05
invariant_distribution = poly1d( [-1 for x in range(int(a))], True)*poly1d( [1 for x in range(int(b))], True)
def eigenvalue(n):
return theta*n*(n+a+b+1)/(a+b+2)
gaussian_var = norm()
def dW(dt):
return norm.rvs() / sqrt(dt)
def random_walk(y0, tmax, dt, times = None):
dt = dt * tscale
def rhs(y,t):
return -theta*(y-(a-b)/(a+b+2)) + sqrt(2*theta*(1-y*y)/(a+b+2))*dW(dt/tscale)
if (times is None):
times = arange(0,tmax,dt)
y = zeros(shape=times.shape, dtype=float)
y[0] = y0
for i in range(1,y.shape[0]):
y[i] = y[i-1] + rhs(y[i-1], times[i])*dt
if abs(y[i]) > 1:
y[i] = y[i] / abs(y[i])
return (times, y)
def beta_prior(s, f):
return poly1d(ones(shape=(s,)), True)*poly1d(-1*ones(shape=(f,)), True)
def poly_to_jacobi(x):
"""x is a poly1d object"""
xc = x.coeffs
N = x.order+1
matrix = zeros(shape=(N,N), dtype=float)
for i in range(N):
matrix[N-i-1:N, i] = jacobi(i,a,b).coeffs
return solve(matrix, xc)
def jacobi_to_poly(x):
result = poly1d([0])
for i in range(x.shape[0]):
result = result + (jacobi(i,a,b)*invariant_distribution)*x[i]
return result
def jacobi_to_poly_no_invariant(x):
result = poly1d([0])
for i in range(x.shape[0]):
result = result + jacobi(i,a,b)*x[i]
return result
def propagate_jacobi(pc, t):
"""Takes jacobi coefficients and propagates them"""
n = arange(pc.shape[0], dtype=float)
l = theta*n*(n+a+b+1.0)/(a+b+2.0)*tscale
return exp(-l*t)*pc
def truncate_unnecessary_jacobi(p):
p_normalized = p / (abs(p).sum())
cs = cumsum(abs(p_normalized[::-1]))[::-1]
return p_normalized[where(abs(cs) > 1e-4)]
def pde_solve(prior, t):
result = zeros(shape=(t.shape[0], prior.shape[0]), dtype=float)
result[0,:] = prior
for i in range(1,t.shape[0]):
result[i,:] = propagate_jacobi(result[i-1,:], t[i]-t[i-1])
return result
def transform_to_x(pdf, x):
result = zeros(shape=(pdf.shape[0], x.shape[0]), dtype=float)
for i in range(0, pdf.shape[0]):
p = jacobi_to_poly(pdf[i,:])
result[i,:] = p(x)
result[i,:] /= result[i,:].sum()
return result
tmax = 4
prior = beta_prior(40, 20)
prior_in_jacobi = poly_to_jacobi(prior)
dt = 0.1
times = arange(0,tmax,dt)
x = arange(-1,1,0.01)
rw_dt = 0.01
t, y = random_walk(0.35*2-1, tmax, rw_dt)
solution_as_x = zeros(shape=(times.size, x.size), dtype=float)
solution_as_jacobi = None
empirical_ctr = zeros(shape=(4,), dtype=float)
for i in range(0,4):
nt = int(1.0/dt)
prior = prior_in_jacobi
rnd = uniform(0,1)
if (i > 0):
nsamples = 40
r = rnd.rvs(nsamples)
ctr = (y[i/rw_dt]+1)/2.0
print "CTR: " + str(ctr)
success = (r < ctr).sum()
print "Empirical: " + str(success / float(nsamples))
evidence = beta_prior( nsamples - success, success)
prior = None
j = truncate_unnecessary_jacobi(solution_as_jacobi[int(1/dt)-1])
prior = poly_to_jacobi(evidence * jacobi_to_poly_no_invariant(j))
empirical_ctr[i] = success / float(nsamples)
solution_as_jacobi = pde_solve(prior, times[i*nt:(i+1)*nt])
solution_as_x[i*nt:(i+1)*nt] = transform_to_x(solution_as_jacobi, x)
plot(arange(0,4), empirical_ctr, 'go')
plot(t, (y+1)/2.0, 'k')
imshow(solution_as_x.transpose(), origin='lower', extent=[0,tmax,0,1])
xlabel("time")
ylabel("CTR")
title("Bayesian Estimate of CTR")
colorbar()
show()
| 2.453125 | 2 |
forms.py | lennykioko/Flask-social-network | 1 | 3631 | # forms are not just about display, instead they are more of validation
# wtf forms protect our site against csrf attacks
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, TextAreaField
from wtforms.validators import (DataRequired, Regexp, ValidationError, Email,
Length, EqualTo)
from models import User
def name_exists(form, field):
if User.select().where(User.username == field.data).exists():
raise ValidationError('User with this name already exists.')
def email_exists(form, field):
if User.select().where(User.email == field.data).exists():
raise ValidationError('User with this email already exists.')
class RegisterForm(FlaskForm):
username = StringField(
'Username', # is the label
validators=[
DataRequired(),
Regexp(
r'^[a-zA-Z0-9_]+$',
message = ("Username should be one word, letters, numbers and underscores only.")
),
name_exists
])
email = StringField(
'Email',
validators=[
DataRequired(),
Email(),
email_exists
])
password = PasswordField(
'Password',
validators=[
DataRequired(),
Length(min=8),
EqualTo('<PASSWORD>', message = 'Passwords must match')
])
password2 = PasswordField(
'<PASSWORD>',
validators=[DataRequired()
])
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
class PostForm(FlaskForm):
content = TextAreaField("What's Up?", validators = [DataRequired()])
| 3.34375 | 3 |
pantam_cli/utils/messages.py | flmnt/pantam | 2 | 3632 | <reponame>flmnt/pantam
from sys import stderr, stdout
from enum import Enum
from colored import fg, attr
PANTAM: str = fg("yellow") + attr("bold") + "PANTAM" + attr("reset")
colour_msg = lambda msg, colour: fg(colour) + attr("bold") + msg + attr("reset")
info_msg = lambda msg: colour_msg(msg, "blue")
success_msg = lambda msg: colour_msg(msg, "green")
error_msg = lambda msg: colour_msg(msg, "red")
class NewLine(Enum):
before = 1
after = 2
both = 3
def write_msg(msg: str, spacing: NewLine = None) -> None:
"""Write message to stdout"""
prefix: str = "\n" if spacing in (NewLine.before, NewLine.both) else ""
suffix: str = "\n" if spacing in (NewLine.after, NewLine.both) else ""
stdout.write("%s%s%s" % (prefix, msg, suffix))
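# Illustrative usage sketch, not part of the original module (the message text
# is made up): NewLine controls the blank lines printed around a message.
#
#   write_msg(info_msg("Building your app..."), NewLine.both)   # "\n<msg>\n"
#   write_msg(success_msg("Done!"), NewLine.after)               # "<msg>\n"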
def write_error(msg: str) -> None:
"""Write message to stderr"""
stderr.write("\n%s\n" % msg)
welcome_msg = (
lambda: PANTAM
+ """
The microframework for microservices.
Let's build your app...
"""
)
name_index_file_msg = lambda: "What is the name of your main script?"
name_actions_folder_msg = lambda: "What is the name of your actions folder?"
def create_actions_file_msg(second_run: bool):
"""Actions File Message"""
article = "another" if second_run else "an"
return "Do you want to create %s action file?" % article
name_actions_file_msg = lambda: "What is the name of your actions file?"
confirm_structure_msg = (
lambda structure: """Your application will look like this:
%s
Happy to proceed?"""
% structure
)
| 2.640625 | 3 |
tests/manage/test_remove_mon_from_cluster.py | zmc/ocs-ci | 0 | 3633 | """
A Testcase to remove a mon from the cluster
while I/O is happening.
Polarion-ID- OCS-355
"""
import logging
import pytest
from ocs_ci.ocs import ocp, constants
from ocs_ci.framework.testlib import tier4, ManageTest
from ocs_ci.framework import config
from ocs_ci.ocs.resources import pod
from tests.helpers import run_io_with_rados_bench, delete_cephblockpool
from ocs_ci.ocs.cluster import CephCluster
from ocs_ci.utility.retry import retry
from ocs_ci.ocs.exceptions import CephHealthException
log = logging.getLogger(__name__)
@retry(CephHealthException, 8, 3, 1)
def verify_mon_pod_up(ceph_cluster, pods):
"""
Verify mon pods are in Running state.
Returns:
        bool: True if all mon pods are in Running state, False otherwise
"""
    log.info("Verifying all mon pods are up and Running")
ceph_cluster.cluster_health_check(timeout=3)
ret = pods.wait_for_resource(
condition=constants.STATUS_RUNNING, selector='app=rook-ceph-mon',
resource_count=3, timeout=700)
log.info(f"waited for all mon pod to come up and running {ret}")
return ret
def run_io_on_pool():
"""
    Runs the I/O on the pool
Returns: A thread of I/O
"""
tools_pod = pod.get_ceph_tools_pod()
tools_pod.add_role(role='client')
return run_io_with_rados_bench(
ceph_pods=[tools_pod],
config={'time': 45, 'cleanup': False,
'pool': 'test-pool'
}
)
@tier4
@pytest.mark.polarion_id("OCS-355")
class TestRemoveMonFromCluster(ManageTest):
def test_remove_mon_pod_from_cluster(self):
"""
        To remove a mon pod from the cluster
        after I/O is performed on the pool,
        and to wait for the operator to create a
        new mon pod on its own
"""
ceph_cluster = CephCluster()
pods = ocp.OCP(
kind=constants.POD, namespace=config.ENV_DATA['cluster_namespace']
)
list_mons = ceph_cluster.get_mons_from_cluster()
assert len(list_mons) > 1, pytest.skip(
"INVALID: Mon count should be more than one to delete."
)
assert run_io_on_pool(), 'Failed to run I/O on the pool'
assert delete_cephblockpool('test-pool'), 'Failed to delete pool'
ceph_cluster.cluster_health_check(timeout=0)
ceph_cluster.remove_mon_from_cluster()
        assert verify_mon_pod_up(ceph_cluster, pods), "Mon pods are not up and in Running state"
ceph_cluster.cluster_health_check(timeout=60)
| 2.015625 | 2 |
smartystreets_python_sdk/us_autocomplete_pro/client.py | Caaz/smartystreets-python-sdk | 0 | 3634 | <reponame>Caaz/smartystreets-python-sdk<filename>smartystreets_python_sdk/us_autocomplete_pro/client.py
from smartystreets_python_sdk import Request
from smartystreets_python_sdk.exceptions import SmartyException
from smartystreets_python_sdk.us_autocomplete_pro import Suggestion, geolocation_type
class Client:
def __init__(self, sender, serializer):
"""
It is recommended to instantiate this class using ClientBuilder.build_us_autocomplete_pro_api_client()
"""
self.sender = sender
self.serializer = serializer
def send(self, lookup):
"""
Sends a Lookup object to the US Autocomplete Pro API and stores the result in the Lookup's result field.
"""
if not lookup or not lookup.search:
raise SmartyException('Send() must be passed a Lookup with the search field set.')
request = self.build_request(lookup)
response = self.sender.send(request)
if response.error:
raise response.error
result = self.serializer.deserialize(response.payload)
suggestions = self.convert_suggestions(result.get('suggestions') or [])
lookup.result = suggestions
return suggestions
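    # Illustrative usage sketch, not part of the original module. "Lookup" is
    # assumed to be the lookup class shipped with this package, and the search
    # text is made up:
    #
    #   lookup = Lookup()
    #   lookup.search = "1600 Amphitheatre"
    #   suggestions = client.send(lookup)  # also stored on lookup.result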
def build_request(self, lookup):
request = Request()
self.add_parameter(request, 'search', lookup.search)
self.add_parameter(request, 'max_results', lookup.max_results)
self.add_parameter(request, 'include_only_cities', self.build_filter_string(lookup.city_filter))
self.add_parameter(request, 'include_only_states', self.build_filter_string(lookup.state_filter))
self.add_parameter(request, 'include_only_zip_codes', self.build_filter_string(lookup.zip_filter))
self.add_parameter(request, 'exclude_states', self.build_filter_string(lookup.exclude))
self.add_parameter(request, 'prefer_cities', self.build_filter_string(lookup.prefer_cities))
self.add_parameter(request, 'prefer_states', self.build_filter_string(lookup.prefer_states))
self.add_parameter(request, 'prefer_zip_codes', self.build_filter_string(lookup.prefer_zips))
self.add_parameter(request, 'prefer_ratio', lookup.prefer_ratio)
self.add_parameter(request, 'prefer_geolocation', lookup.prefer_geo)
self.add_parameter(request, 'selected', lookup.selected)
return request
@staticmethod
def build_filter_string(filter_list):
return ','.join(filter_list or []) or None
@staticmethod
def convert_suggestions(suggestion_dictionaries):
return [Suggestion(suggestion) for suggestion in suggestion_dictionaries]
@staticmethod
def add_parameter(request, key, value):
if value and value != 'none':
request.parameters[key] = value
| 2.3125 | 2 |
mssqlvc.py | Saritasa/mssqlvc | 2 | 3635 | <reponame>Saritasa/mssqlvc<filename>mssqlvc.py
# -*- coding: utf-8 -*-
"""
mssqlvc
~~~~~~~
Database version control utility for Microsoft SQL Server. See README.md for more information.
Licensed under the BSD license. See LICENSE file in the project root for full license information.
"""
import argparse
import datetime
import io
import logging
import os
import re
import sys
import urlparse
try:
import clr
except ImportError:
print('Cannot import crl module, make sure you run this script using IronPython')
exit(2)
import System
clr.AddReference('Microsoft.SqlServer.Smo')
clr.AddReference('Microsoft.SqlServer.SqlEnum')
clr.AddReference('Microsoft.SqlServer.ConnectionInfo')
import Microsoft.SqlServer.Management.Smo as Smo
import Microsoft.SqlServer.Management.Common as Common
__author__ = '<NAME>'
__copyright__ = 'Copyright (c) 2015-2016, Saritasa'
__license__ = 'BSD'
__version__ = '1.4.5'
__all__ = ['MsSqlVersion']
class ScriptExecutionError(Exception):
pass
class MsSqlVersion(object):
"""
SQL Server patch migration class.
"""
class bcolors:
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
def __init__(self, connection_string, patch_dir='.', exclude_pattern=None, logger=None,
stop_on_error=False, noexecute=False, case_insensitive=False, record_files_only=False):
"""
Initialize instance with connection and database objects.
:param connection_string: Connection string in rfc1738 url format
:param patch_dir: Patch directory with .sql files
:param exclude_pattern: String with regular expression the patch files should match
:param logger: Logger that is used for logging
:param stop_on_error: Stop execution on error, default behavior is to continue
:param case_insensitive: Use case insensitive to compare patch files
:param record_files_only: Only file names will be stored to patch table without folder paths
"""
url = urlparse.urlparse(connection_string)
is_local_login = not url.username
self.connection = Common.ServerConnection(LoginSecure=is_local_login, ServerInstance=url.hostname,
DatabaseName=url.path.replace('/', ''))
if not is_local_login:
self.connection.Login = url.username
self.connection.Password = <PASSWORD>
self.server = Smo.Server(self.connection)
self.database = self.server.Databases[self.connection.DatabaseName]
self.server.ConnectionContext.ConnectTimeout = 90
self.exclude_pattern = exclude_pattern
self.patch_dir = patch_dir
self.stop_on_error = stop_on_error
self.case_insensitive = case_insensitive
self.record_files_only = record_files_only
self.executed_count = 0
self.logger = logging.NullHandler() if not logger else logger
if not os.path.exists(patch_dir):
raise Exception('Patch folder does not exist')
if 'mssql' not in connection_string:
raise Exception('Wrong connection string, it should contain mssql word')
exists = self._create_patch_table_if_not_exists(self.database)
if not exists:
self.logger.info('[%s] created _patch_history table' % (self.database.Name,))
def __del__(self):
if self.server:
self.server.ConnectionContext.Disconnect()
def update(self):
"""Executes database update process"""
patches = self.get_pending_patches()
self.logger.debug('Files to execute %s' % (patches,))
for patch in patches:
success = self.execute_file(patch)
if success:
self.executed_count += 1
self.put_patch(patch)
if not success and self.stop_on_error:
self.logger.critical(MsSqlVersion.bcolors.WARNING + 'Execution stopped. Please fix errors and try again.'
+ MsSqlVersion.bcolors.ENDC)
raise ScriptExecutionError()
self.logger.info('[%s] Executed %d patch(-es)' % (self.database.Name, self.executed_count))
def fill(self):
"""Skip scripts execution but add them to patches table"""
patches = self.get_pending_patches()
for patch in patches:
self.logger.info('Add file %s' % (patch,))
self.put_patch(patch)
def get_pending_patches(self):
applied_patches = self.get_applied_patches()
if self.record_files_only:
applied_patches = [os.path.basename(f) for f in applied_patches]
patches = self._get_sql_files_from_dir(applied_patches)
patches.sort()
return patches
def execute_file(self, file):
"""Executes file against database in transaction, returns True if success"""
ret = True
try:
full_name = os.path.join(os.path.normpath(self.patch_dir), file)
with io.open(full_name, 'r', encoding='utf8') as sql_file:
sql = sql_file.read()
self.logger.info('[%s] Executing %s...' % (self.database.Name, file))
self.connection.BeginTransaction()
self.database.ExecuteNonQuery(sql)
self.connection.CommitTransaction()
except Exception as e:
self.connection.RollBackTransaction()
self.logger.error('Exception on %s' % (file,))
message = e.message or e
if e.clsException.InnerException is not None and e.clsException.InnerException.InnerException is not None:
message += ' ' + e.clsException.InnerException.InnerException.Message
self.logger.error('[%s] %s (%s)' % (self.database.Name, full_name, message))
ret = False
return ret
def put_patch(self, file):
"""Write record that file has been executed"""
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if self.record_files_only:
file = os.path.basename(file)
sql = 'insert [_patch_history] (name, applied_at) values(\'%s\', \'%s\');' % (file, now)
self.database.ExecuteNonQuery(sql)
def get_applied_patches(self):
rows = self.database.ExecuteWithResults('select name from [_patch_history];').Tables[0].Rows
return set([row['name'] for row in rows])
def _get_sql_files_from_dir(self, exclude_list=[]):
"""Get all script files from directory"""
_exclude_list = set(exclude_list) if not self.case_insensitive else [f.lower() for f in exclude_list]
prevdir = os.getcwd()
os.chdir(self.patch_dir)
sql_files = []
for root, dirs, files in os.walk('.'):
for file in files:
file = os.path.normpath(os.path.join(root, file))
_file = file
if self.case_insensitive:
_file = _file.lower()
if self.record_files_only:
_file = os.path.basename(_file)
if (_file in _exclude_list or not _file.lower().endswith('.sql') or
(self.exclude_pattern and re.search(self.exclude_pattern, file))):
continue
sql_files.append(file)
os.chdir(prevdir)
return sql_files
@staticmethod
def _create_patch_table_if_not_exists(database):
"""Create patch table in database if not exists"""
sql = 'select * from sys.objects where object_id = object_id(\'_patch_history\') AND type in (\'U\');'
exists = database.ExecuteWithResults(sql).Tables[0].Rows.Count > 0
if not exists:
sql = """
create table [_patch_history] (id int not null identity(1, 1), name varchar(100) not null,
applied_at datetime not null);
alter table [_patch_history] add constraint _patch_history_PK primary key clustered (id);
"""
database.ExecuteNonQuery(sql)
return exists
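# Illustrative usage sketch, not part of the original script (the connection
# string and patch folder are made up); the __main__ block below wires up the
# same flow from command-line arguments:
#
#   sqlvc = MsSqlVersion('mssql://sa:secret@host/db', 'patches', stop_on_error=True)
#   sqlvc.update()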
def get_cmd_line_parser():
"""Get initialized argparse.ArgumentParser object"""
parser = argparse.ArgumentParser(
description='MSSQL database patch history tool',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog='''Example: %(prog)s -c "mssql://sa:123@host\instance/database" -d "D:/1/project/patch"''')
parser.add_argument('--connection', '-c',
required=True,
dest='connection',
action='store',
help='connection string in rfc1738 url format, required')
parser.add_argument('--directory', '-d',
dest='directory',
action='store',
default='.',
help='directory with patch files')
parser.add_argument('--log', '-l',
dest='log',
action='store',
help='log file')
parser.add_argument('--noexecute', '-n',
action='store_true',
dest='noexecute',
default=False,
help='displays pending script files with no execution')
parser.add_argument('--noexecute-fill', '-nf',
action='store_true',
dest='noexecute_fill',
default=False,
help='displays pending script files with no execution and fills patch table')
parser.add_argument('--stop-on-error', '-soe',
action='store_true',
dest='stop_on_error',
default=False,
help='stops execution if any script fails')
parser.add_argument('--exclude-pattern', '-ep',
dest='exclude_pattern',
help='skips files match to regular expression')
parser.add_argument('--record-files-only', '-rfo',
action='store_true',
dest='record_files_only',
default=False,
help='only file names will be stored to patch table without folder paths')
parser.add_argument('--case-insensitive', '-ci',
action='store_true',
dest='case_insensitive',
default=False,
help='use case insensitive to compare patch files so "PatchName.sql" and "patchname.sql" is the same')
parser.add_argument('--debug',
action='store_true',
dest='debug',
default=False,
help='enables debug output')
parser.add_argument('--version', '-v',
action='version',
version='%(prog)s ' + __version__)
return parser
if __name__ == '__main__':
# parser
parser = get_cmd_line_parser()
parser_args = parser.parse_args()
if parser_args.connection is None or parser_args.directory is None:
parser.print_help()
exit(1)
# logging
logger = logging.getLogger('mssql')
if parser_args.log:
fh = logging.FileHandler(parser_args.log)
fh.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s'))
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s'))
logger.setLevel(logging.DEBUG if parser_args.debug else logging.INFO)
logger.addHandler(ch)
# database handle
sqlvc = MsSqlVersion(parser_args.connection, parser_args.directory, exclude_pattern=parser_args.exclude_pattern,
stop_on_error=parser_args.stop_on_error, case_insensitive=parser_args.case_insensitive,
record_files_only=parser_args.record_files_only, logger=logger)
if parser_args.noexecute:
for patch in sqlvc.get_pending_patches():
logger.info(' ' + patch)
elif parser_args.noexecute_fill:
sqlvc.fill()
else:
sqlvc.update()
| 1.617188 | 2 |
lib/python3.6/site-packages/statsmodels/iolib/tests/test_table_econpy.py | KshitizSharmaV/Quant_Platform_Python | 1 | 3636 | '''
Unit tests table.py.
:see: http://docs.python.org/lib/minimal-example.html for an intro to unittest
:see: http://agiletesting.blogspot.com/2005/01/python-unit-testing-part-1-unittest.html
:see: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/305292
'''
from __future__ import absolute_import
from statsmodels.compat.python import zip
import numpy as np
from numpy.testing import assert_equal
__docformat__ = "restructuredtext en"
from statsmodels.iolib.table import Cell, SimpleTable
from statsmodels.iolib.table import default_latex_fmt
from statsmodels.iolib.table import default_html_fmt
ltx_fmt1 = default_latex_fmt.copy()
html_fmt1 = default_html_fmt.copy()
txt_fmt1 = dict(
data_fmts = ['%0.2f', '%d'],
empty_cell = ' ',
colwidths = 1,
colsep=' * ',
row_pre = '* ',
row_post = ' *',
table_dec_above='*',
table_dec_below='*',
header_dec_below='*',
header_fmt = '%s',
stub_fmt = '%s',
title_align='r',
header_align = 'r',
data_aligns = "r",
stubs_align = "l",
fmt = 'txt'
)
cell0data = 0.0000
cell1data = 1
row0data = [cell0data, cell1data]
row1data = [2, 3.333]
table1data = [ row0data, row1data ]
test1stubs = ('stub1', 'stub2')
test1header = ('header1', 'header2')
#test1header = ('header1\nheader1a', 'header2\nheader2a')
tbl = SimpleTable(table1data, test1header, test1stubs,
txt_fmt=txt_fmt1, ltx_fmt=ltx_fmt1, html_fmt=html_fmt1)
def custom_labeller(cell):
if cell.data is np.nan:
return 'missing'
class TestCell(object):
def test_celldata(self):
celldata = cell0data, cell1data, row1data[0], row1data[1]
cells = [Cell(datum, datatype=i % 2)
for i, datum in enumerate(celldata)]
for cell, datum in zip(cells, celldata):
assert_equal(cell.data, datum)
class TestSimpleTable(object):
def test_txt_fmt1(self):
# Limited test of custom txt_fmt
desired = """
*****************************
* * header1 * header2 *
*****************************
* stub1 * 0.00 * 1 *
* stub2 * 2.00 * 3 *
*****************************
"""
actual = '\n%s\n' % tbl.as_text()
#print('actual')
#print(actual)
#print('desired')
#print(desired)
assert_equal(actual, desired)
def test_ltx_fmt1(self):
# Limited test of custom ltx_fmt
desired = r"""
\begin{center}
\begin{tabular}{lcc}
\toprule
& \textbf{header1} & \textbf{header2} \\
\midrule
\textbf{stub1} & 0.0 & 1 \\
\textbf{stub2} & 2 & 3.333 \\
\bottomrule
\end{tabular}
\end{center}
"""
actual = '\n%s\n' % tbl.as_latex_tabular()
#print(actual)
#print(desired)
assert_equal(actual, desired)
def test_html_fmt1(self):
# Limited test of custom html_fmt
desired = """
<table class="simpletable">
<tr>
<td></td> <th>header1</th> <th>header2</th>
</tr>
<tr>
<th>stub1</th> <td>0.0</td> <td>1</td>
</tr>
<tr>
<th>stub2</th> <td>2</td> <td>3.333</td>
</tr>
</table>
"""
#the previous has significant trailing whitespace that got removed
#desired = '''\n<table class="simpletable">\n<tr>\n <td></td> <th>header1</th> <th>header2</th>\n</tr>\n<tr>\n <th>stub1</th> <td>0.0</td> <td>1</td> \n</tr>\n<tr>\n <th>stub2</th> <td>2</td> <td>3.333</td> \n</tr>\n</table>\n'''
actual = '\n%s\n' % tbl.as_html()
actual = '\n'.join((line.rstrip() for line in actual.split('\n')))
#print(actual)
#print(desired)
#print len(actual), len(desired)
assert_equal(actual, desired)
def test_customlabel(self):
# Limited test of custom custom labeling
tbl = SimpleTable(table1data, test1header, test1stubs, txt_fmt=txt_fmt1)
tbl[1][1].data = np.nan
tbl.label_cells(custom_labeller)
#print([[c.datatype for c in row] for row in tbl])
desired = """
*****************************
* * header1 * header2 *
*****************************
* stub1 * -- * 1 *
* stub2 * 2.00 * 3 *
*****************************
"""
actual = '\n%s\n' % tbl.as_text(missing='--')
assert_equal(actual, desired)
| 2.578125 | 3 |
homeassistant/components/todoist/types.py | MrDelik/core | 30,023 | 3637 | <gh_stars>1000+
"""Types for the Todoist component."""
from __future__ import annotations
from typing import TypedDict
class DueDate(TypedDict):
"""Dict representing a due date in a todoist api response."""
date: str
is_recurring: bool
lang: str
string: str
timezone: str | None
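# Illustrative sketch, not part of the original module: a value matching this
# shape (the concrete strings are made up):
#
#   due: DueDate = {
#       "date": "2022-01-31",
#       "is_recurring": False,
#       "lang": "en",
#       "string": "Jan 31",
#       "timezone": None,
#   }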
| 2.109375 | 2 |
src/c/c_pyzstd.py | corneliusroemer/pyzstd | 29 | 3638 | <filename>src/c/c_pyzstd.py
from collections import namedtuple
from enum import IntEnum
from ._zstd import *
from . import _zstd
__all__ = (# From this file
'compressionLevel_values', 'get_frame_info',
'CParameter', 'DParameter', 'Strategy',
# From _zstd
'ZstdCompressor', 'RichMemZstdCompressor',
'ZstdDecompressor', 'EndlessZstdDecompressor',
'ZstdDict', 'ZstdError', 'decompress', 'get_frame_size',
'compress_stream', 'decompress_stream',
'zstd_version', 'zstd_version_info', 'zstd_support_multithread')
# Used in __init__.py
_ZSTD_DStreamInSize = _zstd._ZSTD_DStreamInSize
_train_dict = _zstd._train_dict
_finalize_dict = _zstd._finalize_dict
# compressionLevel_values
_nt_values = namedtuple('values', ['default', 'min', 'max'])
compressionLevel_values = _nt_values(_zstd._ZSTD_defaultCLevel,
_zstd._ZSTD_minCLevel,
_zstd._ZSTD_maxCLevel)
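# For example, compressionLevel_values.default is the library's default level,
# and compressionLevel_values.min / .max give the supported range (the exact
# numbers depend on the bundled zstd build).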
_nt_frame_info = namedtuple('frame_info',
['decompressed_size', 'dictionary_id'])
def get_frame_info(frame_buffer):
    """Get zstd frame information from a frame header.
Argument
    frame_buffer: A bytes-like object. It should start from the beginning of
a frame, and needs to include at least the frame header (6 to
18 bytes).
Return a two-items namedtuple: (decompressed_size, dictionary_id)
If decompressed_size is None, decompressed size is unknown.
dictionary_id is a 32-bit unsigned integer value. 0 means dictionary ID was
not recorded in the frame header, the frame may or may not need a dictionary
to be decoded, and the ID of such a dictionary is not specified.
It's possible to append more items to the namedtuple in the future."""
ret_tuple = _zstd._get_frame_info(frame_buffer)
return _nt_frame_info(*ret_tuple)
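# Illustrative usage sketch, not part of the original module. "archive.zst" is
# a hypothetical file holding a zstd frame; only the header bytes are needed:
#
#   with open("archive.zst", "rb") as f:
#       info = get_frame_info(f.read(18))
#   print(info.decompressed_size, info.dictionary_id)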
class CParameter(IntEnum):
"""Compression parameters"""
compressionLevel = _zstd._ZSTD_c_compressionLevel
windowLog = _zstd._ZSTD_c_windowLog
hashLog = _zstd._ZSTD_c_hashLog
chainLog = _zstd._ZSTD_c_chainLog
searchLog = _zstd._ZSTD_c_searchLog
minMatch = _zstd._ZSTD_c_minMatch
targetLength = _zstd._ZSTD_c_targetLength
strategy = _zstd._ZSTD_c_strategy
enableLongDistanceMatching = _zstd._ZSTD_c_enableLongDistanceMatching
ldmHashLog = _zstd._ZSTD_c_ldmHashLog
ldmMinMatch = _zstd._ZSTD_c_ldmMinMatch
ldmBucketSizeLog = _zstd._ZSTD_c_ldmBucketSizeLog
ldmHashRateLog = _zstd._ZSTD_c_ldmHashRateLog
contentSizeFlag = _zstd._ZSTD_c_contentSizeFlag
checksumFlag = _zstd._ZSTD_c_checksumFlag
dictIDFlag = _zstd._ZSTD_c_dictIDFlag
nbWorkers = _zstd._ZSTD_c_nbWorkers
jobSize = _zstd._ZSTD_c_jobSize
overlapLog = _zstd._ZSTD_c_overlapLog
def bounds(self):
"""Return lower and upper bounds of a parameter, both inclusive."""
# 1 means compression parameter
return _zstd._get_param_bounds(1, self.value)
class DParameter(IntEnum):
"""Decompression parameters"""
windowLogMax = _zstd._ZSTD_d_windowLogMax
def bounds(self):
"""Return lower and upper bounds of a parameter, both inclusive."""
# 0 means decompression parameter
return _zstd._get_param_bounds(0, self.value)
class Strategy(IntEnum):
"""Compression strategies, listed from fastest to strongest.
Note : new strategies _might_ be added in the future, only the order
(from fast to strong) is guaranteed.
"""
fast = _zstd._ZSTD_fast
dfast = _zstd._ZSTD_dfast
greedy = _zstd._ZSTD_greedy
lazy = _zstd._ZSTD_lazy
lazy2 = _zstd._ZSTD_lazy2
btlazy2 = _zstd._ZSTD_btlazy2
btopt = _zstd._ZSTD_btopt
btultra = _zstd._ZSTD_btultra
btultra2 = _zstd._ZSTD_btultra2
# Set CParameter/DParameter types for validity check
_zstd._set_parameter_types(CParameter, DParameter) | 2.125 | 2 |
test/unit/data/model/mapping/common.py | quacksawbones/galaxy-1 | 1,085 | 3639 | from abc import ABC, abstractmethod
from contextlib import contextmanager
from uuid import uuid4
import pytest
from sqlalchemy import (
delete,
select,
UniqueConstraint,
)
class AbstractBaseTest(ABC):
@pytest.fixture
def cls_(self):
"""
Return class under test.
Assumptions: if the class under test is Foo, then the class grouping
the tests should be a subclass of BaseTest, named TestFoo.
"""
prefix = len("Test")
class_name = self.__class__.__name__[prefix:]
return getattr(self.get_model(), class_name)
@abstractmethod
def get_model(self):
pass
def dbcleanup_wrapper(session, obj, where_clause=None):
with dbcleanup(session, obj, where_clause):
yield obj
@contextmanager
def dbcleanup(session, obj, where_clause=None):
"""
Use the session to store obj in database; delete from database on exit, bypassing the session.
If obj does not have an id field, a SQLAlchemy WHERE clause should be provided to construct
a custom select statement.
"""
return_id = where_clause is None
try:
obj_id = persist(session, obj, return_id)
yield obj_id
finally:
table = obj.__table__
if where_clause is None:
where_clause = _get_default_where_clause(type(obj), obj_id)
stmt = delete(table).where(where_clause)
session.execute(stmt)
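# Illustrative usage sketch, not part of the original module ("session" and
# "make_user" are assumed test fixtures/factories):
#
#   def test_user_round_trip(session, make_user):
#       user = make_user()
#       with dbcleanup(session, user) as user_id:
#           stored = get_stored_obj(session, type(user), user_id)
#           assert stored.id == user_id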
def persist(session, obj, return_id=True):
"""
Use the session to store obj in database, then remove obj from session,
so that on a subsequent load from the database we get a clean instance.
"""
session.add(obj)
session.flush()
obj_id = obj.id if return_id else None # save this before obj is expunged
session.expunge(obj)
return obj_id
def delete_from_database(session, objects):
"""
Delete each object in objects from database.
May be called at the end of a test if use of a context manager is impractical.
(Assume all objects have the id field as their primary key.)
"""
# Ensure we have a list of objects (check for list explicitly: a model can be iterable)
if not isinstance(objects, list):
objects = [objects]
for obj in objects:
table = obj.__table__
stmt = delete(table).where(table.c.id == obj.id)
session.execute(stmt)
def get_stored_obj(session, cls, obj_id=None, where_clause=None, unique=False):
# Either obj_id or where_clause must be provided, but not both
assert bool(obj_id) ^ (where_clause is not None)
if where_clause is None:
where_clause = _get_default_where_clause(cls, obj_id)
stmt = select(cls).where(where_clause)
result = session.execute(stmt)
# unique() is required if result contains joint eager loads against collections
# https://gerrit.sqlalchemy.org/c/sqlalchemy/sqlalchemy/+/2253
if unique:
result = result.unique()
return result.scalar_one()
def has_unique_constraint(table, fields):
for constraint in table.constraints:
if isinstance(constraint, UniqueConstraint):
col_names = {c.name for c in constraint.columns}
if set(fields) == col_names:
return True
def has_index(table, fields):
for index in table.indexes:
col_names = {c.name for c in index.columns}
if set(fields) == col_names:
return True
def collection_consists_of_objects(collection, *objects):
"""
Returns True iff list(collection) == list(objects), where object equality is determined
by primary key equality: object1.id == object2.id.
"""
if len(collection) != len(objects): # False if lengths are different
return False
if not collection: # True if both are empty
return True
# Sort, then compare each member by its 'id' attribute, which must be its primary key.
collection.sort(key=lambda item: item.id)
objects_l = list(objects)
objects_l.sort(key=lambda item: item.id)
for item1, item2 in zip(collection, objects_l):
if item1.id is None or item2.id is None or item1.id != item2.id:
return False
return True
def get_unique_value():
"""Generate unique values to accommodate unique constraints."""
return uuid4().hex
def _get_default_where_clause(cls, obj_id):
where_clause = cls.__table__.c.id == obj_id
return where_clause
| 2.828125 | 3 |
django_events/users/management/commands/create_default_su.py | chrisBrookes93/django-events-management | 0 | 3640 | <reponame>chrisBrookes93/django-events-management
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
class Command(BaseCommand):
help = "Creates a default super user if one doesn't already exist. " \
"This is designed to be used in the docker-compose.yml to create an initial super user on deployment."
def handle(self, *args, **kwargs):
"""
Checks whether any super users exist and creates a default one if not
:param args: Unused
:param kwargs: Unused
"""
super_users = get_user_model().objects.filter(is_superuser=True)
if super_users.exists():
self.stdout.write('A superuser already exists, not creating one')
else:
get_user_model().objects.create_superuser(email="<EMAIL>", password="<PASSWORD>")
self.stdout.write('Created default superuser "<EMAIL>"')
self.stdout.write('Make sure you change the password immediately!')
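# Illustrative invocation, not part of the original file (assumes the app
# providing this management command is installed in the Django project):
#
#   python manage.py create_default_su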
| 2.421875 | 2 |
antlir/bzl/image_layer.bzl | zeroxoneb/antlir | 28 | 3641 | <gh_stars>10-100
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
An `image.layer` is a set of `feature`s with some additional parameters. Its
purpose is to materialize those `feature`s as a btrfs subvolume in the
per-repo `buck-image/out/volume/targets`.
We call the subvolume a "layer" because it can be built on top of a snapshot
of its `parent_layer`, and thus can be represented as a btrfs send-stream for
more efficient storage & distribution.
The Buck output of an `image.layer` target is a JSON file with information
on how to find the resulting layer in the per-repo
`buck-image/out/volume/targets`. See `SubvolumeOnDisk.to_json_file`.
## Implementation notes
The implementation of this converter deliberately minimizes the amount of
business logic in its command. The converter must include **only** our
interactions with the buck target graph. Everything else should be
delegated to subcommands.
### Command
In composing the `bash` command, our core maxim is: make it a hermetic
function of the converter's inputs -- do not read data from disk, do not
insert disk paths into the command, do not do anything that might cause the
bytes of the command to vary between machines or between runs. To achieve
this, we use Buck macros to resolve all paths, including those to helper
scripts. We rely on environment variables or pipes to pass data between the
helper scripts.
Another reason to keep this converter minimal is that `buck test` cannot
make assertions about targets that fail to build. Since we only have the
ability to test the "good" targets, it behooves us to put most logic in
external scripts, so that we can unit-test its successes **and** failures
thoroughly.
### Output
We mark `image.layer` uncacheable, because there's no easy way to teach Buck
to serialize a btrfs subvolume (for that, we have `package.new`).
That said, we should still follow best practices to avoid problems if e.g.
the user renames their repo, or similar. These practices include:
- The output JSON must store no absolute paths.
- Store Buck target paths instead of paths into the output directory.
### Dependency resolution
An `image.layer` consumes a set of `feature` outputs to decide what to put into
the btrfs subvolume. These outputs are actually just JSON files that
reference other targets, and do not contain the data to be written into the
image.
Therefore, `image.layer` has to explicitly tell buck that it needs all
direct dependencies of its `feature`s to be present on disk -- see our
`attrfilter` queries below. Without this, Buck would merrily fetch just
the `feature` JSONs from its cache, and not provide us with any of the
build artifacts that comprise the image.
We do NOT need the direct dependencies of the parent layer's features,
because we treat the parent layer as a black box -- whatever it has laid
down in the image, that's what it provides (and we don't care about how).
The consequences of this information hiding are:
- Better Buck cache efficiency -- we don't have to download
the dependencies of the ancestor layers' features. Doing that would be
wasteful, since those bits are redundant with what's in the parent.
- Ability to use genrule image layers / apply non-pure post-processing to
a layer. In terms of engineering, both of these non-pure approaches are
a terrible idea and a maintainability headache, but they do provide a
useful bridge for transitioning to Buck image builds from legacy
imperative systems.
 - The image compiler needs a little extra code to walk the parent layer and
determine what it provides.
- We cannot have "unobservable" dependencies between features. Since
feature dependencies are expected to routinely cross layer boundaries,
feature implementations are forced only to depend on data that can be
inferred from the filesystem -- since this is all that the parent layer
implementation can do. NB: This is easy to relax in the future by
writing a manifest with additional metadata into each layer, and using
that metadata during compilation.
"""
load(":compile_image_features.bzl", "compile_image_features")
load(":image_layer_utils.bzl", "image_layer_utils")
load(":image_utils.bzl", "image_utils")
def image_layer(
name,
parent_layer = None,
features = None,
flavor = None,
flavor_config_override = None,
antlir_rule = "user-internal",
**image_layer_kwargs):
"""
Arguments
- `parent_layer`: The name of another `image_layer` target, on
top of which the current layer will install its features.
- `features`: List of `feature` target paths and/or
nameless structs from `feature.new`.
- `flavor`: Picks default build options for the layer, including
`build_appliance`, RPM installer, and others. See `flavor_helpers.bzl`
for details.
- `flavor_config_override`: A struct that can override the default
values fetched from `REPO_CFG[flavor].flavor_to_config`.
- `mount_config`: Specifies how this layer is mounted in the
`mounts` field of a `feature` of a parent layer. See
the field in `_image_layer_impl` in `image_layer_utils.bzl`
- `runtime`: A list of desired helper buck targets to be emitted.
`container` is always included in the list by default.
See the field in `_image_layer_impl` in `image_layer_utils.bzl` and the
[docs](/docs/tutorials/helper-buck-targets#imagelayer) for the list of
possible helpers, their respective behaviours, and how to invoke them.
"""
image_layer_utils.image_layer_impl(
_rule_type = "image_layer",
_layer_name = name,
# Build a new layer. It may be empty.
_make_subvol_cmd = compile_image_features(
name = name,
current_target = image_utils.current_target(name),
parent_layer = parent_layer,
features = features,
flavor = flavor,
flavor_config_override = flavor_config_override,
),
antlir_rule = antlir_rule,
**image_layer_kwargs
)
| 2.03125 | 2 |
python/testData/debug/test_ignore_lib.py | jnthn/intellij-community | 2 | 3642 | from calendar import setfirstweekday
stopped_in_user_file = True
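# Note: 15 is outside the valid 0-6 weekday range, so the call below presumably
# raises inside the calendar module on purpose; this test file appears to
# exercise the debugger's handling of library (non-user) code.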
setfirstweekday(15) | 1.296875 | 1 |
promt_tr/__main__.py | ffreemt/promt-tr-free | 0 | 3643 |
''' __main__, to run:
python -m promt_tr
'''
import sys
from random import randint
from promt_tr import promt_tr, LANG_CODES
# pragma: no cover
def main():
'''main'''
from_lang = 'auto'
to_lang = 'zh'
text = 'test ' + str(randint(0, 10000))
if not sys.argv[1:]:
print('Provide some English text, with an optional to_lang')
print('E.g., python -m promt_tr test this and that de')
print('Testing with some random text\n')
else:
argv = sys.argv[1:]
len_ = len(argv)
if len_ == 1:
if argv[0] in LANG_CODES:
to_lang = argv[0]
else:
text = argv[0]
elif argv[-1] in LANG_CODES:
to_lang = argv[-1]
text = ' '.join(argv[:-1])
else:
text = ' '.join(argv)
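
    # Note: the loop below iterates over a fixed set of target languages, so any
    # to_lang parsed from the arguments above is effectively ignored (apparently
    # intentional, to demo several translations at once).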
for to_lang in ['zh', 'de', 'fr', 'it', 'es']:
resu = promt_tr(text, from_lang, to_lang)
print(f'[{text}] translated to [{to_lang}]: [{resu}]')
if __name__ == '__main__':
main()
| 3.3125 | 3 |
src/features/v3/proc_v3_n1_calc_distance.py | askoki/nfl_dpi_prediction | 0 | 3644 | import os
import sys
import pandas as pd
from datetime import datetime
from settings import RAW_DATA_DIR, DataV3, DATA_V3_SUBVERSION
from src.features.helpers.processing import add_missing_timestamp_values
from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, \
normalize_according_to_play_direction, check_group_event
from src.features.helpers.processing_v4 import home_has_possession, calculate_team_sitation
week_num = int(sys.argv[1])
data_v3 = DataV3(DATA_V3_SUBVERSION)
save_file_path = data_v3.get_step1_checkpoint_path(week_num)
try:
clean_df = pd.read_csv(save_file_path)
save_file_exists = True
except FileNotFoundError:
save_file_exists = False
if not save_file_exists:
print("Started loading data")
play_df = pd.read_csv(os.path.join(RAW_DATA_DIR, 'plays.csv'))
games_df = pd.read_csv(os.path.join(RAW_DATA_DIR, 'games.csv'))
week_and_games = games_df[games_df.week == week_num]
tracking_df = pd.read_csv(os.path.join(RAW_DATA_DIR, f'week{week_num}.csv'))
print("Data loaded. Start processing timestamps")
tracking_df = add_missing_timestamp_values(tracking_df)
games_n_plays_df = play_df.merge(week_and_games, how='inner', on='gameId')
m_grouped = games_n_plays_df.groupby(['gameId', 'playId'])
df_t = tracking_df.merge(games_n_plays_df, how='left', on=['gameId', 'playId'])
# Remove all events without 'pass_forward'
df_t_grouped = df_t.groupby(['gameId', 'playId'])
df_t_v3 = df_t.copy().sort_index()
for name, group in df_t_grouped:
game_id, play_id = name
# if group does not contain pass forward, drop it
if all(group.event != 'pass_forward'):
df_t_v3 = df_t_v3[(df_t_v3.gameId != game_id) | (df_t_v3.playId != play_id)]
df_t_v3_s = df_t_v3.sort_values(by=['gameId', 'playId', 'time', 'event'])
df_t_v3_s = df_t_v3_s.reset_index(drop=True)
df_t_grouped = df_t_v3_s.groupby(['gameId', 'playId'])
# remove all values before 'pass_forward'
print("Removing all values before pass forward event...")
for name, group in df_t_grouped:
game_id, play_id = name
pass_forward_frame_id = group[group.event == 'pass_forward'].index.min() - 1
remove_start = group.index.min()
df_t_v3_s = df_t_v3_s.drop(df_t_v3_s.loc[remove_start:pass_forward_frame_id].index)
pd.options.mode.chained_assignment = None
gb = df_t_v3_s.groupby(['gameId', 'playId'])
print('Getting closest players...')
keep_indices = []
for name, group in gb:
game_id, play_id = name
try:
event_3rd = group.event.unique()[2]
except IndexError:
print('Number of events is < 3, skipping...')
continue
situation_df = group[group.event == event_3rd]
# convert dataframe into series
ball_row = situation_df[situation_df.team == 'football'].head(1)
# remove ball
player_situation_df = situation_df[situation_df.team != 'football']
try:
p1, p2 = get_closest_players(player_situation_df, ball_row.x.item(), ball_row.y.item())
except ValueError:
print('Value Error raised. This group will be skipped.')
continue
p_n_b_indices = get_players_and_ball_indices(group, p1, p2)
if p_n_b_indices:
keep_indices.extend(p_n_b_indices)
clean_df = df_t_v3_s[df_t_v3_s.index.isin(keep_indices)]
clean_df.to_csv(
save_file_path,
index=False
)
print('Normalize...')
clean_df = normalize_according_to_play_direction(clean_df)
clean_df['homeHasPossession'] = clean_df.apply(
lambda row: home_has_possession(row), axis=1
)
clean_df['teamSituation'] = clean_df.apply(
lambda row: calculate_team_sitation(row), axis=1
)
print('Creating features...')
min_df = clean_df[[
'time', 'x', 'y', 's', 'o', 'dir', 'event', 'team',
'gameId', 'playId', 'frameId', 'isDefensivePI'
]]
gb_2 = clean_df.groupby(['gameId', 'playId', 'frameId'])
# ball direction and orientation are NaN
calc_df = pd.DataFrame(
columns=[
'time',
'att_def_d', 'att_ball_d', 'def_ball_d',
'att_s', 'def_s', 'ball_s',
'att_o', 'def_o',
'att_dir', 'def_dir',
'event', 'gameId', 'playId', 'frameId', 'isDefensivePI'
]
)
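# Note: the rows appended below also include keys that are not pre-declared in
# these columns (att_a/def_a/ball_a, the event flags, week); DataFrame.append
# adds such columns automatically.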
GROUP_SIZE_MINIMUM = 3
for name, group in gb_2:
game_id, play_id, frameId = name
if len(group) < GROUP_SIZE_MINIMUM:
continue
ball = group[group.teamSituation == 'football'].head(1).squeeze()
p_att = group[group.teamSituation == 'attacking'].head(1).squeeze()
p_def = group[group.teamSituation == 'defending'].head(1).squeeze()
group_row = group.head(1).squeeze()
group_events = group.event.unique().tolist()
dict_to_append = {
'time': group_row.time,
'att_def_d': calculate_distance(p_att.x, p_att.y, p_def.x, p_def.y),
'att_ball_d': calculate_distance(p_att.x, p_att.y, ball.x, ball.y),
'def_ball_d': calculate_distance(p_def.x, p_def.y, ball.x, ball.y),
'att_s': p_att.s, 'def_s': p_def.s, 'ball_s': ball.s,
'att_a': p_att.a, 'def_a': p_def.a, 'ball_a': ball.a,
'att_o': p_att.o, 'def_o': p_def.o,
'att_dir': p_att.dir, 'def_dir': p_def.dir,
'event': group_row.event,
'pass_arrived': check_group_event(group_events, 'pass_arrived'),
'pass_outcome_caught': check_group_event(group_events, 'pass_outcome_caught'),
'tackle': check_group_event(group_events, 'tackle'),
'first_contact': check_group_event(group_events, 'first_contact'),
'pass_outcome_incomplete': check_group_event(group_events, 'pass_outcome_incomplete'),
'out_of_bounds': check_group_event(group_events, 'out_of_bounds'),
'week': week_num,
'gameId': group_row.gameId,
'playId': group_row.playId,
'frameId': group_row.frameId,
'isDefensivePI': group_row.isDefensivePI
}
calc_df = calc_df.append(
dict_to_append,
ignore_index=True
)
print("Saving data...")
calc_df.to_csv(
data_v3.get_step1_end_path(week_num),
index=False
)
print(f'End time: {datetime.now().strftime("%H:%M:%S")}')
| 2.515625 | 3 |
annotate-preprocessed.py | Rajpratik71/devel-scripts | 0 | 3645 | #!/usr/bin/python
"""Annotates -E preprocessed source input with line numbers.
Read std input, then annotate each line with line number based on previous
expanded line directives from -E output. Useful in the context of compiler
debugging.
"""
import getopt
import os
import re
import sys
import script_utils as u
flag_reverse = True
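# NOTE: flag_reverse is toggled by the -r option below but is not otherwise
# used in this script as written.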
def usage(msgarg):
"""Print usage and exit."""
if msgarg:
sys.stderr.write("error: %s\n" % msgarg)
print """\
usage: %s [options] < input > output
options:
-d increase debug msg verbosity level
""" % os.path.basename(sys.argv[0])
sys.exit(1)
def parse_args():
"""Command line argument parsing."""
global flag_reverse
try:
optlist, _ = getopt.getopt(sys.argv[1:], "dr")
except getopt.GetoptError as err:
# unrecognized option
usage(str(err))
for opt, _ in optlist:
if opt == "-d":
u.increment_verbosity()
elif opt == "-r":
flag_reverse = False
# Setup
u.setdeflanglocale()
parse_args()
# Read
lines = sys.stdin.readlines()
lnum = -1
matcher = re.compile(r"^\#\s+(\d+)\s+\"(\S+)\".*$")
for line in lines:
m = matcher.match(line)
if m:
lnum = int(m.group(1))
afile = m.group(2)
print "<%s:%d>" % (afile, lnum)
continue
print "%d:%s" % (lnum, line.strip())
lnum += 1
| 2.984375 | 3 |
pages/lstm.py | tekeburak/dam-occupancy-model | 8 | 3646 | import streamlit as st
import tensorflow as tf
import numpy
from utils.get_owm_data import get_open_weather_map_data
from utils.get_date import get_date_list_for_gmt
import plotly.graph_objects as go
from plotly import tools
import plotly.offline as py
import plotly.express as px
def app():
st.title("LSTM Model")
st.subheader('What does LSTM model do?')
st.markdown("""<p style='text-align: justify;'>LSTM networks are an extension of recurrent neural networks (RNNs) mainly introduced to handle situations where RNNs fail. It has been so designed that thevanishing gradient problem is almost completely removed, while the training model is left unaltered. Long-time lags in certain problems are bridged using LSTMs where they also handle noise, distributed representations, and continuous values.</p>""", unsafe_allow_html=True)
st.subheader('Why we chose LSTM?')
st.markdown("""<p style='text-align: justify;'>LSTM is well-suited to classify, process and predict time series given time lags of unknown duration. Relative insensitivity to gap length gives an advantage to LSTM over alternative RNNs, hidden Markov models and other sequence learningmethods. In addition, LSTM works great because LSTM cells have a memory that can store previous timestep information and this is how it learns.</p>""", unsafe_allow_html=True)
st.subheader('LSTM model input and output')
st.markdown("Model input is 7 days daily weather data from [OpenWeatherAPI](https://openweathermap.org/api). Model input features are *Rain*, *MaxTemp*, *MinTemp*, *AvgWind*, *AvgHumidity* and *AvgPressure*. Model predicts 7 days dam occupancy rate of İstanbul using these features.", unsafe_allow_html=True)
LSTM_model_name = 'models/LSTM_model.h5'
model_lstm = tf.keras.models.load_model(LSTM_model_name)
features = get_open_weather_map_data()
prediction_lstm = model_lstm.predict(features) * 100
prediction_lstm = prediction_lstm.ravel()
date_list = get_date_list_for_gmt()
data = []
layout = go.Layout(
title= "<b>LSTM Dam Occupancy Forecasting Plot</b>",paper_bgcolor = 'rgb(248, 248, 255)',plot_bgcolor = 'rgb(248, 248, 255)',barmode = "stack",
xaxis = dict(title="Time", linecolor="#BCCCDC",showspikes=True,spikethickness=2,spikedash="dot",spikecolor= "#ffffff",spikemode="across",),
yaxis= dict(title="Dam Occupancy Rate (%)",linecolor="#021C1E"))
line_chart= go.Scatter(x=date_list, y=prediction_lstm, marker_color='rgb(0, 200, 200)' )
data.append(line_chart)
fig= go.Figure(data=data, layout=layout)
st.plotly_chart(fig)
| 3.15625 | 3 |
fst_web/demo_settings.py | kamidev/autobuild_fst | 0 | 3647 | # -*- coding: utf-8 -*-
import os
ROOT = os.path.abspath(os.path.dirname(__file__))
path = lambda *args: os.path.join(ROOT, *args)
""" Template for local settings of the FST webservice (fst_web)
Please edit this file and replace all generic values with values suitable to
your particular installation.
"""
# NOTE! Always set this to False before deploying
DEBUG = True
# NOTE! Before deploying on a public server, uncomment ALLOWED_HOSTS
# and add IP address and/or domain of your site
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'fst.magokoro.nu']
# Look for instance-specific settings
try:
from .instance_settings import *
except ImportError:
from .default_instance_settings import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': path('database/fst_demo.db')
}
}
LOG_LEVEL = "DEBUG"
# Enable this to override global DB Debug setting
# DB_DEBUG_LEVEL = "DEBUG"
# Setup mail server for sending email notifications.
# You can use any mail server you want.
# But a very simple way to get started is to use a gmail account.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
# EMAIL_HOST_USER = 'your email'
# EMAIL_HOST_PASSWORD = '<PASSWORD>'
# Admins specified here receive email notifications on critical errors.
ADMINS = ()
MANAGERS = ADMINS
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = os.path.join("/dokument/")
# Site and port for hosting FST service (do not add ending '/').
FST_SITE_URL = "http://127.0.0.1:8000"
# TODO - Check if FST_INSTANCE_PREFIX can be removed
# Site and port of specific FST instance (do not add ending '/').
FST_INSTANCE_URL = os.path.join(
"http://127.0.0.1:8000",
FST_INSTANCE_PREFIX)
| 1.75 | 2 |
BookingScraper-joao_v2/BookingScraper/airbnb.py | joaocamargo/estudos-python | 1 | 3648 |
#! /usr/bin/env python3.6
import argparse
import argcomplete
from argcomplete.completers import ChoicesCompleter
from argcomplete.completers import EnvironCompleter
import requests
from bthread import BookingThread
from bs4 import BeautifulSoup
from file_writer import FileWriter
hotels = []
def get_countries():
with open("europa2020.txt", "r") as f:
countries = f.read().splitlines()
return countries
def get_booking_page(session, offset, rooms, country, dest_id, DayIni, DayFim):
print('get_booking_page(session, offset, rooms, country, dest_id, DayIni, DayFim):')
print(session, offset, rooms, country, dest_id, DayIni, DayFim)
diaInicial = str(int(DayIni[0:2]))
mesInicial = str(int(DayIni[3:5]))
anoInicial = str(int(DayIni[6:10]))
diaFinal = str(int(DayFim[0:2]))
mesFinal = str(int(DayFim[3:5]))
anoFinal = str(int(DayFim[6:10]))
'''
Make request to airbnb page and parse html
:param offset:
:return: html page
'''
url = 'https://www.airbnb.com.br/s/Londres/'\
        'homes?refinement_paths%5B%5D=%2Fhomes&current_tab_id=home_tab&selected_tab_id=home_tab&source=mc_search_bar&search_type=unknown'\
'&click_referer=t%3ASEE_ALL%7Csid%3A874f16ee-6196-4289-9717-17dec73e1e5c%7Cst%3AMAGAZINE_HOMES&screen_size=large&hide_dates_and_guests_filters=false'\
'&ne_lat=51.80546533345978&ne_lng=0.4969575708007312&sw_lat=51.17528882051496&sw_lng=-0.8200285131836154&zoom=10&search_by_map=false&checkin={anoInicial}-{mesInicial}-{diaInicial}'\
'&checkout={anoFinal}-{mesFinal}-{diaFinal}&adults={rooms}&property_type_id%5B%5D=1&property_type_id%5B%5D=43&property_type_id%5B%5D=47'\
'&place_id=ChIJdd4hrwug2EcRmSrV3Vo6llI&room_types%5B%5D=Entire%20home%2Fapt'\
        '&section_offset=6&items_offset=18'.format(rooms=rooms, country=country.replace(' ', '+'),anoFinal=anoFinal,mesFinal=mesFinal,diaInicial=diaInicial,mesInicial=mesInicial,anoInicial=anoInicial,diaFinal=diaFinal,dest_id=dest_id) + str(offset)
r = requests.get(url, headers=
{'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0)'
' Gecko/20100101 Firefox/48.0'})
html = r.content
print(url)
parsed_html = BeautifulSoup(html, 'lxml')
return parsed_html
def process_hotels(session, offset, rooms, country, dest_id, DayIni, DayFim):
parsed_html = get_booking_page(session, offset, rooms, country, dest_id,DayIni, DayFim)
hotel = parsed_html.find_all('div', {'class': 'sr_item'})
for ho in hotel:
#print("ho.find('a', {'class': 'jq_tooltip'})")
#print(ho.find('a', {'class': 'jq_tooltip'}))
#name = ho.find('a', {'class': 'jq_tooltip'})['data-title']
print("ho.find('span', {'class': 'sr-hotel__name'})")
#print(ho.find('span', {'class': 'sr-hotel__name'}))
if ho.find('span', {'class': 'sr-hotel__name'}) is not None:
name = str(ho.find('span', {'class': 'sr-hotel__name'}).text.encode('utf-8')).replace('\\n','').replace("b","").replace("'","").replace('\\','')
else:
name = '-1'
if ho.find('div', {'class': 'bui-price-display__value prco-inline-block-maker-helper'}) is not None:
price = ho.find('div', {'class': 'bui-price-display__value prco-inline-block-maker-helper'}).text.replace('\n','').replace("b","").replace("'","")
else:
price = '-1'
if ho.find('span', {'class': '_ky9opu0'}) is not None:
nota = str(ho.find('span', {'class': '_ky9opu0'}).text.replace('\n','').replace("b","").replace("'",""))
else :
nota = '-1'
if ho.find('span', {'title': 'This is the straight-line distance on the map. Actual travel distance may vary.'}) is not None:
distance = str(ho.find('span', {'title': 'This is the straight-line distance on the map. Actual travel distance may vary.'}).text.encode('utf-8')).replace('\\n','').replace("b","").replace("'","").replace('\\','')
else :
distance = '-1'
# if ho.find('a', {'class': 'bui-link'}) is not None :
# result = [str(item) for item in ho.find_all('span', attrs={'data-bui-component' : 'Tooltip'})]
# print('TAMANHO TOOLTIP', str(len(result)))
# for i in result:
# print(i)
# for i in result:
# if i in 'km':
# distance = str(i)
# else:
# distance = '----'
# else:
# distance = '----'
# if len(result) ==1:
# if result[0] in 'km':
# distance = result
# else:
# distance = 'aaaaa' + str(len(result))
# else:
# distance = '---'
hotels.append(DayIni+';'+DayFim+';'+name + ';' + price + ';' + nota + ';' + distance)
#hotels.append(str(len(hotels) + 1) + ' : ' + name + ' : ' + price)
def prep_data(rooms=1, country='Macedonia', dest_id='-1', DayIni='01/01/2019', DayFim='02/01/2019', out_format=None):
'''
Prepare data for saving
:return: hotels: set()
'''
offset = 1
session = requests.Session()
parsed_html = get_booking_page(session, offset, rooms, country, dest_id, DayIni,DayFim)
all_offset = parsed_html.find_all('li', {'class':
'sr_pagination_item'})[-1].get_text().splitlines()[-1]
threads = []
for i in range(int(all_offset)):
offset += 1
t = BookingThread(session, offset, rooms, country,dest_id,DayIni, DayFim, process_hotels)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
hotels2 = hotels
return hotels2
def get_data(rooms=1, country='Macedonia', dest_id='-1',DayIni='01/01/2019',DayFim='02/01/2019', out_format=None):
'''
Get all accomodations in Macedonia and save them in file
:return: hotels-in-macedonia.{txt/csv/xlsx} file
'''
    print('Searching for', country)
hotels_list = prep_data(rooms, country,dest_id, DayIni, DayFim, out_format)
save_data(hotels_list , out_format=out_format, country=country)
def save_data(data, out_format, country):
'''
Saves hotels list in file
:param data: hotels list
:param out_format: json, csv or excel
:return:
'''
writer = FileWriter(data, out_format, country)
file = writer.output_file()
print('All accommodations are saved.')
print('You can find them in', file, 'file')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
countries = get_countries()
parser.add_argument("--rooms",
help='Add the number of rooms to the booking request.',
default=1,
type=int,
nargs='?')
parser.add_argument("--country",
help='Add the country to the booking request.',
default='Macedonia',
nargs='?').completer = ChoicesCompleter(countries)
parser.add_argument("--dest_id",
help='Add the country to the booking request.',
default='0',
nargs='?')
parser.add_argument("--DayIni",
help='Data inicial',
default='01/01/2019',
nargs='?')
parser.add_argument("--DayFim",
help='Data inicial',
default='02/01/2019',
nargs='?')
parser.add_argument("--out_format",
help='Add the format for the output file. Add excel, json or csv.',
default='json',
choices=['json', 'excel', 'csv'],
nargs='?').completer = EnvironCompleter
argcomplete.autocomplete(parser)
args = parser.parse_args()
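
    # Example invocation (hypothetical values; dest_id must match one of the
    # entries in the localidades list below):
    #   python airbnb.py --rooms 2 --dest_id -2601889 \
    #       --DayIni 01/03/2019 --DayFim 05/03/2019 --out_format csv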
localidades = [{
'Pais': 'London',
'dest_id': '-2601889'
}, {
'Pais': 'Utrecht',
'dest_id': '-2154382'
}, {
'Pais': 'Buzios',
'dest_id': '-626254'
}, {
'Pais': '',
'dest_id': ''
}]
countryAux = [d['Pais'] for d in localidades if args.dest_id in d['dest_id']]
if len(countryAux)>0:
country = countryAux[0]
        print('Parameters')
print(args.rooms, country,args.dest_id,args.DayIni,args.DayFim, args.out_format)
get_data(args.rooms, country,args.dest_id,args.DayIni,args.DayFim, args.out_format)
else:
        country = 'Not identified'
        locais = [d['Pais'] + ':' + d['dest_id'] for d in localidades if d['Pais'] != '']
        print('----------')
        print('Use one of the following locations')
for i in locais:
print(i)
print('----------')
| 2.53125 | 3 |
main.py | valurhrafn/chromium-sync | 4 | 3649 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.api import users
import webapp2
# For datastore
import cgi
import urllib
from google.appengine.ext import ndb
class UserId(ndb.Model):
content = ndb.StringProperty()
date = ndb.DateTimeProperty(auto_now_add=True)
@classmethod
def query_user(cls, ancestor_key):
return cls.query(ancestor=ancestor_key).order(-cls.date)
# ************** MainHandler ************* #
class MainHandler(webapp2.RequestHandler):
def get(self):
self.response.write('Hello world!')
# ************** GetUser ************* #
class GetUser(webapp2.RequestHandler):
def get(self):
self.response.out.write('<html><body>')
client_id = self.request.get('client_id')
ancestor_key = ndb.Key("ID", client_id or "*no_id*")
userids = UserId.query_user(ancestor_key).fetch(20)
        self.response.out.write('here is something')
for userid in userids:
self.response.out.write('<blockquote>%s</blockquote>' %
cgi.escape(userid.content))
# Checks for active Google account session
# user = users.get_current_user()
# if user:
# self.response.headers['Content-Type'] = 'text/plain'
# self.response.write('Hello, ' + user.nickname())
# else:
# self.redirect(users.create_login_url(self.request.uri))
self.response.out.write('</body></html>')
def post(self):
pass
# ************** HasData ************* #
class HasData(webapp2.RequestHandler):
def get(self):
pass
#TODO does user have data
class PostData(webapp2.RequestHandler):
def post(self):
client_id = self.request.get('client_id')
chrome_user = UserId(parent=ndb.Key("ID", client_id or "*no_id*"),
content = self.request.get('client_id'))
chrome_user.put()
#TODO recieve data from client
class GetSyncData(object):
"""docstring for GetSyncData"""
def __init__(self, arg):
super(GetSyncData, self).__init__()
self.arg = arg
#implement get data for user
# property user.email() or user.user_id()
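
# Note: GetSyncData above is a plain object rather than a webapp2.RequestHandler,
# so routing '/GetSyncData/' to it (below) would fail at request time; it is
# presumably a placeholder still to be implemented.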
app = webapp2.WSGIApplication([
('/', MainHandler),
('/GetUser/', GetUser),
('/HasData/', HasData),
('/chrome-sync/command/', PostData),
('/GetSyncData/', GetSyncData)
], debug=True)
| 2.34375 | 2 |
comet/service/subscriber.py | dneise/Comet | 15 | 3650 | # Comet VOEvent Broker.
from twisted.application.internet import ClientService
from comet.protocol.subscriber import VOEventSubscriberFactory
__all__ = ["makeSubscriberService"]
def makeSubscriberService(endpoint, local_ivo, validators, handlers, filters):
"""Create a reconnecting VOEvent subscriber service.
Parameters
----------
endpoint : implements `twisted.internet.interfaces.IStreamClientEndpoint`
The endpoint to which the service will connect.
local_ivo : `str` or `None`
IVOA identifier for the subscriber.
validators : `list` of implementers of `~comet.icomet.IValidator`.
Validators which will be applied to incoming events. Events which fail
validation will be rejected.
handlers : `list` of implementers of `~comet.icomet.IHandler`.
Handlers to which events which pass validation will be passed.
filters : `list` of `str`
XPath filters. Will be passed to upstream as a request to filter the
alerts being sent.
Notes
-----
Upstream brokes may not provide support for XPath filtering; in this case,
the filters suppplied will be ignored.
Reconnection is handled according to the default policies of
`twisted.application.internet.ClientService`.
"""
factory = VOEventSubscriberFactory(local_ivo, validators, handlers, filters)
service = ClientService(endpoint, factory)
return service
| 2.265625 | 2 |
scripts/master/cros_try_job_git.py | bopopescu/build | 0 | 3651 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import json
import os
import re
import shutil
import zlib
from StringIO import StringIO
try:
# Create a block to work around evil sys.modules manipulation in
# email/__init__.py that triggers pylint false positives.
# pylint: disable=E0611,F0401
from email.Message import Message
from email.Utils import formatdate
except ImportError:
raise
from buildbot.process.properties import Properties
from buildbot.schedulers.trysched import TryBase
from twisted.internet import defer, reactor, utils
from twisted.mail.smtp import SMTPSenderFactory
from twisted.python import log
from common.twisted_util.response import StringResponse
from master import gitiles_poller
from master.try_job_base import BadJobfile
class CbuildbotConfigs(object):
# Valid 'etc' builder targets. Specifically, this ensures:
# - The build name doesn't begin with a flag ('--')
# - The build name doesn't contain spaces (to spill into extra args).
_ETC_TARGET_RE = re.compile(r'^[a-zA-Z][\w-]+\w$')
def __init__(self, configs, etc_builder=None):
"""Holds base state of the master's try job related configuration.
configs (dict): A dictionary of all known CrOS configs. This will be as
up-to-date as the Chromite pin.
etc_builder (str): If not None, the name of the etc builder.
"""
self.configs = configs
self.etc_builder = etc_builder
def AddBuildBucketHooks(self, c):
"""Build mutation hook called via BuildBucket when scheduling builds.
The cbuildbot config is specified in the `cbb_config` property. The
callback transforms that property to an actual waterfall builder name by
mapping it based on its config.
If an 'etc' builder is configured and the config name is unknown, it will be
mapped to the 'etc' builder if possible.
A tryserver BuildBucket build takes the form:
- Empty `builder_name` parameter. If one is supplied, it will be ignored.
- BuildBot changes can be added by including one or more BuildBucket
`changes` parameters: [{'author': {'email': '<EMAIL>'}}].
- `cbb_config` property must be set to the build's cbuildbot config target.
- `extra_args` property (optional) may be a JSON list of additional
parameters to pass to the tryjob.
- `slaves_request` property (optional) may be a JSON list of slaves on which
this build may run.
- Additional BuildBot properties may be added.
NOTE: Internally, all of these parameters are converted to BuildBot
properties and referenced as such in other areas of code. The Git poller
also constructs the same property set, so code paths converge.
"""
def params_hook(params, _build):
# Map `cbb_config` to a builder name.
properties = params.get('properties', {})
config_name = properties.get('cbb_config')
if not config_name:
raise ValueError('Missing required `cbb_config` property.')
params['builder_name'] = self.GetBuilderForConfig(config_name)
# Validate other fields.
if not isinstance(properties.get('extra_args', []), list):
raise ValueError('`extra_args` property is not a list.')
if not isinstance(properties.get('slaves_request', []), list):
raise ValueError('`slaves_request` is not a list.')
# Add mandatory properties to build.
params['properties'] = properties
c['buildbucket_params_hook'] = params_hook
def GetBuilderForConfig(self, config_name):
config = self.configs.get(config_name)
if config:
return config['_template'] or config_name
self.ValidateEtcBuild(config_name)
return self.etc_builder
def ValidateEtcBuild(self, config_name):
"""Tests whether a specified build config_name is candidate for etc build.
Raises a ValueError if an etc build cannot be dispatched.
"""
if not self.etc_builder:
raise ValueError('etc builder is not configured.')
if not config_name:
raise ValueError('Empty config name')
if not self._ETC_TARGET_RE.match(config_name):
raise ValueError('invalid etc config name (%s).' % (config_name,))
def translate_v1_to_v2(parsed_job):
"""Translate tryjob desc from V1 to V2."""
parsed_job.setdefault('extra_args', []).append('--remote-trybot')
parsed_job['version'] = 2
def translate_v2_to_v3(parsed_job):
"""Translate tryjob desc from V2 to V3."""
# V3 --remote-patches format is not backwards compatible.
if any(a.startswith('--remote-patches')
for a in parsed_job.get('extra_args', ())):
raise BadJobfile('Cannot translate --remote-patches from tryjob v.2 to '
'v.3. Please run repo sync.')
parsed_job['version'] = 3
class CrOSTryJobGit(TryBase):
"""Poll a Git server to grab patches to try."""
# Name of property source for generated properties.
_PROPERTY_SOURCE = 'Try Job'
# The version of tryjob that the master is expecting.
_TRYJOB_FORMAT_VERSION = 3
# Functions that translate from one tryjob version to another.
_TRANSLATION_FUNCS = {
1 : translate_v1_to_v2,
2 : translate_v2_to_v3,
}
# Template path URL component to retrieve the Base64 contents of a file from
# Gitiles.
_GITILES_PATH_TMPL = '%(repo)s/+/%(revision)s/%(path)s?format=text'
@classmethod
def updateJobDesc(cls, parsed_job):
"""Ensure job description is in the format we expect."""
while parsed_job['version'] < cls._TRYJOB_FORMAT_VERSION:
prev_ver = parsed_job['version']
translation_func = cls._TRANSLATION_FUNCS[parsed_job['version']]
translation_func(parsed_job)
if parsed_job['version'] <= prev_ver:
raise AssertionError('translation function %s not incrementing version!'
% str(translation_func))
def __init__(self, name, pollers, smtp_host, from_addr, reply_to,
email_footer, cbuildbot_configs, properties=None):
"""Initialize the class.
Arguments:
name: See TryBase.__init__().
pollers: A list of job repo git pit pollers.
smtp_host: The smtp host for sending out error emails.
from_addr: The email address to display as being sent from.
reply_to: The email address to put in the 'Reply-To' email header field.
email_footer: The footer to append to any emails sent out.
cbuildbot_configs: (CbuildbotConfigs) A configuration set instance. Any
'bot' request outside of this list will go to an 'etc' builder, if
available.
properties: See TryBase.__init__()
"""
TryBase.__init__(self, name, [], properties or {})
self.pollers = pollers
self.smtp_host = smtp_host
self.from_addr = from_addr
self.reply_to = reply_to
self.email_footer = email_footer
self.cbb = cbuildbot_configs
def startService(self):
TryBase.startService(self)
self.startConsumingChanges()
@staticmethod
def load_job(data):
try:
return json.loads(data)
except ValueError as e:
raise BadJobfile("Failed to parse job JSON: %s" % (e.message,))
def validate_job(self, parsed_job):
# A list of field description tuples of the format:
# (name, type, required).
fields = [('name', basestring, True),
('user', basestring, True),
('email', list, True),
('bot', list, True),
('extra_args', list, False),
('version', int, True),
('slaves_request', list, False),
]
error_msgs = []
for name, f_type, required in fields:
val = parsed_job.get(name)
if val is None:
if required:
error_msgs.append('Option %s missing!' % name)
elif not isinstance(val, f_type):
error_msgs.append('Option %s of wrong type!' % name)
# If we're an 'etc' job, we must have bots defined to execute.
for bot in parsed_job['bot']:
if bot in self.cbb.configs:
continue
# Assert that this is a valid 'etc' build.
try:
self.cbb.ValidateEtcBuild(bot)
except ValueError as e:
error_msgs.append("Invalid 'etc' build (%s): %s" % (bot, e.message))
if error_msgs:
raise BadJobfile('\n'.join(error_msgs))
def get_props(self, config, options):
"""Overriding base class method."""
props = Properties()
props.setProperty('slaves_request', options.get('slaves_request', []),
self._PROPERTY_SOURCE)
props.setProperty('cbb_config', config, self._PROPERTY_SOURCE)
extra_args = options.get('extra_args')
if extra_args:
# This field can be quite large, and exceed BuildBot property limits.
# Compress it, Base64 encode it, and prefix it with "z:" so the consumer
# knows its size.
extra_args = 'z:' + base64.b64encode(zlib.compress(json.dumps(
extra_args)))
props.setProperty('cbb_extra_args', extra_args,
self._PROPERTY_SOURCE)
return props
def create_buildset(self, ssid, parsed_job):
"""Overriding base class method."""
dlist = []
buildset_name = '%s:%s' % (parsed_job['user'], parsed_job['name'])
for bot in parsed_job['bot']:
builder_name = self.cbb.GetBuilderForConfig(bot)
log.msg("Creating '%s' try job(s) %s for %s" % (builder_name, ssid, bot))
dlist.append(self.addBuildsetForSourceStamp(ssid=ssid,
reason=buildset_name,
external_idstring=buildset_name,
builderNames=[builder_name],
properties=self.get_props(bot, parsed_job)))
return defer.DeferredList(dlist)
def send_validation_fail_email(self, name, emails, error):
"""Notify the user via email about the tryjob error."""
html_content = []
html_content.append('<html><body>')
body = """
Your tryjob with name '%(name)s' failed the validation step. This is most
likely because <br>you are running an older version of cbuildbot. Please run
<br><code>repo sync chromiumos/chromite</code> and try again. If you still
see<br>this message please contact <EMAIL>.<br>
"""
html_content.append(body % {'name': name})
html_content.append("Extra error information:")
html_content.append(error.replace('\n', '<br>\n'))
html_content.append(self.email_footer)
m = Message()
m.set_payload('<br><br>'.join(html_content), 'utf8')
m.set_type("text/html")
m['Date'] = formatdate(localtime=True)
m['Subject'] = 'Tryjob failed validation'
m['From'] = self.from_addr
m['Reply-To'] = self.reply_to
result = defer.Deferred()
sender_factory = SMTPSenderFactory(self.from_addr, emails,
StringIO(m.as_string()), result)
reactor.connectTCP(self.smtp_host, 25, sender_factory)
@defer.inlineCallbacks
def gotChange(self, change, important):
try:
yield self._gotChangeImpl(change, important)
except Exception as e:
log.msg('Exception in try job scheduler: %s' % (e,))
import traceback
traceback.print_exc()
@defer.inlineCallbacks
def _gotChangeImpl(self, change, _important):
"""Process the received data and send the queue buildset."""
# Find poller that this change came from.
for poller in self.pollers:
if not isinstance(poller, gitiles_poller.GitilesPoller):
continue
if poller.repo_url == change.repository:
break
else:
raise BadJobfile(
'Received tryjob from unsupported repository %s' % change.repository)
# pylint: disable=W0631
file_contents = yield self.loadGitilesChangeFile(poller, change)
parsed = {}
try:
parsed = self.load_job(file_contents)
self.validate_job(parsed)
self.updateJobDesc(parsed)
except BadJobfile as e:
self.send_validation_fail_email(parsed.setdefault('name', ''),
parsed['email'], str(e))
raise
# The sourcestamp/buildsets created will be merge-able.
ssid = yield self.master.db.sourcestamps.addSourceStamp(
branch=change.branch,
revision=change.revision,
project=change.project,
repository=change.repository,
changeids=[change.number])
yield self.create_buildset(ssid, parsed)
@defer.inlineCallbacks
def loadGitilesChangeFile(self, poller, change):
if len(change.files) != 1:
# We only accept changes with 1 diff file.
raise BadJobfile(
'Try job with too many files %s' % (','.join(change.files)))
# Load the contents of the modified file.
path = self._GITILES_PATH_TMPL % {
'repo': poller.repo_path,
'revision': change.revision,
'path': change.files[0],
}
contents_b64 = yield poller.agent.request('GET', path, retry=5,
protocol=StringResponse.Get)
defer.returnValue(base64.b64decode(contents_b64))
| 1.734375 | 2 |
Medium/515.py | Hellofafar/Leetcode | 6 | 3652 | # ------------------------------
# 515. Find Largest Value in Each Tree Row
#
# Description:
# You need to find the largest value in each row of a binary tree.
# Example:
# Input:
# 1
# / \
# 3 2
# / \ \
# 5 3 9
# Output: [1, 3, 9]
#
# Version: 1.0
# 12/22/18 by Jianfa
# ------------------------------
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import sys

class Solution:
def largestValues(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
if not root:
return []
children = [root]
res = []
while children:
temp = [] # Node of next row
largest = -sys.maxsize # Largest number of this row
for i in range(len(children)):
node = children[i]
largest = max(node.val, largest)
if node.left:
temp.append(node.left)
if node.right:
temp.append(node.right)
res.append(largest)
children = temp
return res
# Used for testing
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
# BFS solution. | 4.21875 | 4 |
opsmop/meta/docs/exparser.py | lachmanfrantisek/opsmop | 0 | 3653 | # Copyright 2018 <NAME> LLC, <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
class Example(object):
def __init__(self):
# things we'll figure out as we scan an example
self.name = ""
self.see_files = []
self.description = []
self.code = []
class Record(object):
def __init__(self):
# things which we'll figure out as we scan the example
self.name = ""
self.purpose = ""
self.provider_names = []
self.related_modules = []
self.category = ""
self.description = []
self.examples = []
self.current_example = Example()
self.phase = 'module'
self.count = 0
def set_phase(self, phase):
self.phase = phase
print("---------------------------------------------------------")
print("%s phase | %s" % (self.count, self.phase))
print("---------------------------------------------------------")
@classmethod
def from_file(cls, filename):
r = cls()
r.name = os.path.basename(filename).replace(".py","")
print("=========================================================")
print("%s M | %s" % ('0', r.name))
data = open(filename).read().splitlines()
for line in data:
if not r.handle_line(line):
break
return r
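
    # For illustration, the leading comment header this parser expects looks
    # roughly like the following (inferred from the commands handled below;
    # real opsmop module files may differ):
    #
    #   # MODULE: package
    #   # CATEGORY: packaging
    #   # PURPOSE: manage packages
    #   # RELATED: yum, brew
    #   # PROVIDERS: apt
    #   # DESCRIPTION:
    #   # longer module description...
    #   # =========================================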
def load_command(self, line):
if "DESCRIPTION" in line or '----' in line or '====' in line:
pass
elif not ":" in line:
# commands must contain a colon unless they are blocks or DESCRIPTION starters
return (False, None, None)
if not line.startswith("#"):
# commands must be in comments
return (False, None, None)
if ":" in line:
tokens = line.split(":")
if tokens[0].upper() != tokens[0]:
# commands must be in all caps. This is done
# so we don't get confused by colons in URLs and so on.
print("REJECT: %s" % tokens[0])
return (False, None, None)
# at this point we are sure it is a command
if '#------------' in line.replace(" ",""):
return (True, 'start_block', None)
if '#============' in line.replace(" ",""):
return (True, 'end_block', None)
# throw away the leading comment
line = line.replace("#","",1).strip()
if line.startswith("DESCRIPTION"):
return (True, 'description', None)
tokens = line.split(':', 1)
command = tokens[0].replace("#","").strip().lower()
rest = tokens[1].strip()
return (True, command, rest)
def handle_line(self, line):
self.count = self.count + 1
(is_command, command, rest) = self.load_command(line)
print("%s line | %s" % (self.count, line))
#if command == 'policy':
# return False
if is_command:
#if command not in [ 'start_block', 'end_block' ]:
# print("keyword: %s => %s" % (command, rest))
self.handle_command(command, rest)
return True
#print("PHASE=%s" % self.phase)
#print("LINE=%s" % line)
if self.phase == 'module':
if not line.startswith("#") or line.replace("#","").strip():
raise Exception("the module phase should be all commands")
elif self.phase == 'description':
# module description lines must be comments
self.handle_module_description(line)
elif self.phase == 'example':
if not line.startswith("#") or line.replace("#","").strip():
raise Exception("the example phase should be all commands")
elif self.phase == 'example_description':
self.handle_example_description(self.current_example, line)
elif self.phase == 'example_code':
self.handle_example_code(self.current_example, line)
elif self.phase == 'limbo':
#print("ignoring line while in limbo: %s" % line)
pass
elif self.phase == 'done':
#print("ignoring line while done: %s" % line)
pass
else:
raise Exception("unknown phase: %s" % self.phase)
return True # continue
def handle_command(self, command, rest):
#print("<PHASE: %s, COMMAND: %s, REST: %s>" % (self.phase, command, rest))
if self.phase == 'done':
return False
if self.phase == 'module':
# from module mode the only state transition is into module_description mode
# when we find the description command
if command not in ['start_block', 'end_block']:
print("%s set | %-20s | %s" % (self.count, command, rest))
if command == 'module':
pass
elif command == 'start_block':
pass
elif command == 'category':
self.category = rest
elif command == 'purpose':
self.purpose = rest
elif command == 'related':
self.related_modules = [ x.strip() for x in rest.split(",") ]
elif command == 'providers':
self.providers = [ x.strip() for x in rest.split(",") ]
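                # (Note: __init__ initializes provider_names, not providers; the
                #  differing attribute name here may be a latent inconsistency.)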
elif command == 'fyi':
pass
elif command == 'description':
print("---------------------------------------------------------")
self.set_phase('description')
elif command == 'end_block':
raise Exception("unexpected end block without description")
else:
raise Exception("unknown command: %s" % command)
elif self.phase == 'description':
# in description phase end block moves us into limbo until we find
# another example start block
if command == 'end_block':
self.set_phase('limbo')
else:
raise Exception("invalid command: %s" % command)
elif self.phase == 'limbo':
# in limbo, seeing a start block moves us into example phase
if command == 'start_block':
self.set_phase('example')
else:
raise Exception("invalid command: %s" % command)
elif self.phase == 'example':
# in example phase we can only move into example description phase
# by hitting the description command
if command == 'example':
print("---------------------------------------------------------")
print("%s exmp | %s" % (self.count, rest))
print("---------------------------------------------------------")
self.current_example.name = rest
elif command == 'setup':
self.set_phase('done')
elif command == 'description':
print("MOV!")
self.set_phase('example_description')
elif command == 'see_files' or command == 'see_file':
self.current_example.see_files = [ x.strip() for x in rest.split(",")]
else:
raise Exception("unknown command: %s" % command)
elif self.phase == 'example_description':
# in example description phase we can only move into example code phase
# by hitting an end block
if command == 'end_block':
print("-------")
self.set_phase('example_code')
else:
raise Exception("unknown command: %s" % command)
elif self.phase == 'example_code':
# in example code phase we can only move back into example phase by
# hitting a start block
if command == 'start_block':
self.examples.append(self.current_example)
self.current_example = Example()
self.set_phase('example')
else:
raise Exception("unknown command: %s" % command)
elif self.phase == 'done':
return False
else:
raise Exception("unknown phase: %s" % self.phase)
def handle_example_description(self, example, line):
# could be a comment or the code example, we want to keep both
if line.startswith("#"):
line = line.replace("#","")
line = line.strip()
print("%s desc | %s" % (self.count, line))
example.description.append(line)
def handle_example_code(self, example, line):
line = line.rstrip()
example.code.append(line)
print("%s code | %s" % (self.count, line))
def handle_module_description(self, line):
if line.startswith("#"):
line = line.replace("#","")
line = line.strip()
if line:
print("%s mdesc | %s" % (self.count, line))
self.description.append(line)
| 2.875 | 3 |
pylox/TokenType.py | sheunl/Compiler_Tests | 0 | 3654 | from enum import Enum
class T(Enum):
#single character Tokens
LEFT_PAREN =1
RIGHT_PAREN =2
LEFT_BRACE = 3
RIGHT_BRACE = 4
COMMA = 5
DOT = 6
MINUS = 7
PLUS = 8
SEMICOLON = 9
SLASH = 10
STAR = 11
#one or two character tokens
BANG = 12
BANG_EQUAL = 13
EQUAL = 14
EQUAL_EQUAL = 15
GREATER = 16
GREATER_EQUAL = 17
LESS = 18
LESS_EQUAL = 19
#Literals
IDENTIFIER = 20
STRING = 21
NUMBER = 22
#keywords
AND = 23
CLASS = 24
ELSE = 25
FALSE = 26
FUN = 27
FOR = 28
IF = 29
NIL =30
OR =31
PRINT =32
RETURN = 33
SUPER = 34
THIS = 35
TRUE = 36
VAR = 37
WHILE = 38
    EOF = 39 | 3.21875 | 3 |
src/oslibs/cocos/cocos-src/tools/cocos2d-console/plugins/framework/framework_add.py | dios-game/dios-cocos | 1 | 3655 |
import cocos
from MultiLanguage import MultiLanguage
from package.helper import ProjectHelper
class FrameworkAdd(cocos.CCPlugin):
@staticmethod
def plugin_name():
return "add-framework"
@staticmethod
def brief_description():
return MultiLanguage.get_string('FRAMEWORK_ADD_BRIEF')
# parse arguments
def parse_args(self, argv):
from argparse import ArgumentParser
parser = ArgumentParser(prog="cocos %s" % self.__class__.plugin_name(),
description=self.__class__.brief_description())
parser.add_argument("name", metavar="NAME", help=MultiLanguage.get_string('FRAMEWORK_ADD_ARG_NAME'))
return parser.parse_args(argv)
def run(self, argv):
args = self.parse_args(argv)
name = args.name
project = ProjectHelper.get_current_project()
ProjectHelper.add_framework(project, name)
| 2.40625 | 2 |
src/utils.py | f-grimaldi/explain_ML | 1 | 3656 | from matplotlib import colors
import numpy as np
class SaveOutput:
def __init__(self):
self.outputs = []
def __call__(self, module, module_in, module_out):
self.outputs.append(module_out)
def clear(self):
self.outputs = []
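
# Minimal usage sketch for SaveOutput (illustrative; assumes a PyTorch module
# and that `layer`, `model` and `x` are defined elsewhere):
#   hook = SaveOutput()
#   handle = layer.register_forward_hook(hook)
#   _ = model(x)              # hook.outputs now holds layer's outputs
#   handle.remove(); hook.clear()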
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, vcenter=None, clip=False):
self.vcenter = vcenter
colors.Normalize.__init__(self, vmin, vmax, clip)
    def __call__(self, value, clip=None):
        # I'm ignoring masked values and all kinds of edge cases to make a
        # simple example...
        # Map [vmin, vcenter, vmax] onto [0, 0.5, 1] so that vcenter lands on
        # the midpoint of the colormap.
        x, y = [self.vmin, self.vcenter, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))
| 2.703125 | 3 |
lib/two/mongomgr.py | erkyrath/tworld | 38 | 3657 |
"""
Manage the connection to the MongoDB server.
"""
import tornado.gen
import tornado.ioloop
import motor
class MongoMgr(object):
def __init__(self, app):
# Keep a link to the owning application.
self.app = app
self.log = self.app.log
# This will be the Motor (MongoDB) connection. We'll open it in the
# first monitor_mongo_status call.
self.mongo = None
self.mongoavailable = False # true if self.mongo exists and is open
self.mongotimerbusy = False # true while monitor_mongo_status runs
# We also manage self.app.mongodb, a MotorDatabase. This must be
# non-None exactly when mongoavailable is true.
def init_timers(self):
ioloop = tornado.ioloop.IOLoop.instance()
# The mongo status monitor. We set up one call immediately, and then
# try again every three seconds.
ioloop.add_callback(self.monitor_mongo_status)
res = tornado.ioloop.PeriodicCallback(self.monitor_mongo_status, 3000)
res.start()
def close(self):
"""Close the connection to mongodb. (The monitor will start it
right back up again, or try to.)
"""
if self.mongo:
try:
self.mongo.disconnect()
except Exception as ex:
self.log.error('Problem disconnecting mongo: %s', ex)
self.mongo = None
self.app.mongodb = None
@tornado.gen.coroutine
def monitor_mongo_status(self):
if (self.mongotimerbusy):
self.log.warning('monitor_mongo_status: already in flight; did a previous call jam?')
return
if (self.app.shuttingdown):
self.log.warning('monitor_mongo_status: server is shutting down, never mind')
return
self.mongotimerbusy = True
if (self.mongoavailable):
try:
res = yield motor.Op(self.mongo.admin.command, 'ping')
if (not res):
self.log.error('Mongo client not alive')
self.mongoavailable = False
except Exception as ex:
self.log.error('Mongo client not alive: %s', ex)
self.mongoavailable = False
if (not self.mongoavailable):
self.close()
if (not self.mongoavailable):
try:
self.mongo = motor.MotorClient(tz_aware=True)
res = yield motor.Op(self.mongo.open)
### maybe authenticate to a database?
self.mongoavailable = True
self.app.mongodb = self.mongo[self.app.opts.mongo_database]
self.log.info('Mongo client open')
self.app.queue_command({'cmd':'dbconnected'})
except Exception as ex:
self.mongoavailable = False
self.app.mongodb = None
self.log.error('Mongo client not open: %s', ex)
self.mongotimerbusy = False
| 2.625 | 3 |
code/examples/example_binomial_and_log_normal_abtest.py | hugopibernat/BayesianABTestAnalysis | 0 | 3658 | #################################################
####### Author: <NAME> #######
####### Contact: <EMAIL> #######
####### Date: April 2014 #######
#################################################
from bayesianABTest import sampleSuccessRateForBinomial, sampleMeanForLogNormal, probabilityOfABetterThanB
from numpy.random import lognormal
from numpy import mean, concatenate, zeros
# Generate Log-Normal data
A_actuals = lognormal(mean=4.10, sigma=1.0, size=100)
B_actuals = lognormal(mean=4.00, sigma=1.0, size=100)
# Plus some zeros
A_data = concatenate([A_actuals,zeros(10000)])
B_data = concatenate([B_actuals,zeros(10000)])
# Modeling conversions with a binomial variable
A_purchases = sum(A_data > 0)
A_sessions = len(A_data)
B_purchases = sum(B_data > 0)
B_sessions = len(B_data)
A_CR = sampleSuccessRateForBinomial(A_sessions,A_purchases)
B_CR = sampleSuccessRateForBinomial(B_sessions,B_purchases)
# Modeling the spend with a log-normal
A_non_zero_data = A_data[A_data > 0]
B_non_zero_data = B_data[B_data > 0]
A_spend = sampleMeanForLogNormal(A_non_zero_data)
B_spend = sampleMeanForLogNormal(B_non_zero_data)
# Combining the two
A_rps = A_CR*A_spend
B_rps = B_CR*B_spend
# Result:
print probabilityOfABetterThanB(A_rps,B_rps) | 2.21875 | 2 |
tests/models/test_documents.py | airslate-oss/python-airslate | 3 | 3659 |
# This file is part of the airslate.
#
# Copyright (c) 2021 airSlate, Inc.
#
# For the full copyright and license information, please view
# the LICENSE file that was distributed with this source code.
from airslate.models.documents import UpdateFields
from airslate.entities.fields import Field
def test_empty_update_fields__to_dict():
model = UpdateFields()
assert model.to_dict() == {'data': []}
def test_update_fields__to_dict():
model = UpdateFields(data=[Field('123'), Field('abc')])
assert model.to_dict() == {'data': [
{'id': '123', 'type': 'dictionary'},
{'id': 'abc', 'type': 'dictionary'}
]}
| 2.09375 | 2 |
sim/dynamicobject.py | rseed42/labyrinth | 0 | 3660 | class DynamicObject(object):
def __init__(self, name, id_):
self.name = name
self.id = id_
| 3.0625 | 3 |
app/main.py | meysam81/sheypoor | 0 | 3661 |
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app import api
from app.core.config import config
app = FastAPI(title="Sheypoor")
# Set all CORS enabled origins
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(api.router, prefix=config.API_URI)
| 1.765625 | 2 |
cdnu/ccds.py | Indy2222/mbg-codon-usage | 0 | 3662 | <filename>cdnu/ccds.py
from typing import List, NamedTuple
CCDS_FILE = 'CCDS.current.txt'
CHROMOSOMES = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',
'13', '14', '15', '16', '17', '18', '19', '20', '21', '22',
'X', 'Y')
class CdsPos(NamedTuple):
ccds_id: str
indexes: list
"""2-tuples with start (inclusive) and stop indexes (exclusive) in
reference genome. Whole CDS can be constructed as concatenation of the
sub-sequences."""
molecule: str
"""Molecule name, see :const:`CHROMOSOMES`"""
def load_ccds() -> List[CdsPos]:
"""Load file with CDS locations within GRCh38 genome as a list of
:class:`CdsPos`."""
cds = []
with open(CCDS_FILE, encoding='utf-8', newline='\n') as fp:
for line in fp:
if not line:
# Skip empty lines
continue
if line.startswith('#'):
# Skip comments
continue
parts = line.split('\t')
ccds_id = parts[4]
status = parts[5]
if 'Public' not in status:
# CDS is not yet public
continue
if parts[6] == '-':
# CDS strand negative order = reverse-complement
continue
locations_str = parts[9]
if locations_str == '-':
# CDS location unknown
continue
chromosome = parts[0]
assert chromosome in CHROMOSOMES, chromosome
locations = []
assert locations_str.startswith('[')
assert locations_str.endswith(']')
for location_str in locations_str[1:-1].split(','):
start_str, stop_str = location_str.split('-')
start, stop = int(start_str), int(stop_str) + 1
locations.append((start, stop))
if sum(b - a for a, b in locations) % 3 != 0:
# Skip CDS which are not multiple of three in length.
continue
cds.append(CdsPos(
ccds_id=ccds_id,
molecule='chr' + chromosome,
indexes=locations
))
return cds
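# Illustrative usage sketch (not part of the original module). It assumes that
# CCDS.current.txt is present in the working directory, as CCDS_FILE requires.
if __name__ == '__main__':
    all_cds = load_ccds()
    print('loaded %d coding sequences' % len(all_cds))
    for cds in all_cds[:3]:
        total_len = sum(stop - start for start, stop in cds.indexes)
        print(cds.ccds_id, cds.molecule, total_len)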
| 3.015625 | 3 |
test/test_resolve_errors.py | ITMO-NSS-team/GEFEST | 12 | 3663 | import pytest
from copy import deepcopy
from gefest.core.structure.point import Point
from gefest.core.structure.polygon import Polygon
from gefest.core.structure.structure import Structure
from gefest.core.algs.postproc.resolve_errors import *
from gefest.core.algs.geom.validation import *
# marking length and width for testing polygon
poly_width = 10
poly_length = 20
# creating a testing polygons via corner points
rectangle_points = [(-1, 40), (-1, poly_length+40), (-poly_width-10, poly_length+40), (-poly_width-10, 40)]
out_bounds_rectangle_poly = Polygon('rectangle', points=[Point(*coords) for coords in rectangle_points])
triangle_points = [(1, 1), (poly_width, poly_length), (1, poly_length)]
unclosed_triangle_poly = Polygon('triangle', points=[Point(*coords) for coords in triangle_points])
incorrect_points = [(5, 5), (5, poly_length), (8, poly_length), (5, 5), (5, 30)]
incorrect_poly = Polygon('incorrect_poly', points=[Point(*coords) for coords in incorrect_points])
domain = Domain()
def test_unclosed_poly():
input_structure = Structure([unclosed_triangle_poly])
observed_structure = postprocess(input_structure, domain)
assert unclosed_poly(input_structure, domain)
assert not unclosed_poly(observed_structure, domain)
def test_self_intersection():
input_structure = Structure([incorrect_poly])
observed_structure = postprocess(input_structure, domain)
assert self_intersection(input_structure)
assert not self_intersection(observed_structure)
def test_out_of_bound():
input_structure = Structure([out_bounds_rectangle_poly])
observed_structure = postprocess(input_structure, domain)
assert out_of_bound(input_structure, domain)
assert not out_of_bound(observed_structure, domain)
def test_fixed_polys():
domain = Domain(fixed_points=[[[15, 30],
[40, 30],
[15, 40]]])
poly_like_fixed = Polygon('like_fixed', points=[Point(15, 30), Point(40, 30), Point(15, 40)])
input_structure = Structure([poly_like_fixed, unclosed_triangle_poly])
observed_structure = postprocess(input_structure, domain)
assert all([np.isclose(len(observed_structure.polygons), 2),
'like_fixed' not in [poly.id for poly in observed_structure.polygons],
'fixed' in [poly.id for poly in observed_structure.polygons]])
def test_too_close():
same_poly = deepcopy(unclosed_triangle_poly)
same_poly.id = 'same_triangle'
input_structure = Structure([unclosed_triangle_poly, same_poly])
observed_structure = postprocess(input_structure, domain)
print(observed_structure.polygons)
assert np.isclose(len(observed_structure.polygons), 1)
| 2.1875 | 2 |
tests/mocks.py | davla/i3-live-tree | 1 | 3664 | from unittest.mock import MagicMock, Mock
from i3ipc.aio import Con
import i3_live_tree.tree_serializer # noqa: F401
class MockConSerializer(Mock, Con):
"""Mock a generic i3ipc.aio.Con for serialization purposes
This Mock is meant to ease testing of i3ipc.aio.Con serialization methods,
which are mokey patched in i3_live_tree.tree_serializer.
In order to achieve this, the mock inherits all the method implementations
of i3ipc.aio.Con, most importantly the serialization ones. However,
whatever is needed for serialization, both properties and methods, is
mocked and can be injected in the constructor, in order to ease the
creation of mock instances.
"""
def __init__(self, *args, name=None, layout=None, focused=False,
nodes=iter(()), **kwargs):
Mock.__init__(self, *args, **kwargs)
self.focused = focused
self.layout = layout
self.name = name
self.nodes = nodes
class MockConNavigation(MagicMock):
"""Mock an i3ipc.aio.Con for navigation purposes
This Mock is meant to be used when testing i3ipc event handlers. It mocks
all the necessary methods and properties, by returning `self` when an
i3ipc.aio.Con instance is needed for the sake of simplicity.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def find_focused(self):
"""Return the focused window"""
return self
def workspace(self):
"""Return the containing workspace"""
return self
class MockI3(Mock):
"""Mock an i3ipc.aio.Connection"""
def __init__(self, *args, tree, **kwargs):
super().__init__(*args, **kwargs)
self.tree = tree
async def get_tree(self):
"""Return the i3 tree asynchronously"""
return self.tree
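# Illustrative sketch (not part of the original module) of how these mocks are
# typically combined in a test; the serializer behaviour depends on the methods that
# i3_live_tree.tree_serializer monkey-patches onto Con, so treat the lines below as
# an assumption rather than the real API:
#   window = MockConSerializer(name='editor', focused=True)
#   workspace = MockConSerializer(layout='splith', nodes=iter((window,)))
#   i3 = MockI3(tree=MockConNavigation())
#   tree = await i3.get_tree()  # inside an async test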
| 2.90625 | 3 |
hvac/api/secrets_engines/gcp.py | nested-tech/hvac | 0 | 3665 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Gcp methods module."""
from hvac import exceptions
from hvac.api.vault_api_base import VaultApiBase
from hvac.constants.gcp import DEFAULT_MOUNT_POINT, ALLOWED_CREDS_ENDPOINTS
class Gcp(VaultApiBase):
def generate_credentials(self, roleset, endpoint='key', mount_point=DEFAULT_MOUNT_POINT):
if endpoint not in ALLOWED_CREDS_ENDPOINTS:
error_msg = 'invalid endpoint argument provided "{arg}", supported types: "{allowed_endpoints}"'
raise exceptions.ParamValidationError(error_msg.format(
arg=endpoint,
allowed_endpoints=', '.join(ALLOWED_CREDS_ENDPOINTS),
))
api_path = '/v1/{mount_point}/{endpoint}/{roleset}'.format(
mount_point=mount_point,
endpoint=endpoint,
roleset=roleset,
)
response = self._adapter.get(
url=api_path
)
return response.json()
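# Illustrative usage sketch (not part of the original file). The client wiring follows
# the usual hvac pattern; the Vault URL, token and roleset name are placeholders.
#   import hvac
#   client = hvac.Client(url='https://vault.example.com:8200', token='...')
#   creds = client.secrets.gcp.generate_credentials(roleset='my-roleset', endpoint='key')
#   print(creds['data'])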
| 2.390625 | 2 |
ypricemagic/uniswap.py | poolpitako/ypricemagic | 0 | 3666 | import token
from tokenize import tokenize
from brownie import Contract, chain
from brownie.exceptions import ContractNotFound
from cachetools.func import ttl_cache
from .utils.cache import memory
from .utils.multicall2 import fetch_multicall
from .interfaces.ERC20 import ERC20ABI
import ypricemagic.magic
import ypricemagic.utils.utils
from .constants import STABLECOINS, dai, usdc, usdt, wbtc, weth, sushi
# NOTE: If this is failing to pull a price for a token you need, it's likely because that token requires a special swap path.
# Please add a viable swap path below to fetch price data successfully.
#project.load()
if chain.id == 1:
FACTORIES = {
"uniswap": "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f",
"sushiswap": "0xC0AEe478e3658e2610c5F7A4A2E1777cE9e4f2Ac",
}
ROUTERS = {
"uniswap": Contract("0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"),
"sushiswap": Contract("0xD9E1CE17F2641F24AE83637AB66A2CCA9C378B9F"),
}
SPECIAL_PATHS = {
"sushiswap": {
"0xEF69B5697f2Fb0345cC680210fD39b593a2f9684": ["<KEY>","0x6B3595068778DD592e39A122f4f5a5cF09C90fE2",weth,usdc]
,"0xbf2179859fc6D5BEE9Bf9158632Dc51678a4100e": ["0xbf2179859fc6D5BEE9Bf9158632Dc51678a4100e","0xC28E27870558cF22ADD83540d2126da2e4b464c2",weth,usdc]
,"0x3166C570935a7D8554c8f4eA792ff965D2EFe1f2": ["0x3166C570935a7D8554c8f4eA792ff965D2EFe1f2","<KEY>",usdc]
,"0xE6279E1c65DD41b30bA3760DCaC3CD8bbb4420D6": ["0xE6279E1c65DD41b30bA3760DCaC3CD8bbb4420D6","0x87F5F9eBE40786D49D35E1B5997b07cCAA8ADbFF",weth,usdc]
,"0x4954Db6391F4feB5468b6B943D4935353596aEC9": ["0x4954Db6391F4feB5468b6B943D4935353596aEC9",usdc]
,"0x1E18821E69B9FAA8e6e75DFFe54E7E25754beDa0": ["0x1E18821E69B9FAA8e6e75DFFe54E7E25754beDa0","<KEY>","0x6B3595068778DD592e39A122f4f5a5cF09C90fE2",weth,usdc]
,"0xfC1E690f61EFd961294b3e1Ce3313fBD8aa4f85d": ["0xfC1E690f61EFd961294b3e1Ce3313fBD8aa4f85d","0xba100000625a3754423978a60c9317c58a424e3D",weth,usdc]
,"0xBA50933C268F567BDC86E1aC131BE072C6B0b71a": ["0xBA50933C268F567BDC86E1aC131BE072C6B0b71a",weth,usdc]
,"0x6102407f07029892eB5Ff02164ADFaFb85f4d222": ["0x6102407f07029892eB5Ff02164ADFaFb85f4d222",usdt]
,"0x85034b3b2e292493D029443455Cc62ab669573B3": ["0x85034b3b2e292493D029443455Cc62ab669573B3","0x1f9840a85d5aF5bf1D1762F925BDADdC4201F984",weth,usdc]
,"0xb220D53F7D0f52897Bcf25E47c4c3DC0bac344F8": ["0xb220D53F7D0f52897Bcf25E47c4c3DC0bac344F8", usdc]
,"0x383518188C0C6d7730D91b2c03a03C837814a899": ["0x383518188C0C6d7730D91b2c03a03C837814a899",dai]
,"0xafcE9B78D409bF74980CACF610AFB851BF02F257": ["0xafcE9B78D409bF74980CACF610AFB851BF02F257",wbtc,weth,usdc]
},
"uniswap": {
}
}
elif chain.id == 56:
ROUTERS = {
"pancakeswapv2": Contract("0x10ED43C718714eb63d5aA57B78B54704E256024E"),
"pancakeswapv1": Contract("0x05fF2B0DB69458A0750badebc4f9e13aDd608C7F")
}
FACTORIES = {
"pancakeswapv2": "0xcA143Ce32Fe78f1f7019d7d551a6402fC5350c73",
"pancakeswapv1": "0xBCfCcbde45cE874adCB698cC183deBcF17952812"
}
SPECIAL_PATHS = {
"pancakeswapv2": {
},
"pancakeswapv1": {
}
}
elif chain.id == 137:
ROUTERS = {
"quickswap": Contract("0xa5E0829CaCEd8fFDD4De3c43696c57F7D7A678ff")
}
FACTORIES = {
"quickswap": "0x5757371414417b8C6CAad45bAeF941aBc7d3Ab32",
}
SPECIAL_PATHS = {
"quickswap": {
}
}
FACTORY_TO_ROUTER = {FACTORIES[name]: ROUTERS[name] for name in FACTORIES}
FACTORY_TO_PROTOCOL = {FACTORIES[name]: name for name in FACTORIES}
@ttl_cache(ttl=36000)
def get_price(token_in, token_out=usdc, router="uniswap", block=None, paired_against=weth):
"""
Calculate a price based on Uniswap Router quote for selling one `token_in`.
Always uses intermediate WETH pair if `[token_in,weth,token_out]` swap path available.
"""
if chain.id == 56 and token_out == usdc:
busd = Contract("0xe9e7CEA3DedcA5984780Bafc599bD69ADd087D56")
token_out = busd
tokens = [str(token) for token in [token_in, token_out]]
amount_in = 10 ** ypricemagic.utils.utils.get_decimals_with_override(tokens[0])
if str(token_in) in STABLECOINS:
return 1
elif str(paired_against) in STABLECOINS and str(token_out) in STABLECOINS:
path = [token_in, paired_against]
elif weth in (token_in, token_out):
path = [token_in, token_out]
elif paired_against == sushi and token_out != sushi:
path = [token_in,sushi,weth,token_out]
elif str(token_in) in SPECIAL_PATHS[router].keys() and str(token_out) in STABLECOINS:
path = SPECIAL_PATHS[router][str(token_in)]
elif chain.id == 56: #bsc
from .constants import cake, wbnb
if wbnb in (token_in, token_out):
path = [token_in, token_out]
elif cake in (token_in, token_out):
path = [token_in, token_out]
else:
path = [token_in,wbnb,token_out]
    elif chain.id == 137: # polygon
from .constants import wmatic
if wmatic in (token_in, token_out):
path = [token_in, token_out]
else:
path = [token_in,wmatic,token_out]
else:
path = [token_in, weth, token_out]
fees = 0.997 ** (len(path) - 1)
if router in ROUTERS:
router = ROUTERS[router]
try:
quote = router.getAmountsOut(amount_in, path, block_identifier=block)
amount_out = quote[-1] / 10 ** ypricemagic.utils.utils.get_decimals_with_override(str(path[-1]))
return amount_out / fees
except ValueError as e:
return
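# Illustrative sketch (not part of the original module): pricing one unit of a token
# in USDC through the sushiswap router; the token address below is a placeholder.
#   price = get_price('0x...TokenAddress', router='sushiswap')
#   if price is None:
#       print('no viable swap path for this token')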
@ttl_cache(ttl=600)
def get_price_v1(asset, block=None):
factory = Contract("0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95")
try:
exchange = Contract(factory.getExchange(asset))
eth_bought = exchange.getTokenToEthInputPrice(10 ** ypricemagic.utils.utils.get_decimals_with_override(asset), block_identifier=block)
exchange = Contract(factory.getExchange(usdc))
usdc_bought = exchange.getEthToTokenInputPrice(eth_bought, block_identifier=block) / 1e6
fees = 0.997 ** 2
return usdc_bought / fees
except (ContractNotFound, ValueError) as e:
pass
@memory.cache()
def is_uniswap_pool(address):
try:
return Contract(address).factory() in FACTORY_TO_ROUTER
except (ValueError, OverflowError, AttributeError):
pass
return False
@ttl_cache(ttl=600)
def lp_price(address, block=None):
""" Get Uniswap/Sushiswap LP token price. """
def extrapolate_balance_if_needed():
nonlocal balances
if balances[0] and not balances[1]:
balances[1] = balances[0]
if balances[1] and not balances[0]:
balances[0] = balances[1]
return balances
pair = Contract(address)
if chain.id not in [56, 137]: # No multicall2 on bsc or poly
factory, token0, token1, supply, reserves = fetch_multicall(
[pair, "factory"],
[pair, "token0"],
[pair, "token1"],
[pair, "totalSupply"],
[pair, "getReserves"],
block=block
)
else:
factory = pair.factory(block_identifier = block)
token0 = pair.token0(block_identifier = block)
token1 = pair.token1(block_identifier = block)
supply = pair.totalSupply(block_identifier = block)
reserves = pair.getReserves(block_identifier = block)
router = FACTORY_TO_PROTOCOL[factory]
tokens = [ypricemagic.utils.utils.Contract_with_erc20_fallback(token) for token in [token0, token1]]
price0 = get_price(tokens[0], paired_against=tokens[1], router=router, block=block)
price1 = get_price(tokens[1], paired_against=tokens[0], router=router, block=block)
prices = [price0,price1]
scales = [10 ** ypricemagic.utils.utils.get_decimals_with_override(str(token)) for token in tokens]
supply = supply / 1e18
try:
balances = [res / scale * price for res, scale, price in zip(reserves, scales, prices)]
except TypeError as e: # If can't get price via router, try to get from elsewhere
if not price0:
try:
price0 = ypricemagic.magic.get_price(tokens[0], block)
except ypricemagic.magic.PriceError:
                price0 = None
if not price1:
try:
price1 = ypricemagic.magic.get_price(tokens[1], block)
except ypricemagic.magic.PriceError:
                price1 = None
prices = [price0,price1]
balances = [None,None] # [res / scale * price for res, scale, price in zip(reserves, scales, prices)]
if price0:
balances[0] = reserves[0] / scales[0] * price0
if price1:
balances[1] = reserves[1] / scales[1] * price1
balances = extrapolate_balance_if_needed()
try:
return sum(balances) / supply
except TypeError:
return | 1.96875 | 2 |
configs/configuration_textrnn.py | haodingkui/semeval2020-task5-subtask1 | 2 | 3667 | """ TextRNN model configuration """
class TextRNNConfig(object):
def __init__(
self,
vocab_size=30000,
pretrained_embedding=None,
embedding_matrix=None,
embedding_dim=300,
embedding_dropout=0.3,
lstm_hidden_size=128,
output_dim=1,
**kwargs
):
        self.vocab_size = vocab_size
        self.pretrained_embedding = pretrained_embedding
self.embedding_matrix = embedding_matrix
self.embedding_dim = embedding_dim
self.embedding_dropout = embedding_dropout
self.lstm_hidden_size = lstm_hidden_size
self.output_dim = output_dim
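# Illustrative usage sketch (not part of the original file): build a config and read
# back a few hyperparameters; any model class consuming it is assumed to be defined
# elsewhere in the project.
if __name__ == '__main__':
    cfg = TextRNNConfig(vocab_size=30000, embedding_dim=300, lstm_hidden_size=128)
    print(cfg.embedding_dim, cfg.lstm_hidden_size, cfg.output_dim)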
| 2.828125 | 3 |
settings/debug_members.py | akorzunin/telegram_auction_bot | 0 | 3668 | <gh_stars>0
DEBUG_MEMBER_LIST = [
503131177,
] | 1.046875 | 1 |
metrics/pointops/pointops_util.py | JiazeWang/SP-GAN | 73 | 3669 | from typing import Tuple
import torch
from torch.autograd import Function
import torch.nn as nn
from metrics.pointops import pointops_cuda
import numpy as np
class FurthestSampling(Function):
@staticmethod
def forward(ctx, xyz, m):
"""
input: xyz: (b, n, 3) and n > m, m: int32
output: idx: (b, m)
"""
assert xyz.is_contiguous()
b, n, _ = xyz.size()
idx = torch.cuda.IntTensor(b, m)
temp = torch.cuda.FloatTensor(b, n).fill_(1e10)
pointops_cuda.furthestsampling_cuda(b, n, m, xyz, temp, idx)
return idx
@staticmethod
def backward(xyz, a=None):
return None, None
furthestsampling = FurthestSampling.apply
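# Shape sketch (illustrative, not part of the original file): furthest point sampling
# selects m well-spread point indices per batch; it requires CUDA tensors.
#   xyz = torch.rand(8, 1024, 3).cuda()   # (b, n, 3)
#   idx = furthestsampling(xyz, 256)      # (b, 256) int32 indices into the n points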
class Gathering(Function):
@staticmethod
def forward(ctx, features, idx):
"""
input: features: (b, c, n), idx : (b, m) tensor
output: (b, c, m)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
m = idx.size(1)
output = torch.cuda.FloatTensor(b, c, m)
pointops_cuda.gathering_forward_cuda(b, c, n, m, features, idx, output)
ctx.for_backwards = (idx, c, n)
return output
@staticmethod
def backward(ctx, grad_out):
idx, c, n = ctx.for_backwards
b, m = idx.size()
grad_features = torch.cuda.FloatTensor(b, c, n).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.gathering_backward_cuda(b, c, n, m, grad_out_data, idx, grad_features.data)
return grad_features, None
gathering = Gathering.apply
class NearestNeighbor(Function):
@staticmethod
def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Find the three nearest neighbors of unknown in known
input: unknown: (b, n, 3), known: (b, m, 3)
output: dist2: (b, n, 3) l2 distance to the three nearest neighbors
idx: (b, n, 3) index of 3 nearest neighbors
"""
assert unknown.is_contiguous()
assert known.is_contiguous()
b, n, _ = unknown.size()
m = known.size(1)
dist2 = torch.cuda.FloatTensor(b, n, 3)
idx = torch.cuda.IntTensor(b, n, 3)
pointops_cuda.nearestneighbor_cuda(b, n, m, unknown, known, dist2, idx)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
nearestneighbor = NearestNeighbor.apply
class Interpolation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
"""
Performs weight linear interpolation on 3 features
input: features: (b, c, m) features descriptors to be interpolated from
idx: (b, n, 3) three nearest neighbors of the target features in features
weight: (b, n, 3) weights
output: (b, c, n) tensor of the interpolated features
"""
assert features.is_contiguous()
assert idx.is_contiguous()
assert weight.is_contiguous()
b, c, m = features.size()
n = idx.size(1)
ctx.interpolation_for_backward = (idx, weight, m)
output = torch.cuda.FloatTensor(b, c, n)
pointops_cuda.interpolation_forward_cuda(b, c, m, n, features, idx, weight, output)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
input: grad_out: (b, c, n)
output: grad_features: (b, c, m), None, None
"""
idx, weight, m = ctx.interpolation_for_backward
b, c, n = grad_out.size()
grad_features = torch.cuda.FloatTensor(b, c, m).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.interpolation_backward_cuda(b, c, n, m, grad_out_data, idx, weight, grad_features.data)
return grad_features, None, None
interpolation = Interpolation.apply
class Grouping(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
        input: features: (b, c, n), idx : (b, m, nsample) containing the indices of features to group with
output: (b, c, m, nsample)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
_, m, nsample = idx.size()
output = torch.cuda.FloatTensor(b, c, m, nsample)
pointops_cuda.grouping_forward_cuda(b, c, n, m, nsample, features, idx, output)
ctx.for_backwards = (idx, n)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
input: grad_out: (b, c, m, nsample)
output: (b, c, n), None
"""
idx, n = ctx.for_backwards
b, c, m, nsample = grad_out.size()
grad_features = torch.cuda.FloatTensor(b, c, n).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.grouping_backward_cuda(b, c, n, m, nsample, grad_out_data, idx, grad_features.data)
return grad_features, None
grouping = Grouping.apply
class GroupingInt(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
        input: features: (b, c, n), idx : (b, m, nsample) containing the indices of features to group with
output: (b, c, m, nsample)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
_, m, nsample = idx.size()
output = torch.cuda.LongTensor(b, c, m, nsample)
pointops_cuda.grouping_int_forward_cuda(b, c, n, m, nsample, features, idx, output)
return output
@staticmethod
def backward(ctx, a=None):
return None, None
grouping_int = GroupingInt.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
"""
input: radius: float, radius of the balls
nsample: int, maximum number of features in the balls
xyz: torch.Tensor, (b, n, 3) xyz coordinates of the features
new_xyz: torch.Tensor, (b, m, 3) centers of the ball query
        output: (b, m, nsample) tensor with the indices of the features that form the query balls
"""
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
b, n, _ = xyz.size()
m = new_xyz.size(1)
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
pointops_cuda.ballquery_cuda(b, n, m, radius, nsample, new_xyz, xyz, idx)
return idx
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ballquery = BallQuery.apply
class FeatureDistribute(Function):
@staticmethod
def forward(ctx, max_xyz: torch.Tensor, xyz: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param max_xyz: (b, n, 3)
:param xyz: (b, m, 3)
:return: distribute_idx: (b, m)
"""
assert max_xyz.is_contiguous()
assert xyz.is_contiguous()
b, n, _ = max_xyz.size()
m = xyz.size(1)
distribute_idx = torch.cuda.IntTensor(b, m).zero_()
pointops_cuda.featuredistribute_cuda(b, n, m, max_xyz, xyz, distribute_idx)
return distribute_idx
@staticmethod
def backward(ctx, a=None):
return None, None
featuredistribute = FeatureDistribute.apply
class FeatureGather(Function):
@staticmethod
def forward(ctx, max_feature: torch.Tensor, distribute_idx: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param max_feature: (b, c, n)
:param distribute_idx: (b, m)
:return: distribute_feature: (b, c, m)
'''
assert max_feature.is_contiguous()
assert distribute_idx.is_contiguous()
b, c, n = max_feature.size()
m = distribute_idx.size(1)
distribute_feature = torch.cuda.FloatTensor(b, c, m).zero_()
pointops_cuda.featuregather_forward_cuda(b, n, m, c, max_feature, distribute_idx, distribute_feature)
ctx.for_backwards = (distribute_idx, n)
return distribute_feature
@staticmethod
def backward(ctx, grad_distribute_feature: torch.Tensor):
'''
:param ctx:
:param grad_distribute_feature: (b, c, m)
:return: grad_max_feature: (b, c, n), None
'''
distribute_idx, n = ctx.for_backwards
b, c, m = grad_distribute_feature.size()
grad_max_feature = torch.cuda.FloatTensor(b, c, n).zero_()
grad_distribute_feature_data = grad_distribute_feature.data.contiguous()
pointops_cuda.featuregather_backward_cuda(b, n, m, c, grad_distribute_feature_data, distribute_idx, grad_max_feature.data)
return grad_max_feature, None
featuregather = FeatureGather.apply
class LabelStatBallRange(Function):
@staticmethod
def forward(ctx, radius: float, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param radius:
:param xyz: (b, n, 3)
:param new_xyz: (b, m, 3)
:param label_stat: (b, n, nclass)
:return: new_label_stat: (b, m, nclass)
'''
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
assert label_stat.is_contiguous()
b, n, nclass = label_stat.size()
m = new_xyz.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
pointops_cuda.labelstat_ballrange_cuda(b, n, m, radius, nclass, new_xyz, xyz, label_stat, new_label_stat)
return new_label_stat
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
labelstat_ballrange = LabelStatBallRange.apply
class LabelStatIdx(Function):
@staticmethod
def forward(ctx, nsample: int, label_stat: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param nsample:
:param label_stat: (b, n, nclass)
:param idx: (b, m, nsample)
:return: new_label_stat: (b, m, nclass)
'''
assert label_stat.is_contiguous()
assert idx.is_contiguous()
b, n, nclass = label_stat.size()
m = idx.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
pointops_cuda.labelstat_idx_cuda(b, n, m, nsample, nclass, label_stat, idx, new_label_stat)
return new_label_stat
@staticmethod
def backward(ctx, a=None):
return None, None, None
labelstat_idx = LabelStatIdx.apply
class LabelStatAndBallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor):
'''
:param ctx:
:param radius:
:param nsample:
:param xyz: (b, n, 3)
:param new_xyz: (b, m, 3)
:param label_stat: (b, n, nclass)
:return: new_label_stat: (b, m, nclass) idx: (b, m, nsample)
'''
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
assert label_stat.is_contiguous()
b, n, nclass = label_stat.size()
m = new_xyz.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
pointops_cuda.labelstat_and_ballquery_cuda(b, n, m, radius, nsample, nclass, new_xyz, xyz, label_stat, idx, new_label_stat)
return new_label_stat, idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None, None, None, None
labelstat_and_ballquery = LabelStatAndBallQuery.apply
def pairwise_distances(x, y=None):
'''
Input: x is a Nxd matrix
           y is an optional Mxd matrix
Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]
if y is not given then use 'y=x'.
i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
'''
x_norm = (x ** 2).sum(1).view(-1, 1)
if y is not None:
y_t = torch.transpose(y, 0, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
else:
y_t = torch.transpose(x, 0, 1)
y_norm = x_norm.view(1, -1)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
import numpy as np
return torch.clamp(dist, 0.0, np.inf)
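# Quick sanity check (illustrative, not part of the original file): the result should
# match the squared Euclidean distances from torch.cdist up to numerical error.
#   x, y = torch.rand(5, 3), torch.rand(7, 3)
#   assert torch.allclose(pairwise_distances(x, y), torch.cdist(x, y) ** 2, atol=1e-5)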
class KNNQueryNaive(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
        input: nsample: int32, Number of neighbors
               xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: idx: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
b, m, _ = new_xyz.size()
n = xyz.size(1)
'''
idx = torch.zeros(b, m, nsample).int().cuda()
for i in range(b):
dist = pairwise_distances(new_xyz[i, :, :], xyz[i, :, :])
[_, idxs] = torch.sort(dist, dim=1)
idx[i, :, :] = idxs[:, 0:nsample]
'''
# '''
# new_xyz_repeat = new_xyz.repeat(1, 1, n).view(b, m * n, 3)
# xyz_repeat = xyz.repeat(1, m, 1).view(b, m * n, 3)
# dist = (new_xyz_repeat - xyz_repeat).pow(2).sum(dim=2).view(b, m, n)
dist = (new_xyz.repeat(1, 1, n).view(b, m * n, 3) - xyz.repeat(1, m, 1).view(b, m * n, 3)).pow(2).sum(dim=2).view(b, m, n)
[_, idxs] = torch.sort(dist, dim=2)
idx = idxs[:, :, 0:nsample].int()
# '''
return idx
@staticmethod
def backward(ctx):
return None, None, None
knnquery_naive = KNNQueryNaive.apply
class KNNQuery(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
        input: nsample: int32, Number of neighbors
               xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: idx: (b, m, nsample)
( dist2: (b, m, nsample) )
"""
if new_xyz is None:
new_xyz = xyz
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
b, m, _ = new_xyz.size()
n = xyz.size(1)
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
dist2 = torch.cuda.FloatTensor(b, m, nsample).zero_()
pointops_cuda.knnquery_cuda(b, n, m, nsample, xyz, new_xyz, idx, dist2)
return idx
@staticmethod
def backward(ctx, a=None):
return None, None, None
knnquery = KNNQuery.apply
class KNNQueryExclude(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
        input: nsample: int32, Number of neighbors
               xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: new_features: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
b, m, _ = new_xyz.size()
n = xyz.size(1)
'''
idx = torch.zeros(b, m, nsample).int().cuda()
for i in range(b):
dist = pairwise_distances(new_xyz[i, :, :], xyz[i, :, :])
[_, idxs] = torch.sort(dist, dim=1)
idx[i, :, :] = idxs[:, 0:nsample]
'''
# '''
# new_xyz_repeat = new_xyz.repeat(1, 1, n).view(b, m * n, 3)
# xyz_repeat = xyz.repeat(1, m, 1).view(b, m * n, 3)
# dist = (new_xyz_repeat - xyz_repeat).pow(2).sum(dim=2).view(b, m, n)
dist = (new_xyz.repeat(1, 1, n).view(b, m * n, 3) - xyz.repeat(1, m, 1).view(b, m * n, 3)).pow(2).sum(dim=2).view(b, m, n)
[_, idxs] = torch.sort(dist, dim=2)
idx = idxs[:, :, 1:nsample+1].int()
# '''
return idx
@staticmethod
def backward(ctx):
return None, None, None
knnquery_exclude = KNNQueryExclude.apply
class Le_QueryAndGroup_SameSize(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup_SameSize, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, n, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
assert xyz.size() == new_xyz.size()
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have features=None and use_xyz=False!"
new_features = grouped_xyz
return grouped_xyz, new_features
class QueryAndGroup(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have features=None and use_xyz=False!"
new_features = grouped_xyz
return new_features
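# Usage sketch (illustrative, not part of the original file): group per-point features
# around sampled centroids with a kNN query (radius=None) on CUDA tensors.
#   grouper = QueryAndGroup(radius=None, nsample=32, use_xyz=True)
#   xyz = torch.rand(2, 1024, 3).cuda()        # (b, n, 3) all points
#   new_xyz = torch.rand(2, 256, 3).cuda()     # (b, m, 3) centroids
#   feats = torch.rand(2, 64, 1024).cuda()     # (b, c, n) per-point features
#   grouped = grouper(xyz, new_xyz, feats)     # (b, c+3, m, nsample)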
class QueryAndGroup_Dilate(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(QueryAndGroup_Dilate, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, 2*self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(2*self.nsample, xyz, new_xyz) # (b, m, nsample)
idx2 = np.array([i for i in range(2*self.nsample)])
np.random.shuffle(idx2)
idx2 = idx2[:self.nsample]
idx = idx[:, :, idx2]
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have features=None and use_xyz=False!"
new_features = grouped_xyz
return new_features
class Le_QueryAndGroup(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have features=None and use_xyz=False!"
new_features = grouped_xyz
return grouped_xyz, new_features
class Gen_QueryAndGroupXYZ(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Gen_QueryAndGroupXYZ, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
#def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
#if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous() # BxNx3 -> Bx3xN
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
return grouped_xyz
class Le_QueryAndGroup_OnlyFeature(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup_OnlyFeature, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
#xyz_trans = xyz.transpose(1, 2).contiguous()
#grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
#grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have features=None and use_xyz=False!"
            # No features were supplied: fall back to grouping the raw point coordinates.
            new_features = grouping(xyz.transpose(1, 2).contiguous(), idx)
return new_features
class GroupAll(nn.Module):
"""
Groups all features
"""
def __init__(self, use_xyz: bool = True):
super(GroupAll, self).__init__()
self.use_xyz = use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: ignored
features: (b, c, n) descriptors of the features
output: new_features: (b, c+3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, 1, n)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
return new_features
| 2.5625 | 3 |
core/src/zeit/cms/content/caching.py | rickdg/vivi | 5 | 3670 | <reponame>rickdg/vivi
from collections import defaultdict
from logging import getLogger
from operator import itemgetter
from os import environ
from time import time
from zope.cachedescriptors.property import Lazy as cachedproperty
from zeit.cms.content.sources import FEATURE_TOGGLES
from zope.component import getUtility
from zeit.connector.interfaces import IConnector
from zeit.connector.filesystem import Connector
log = getLogger(__name__)
class ContentCache(object):
@cachedproperty
def cache(self):
size = environ.get('CONTENT_CACHE_SIZE')
check = environ.get('CONTENT_CACHE_CHECK')
connector = getUtility(IConnector)
if size is not None and type(connector) is Connector:
self.size = int(size)
self.check = int(check) if check is not None else self.size / 5
self.connector = connector
self.cache = defaultdict(lambda: dict(used=0, mtimes={}, data={}))
self.hits = self.misses = 0
log.info('initialized content cache (size %s)', size)
return self.cache
else:
return None
def get(self, unique_id, key, factory, suffix=''):
cache = self.cache
if cache is None or not FEATURE_TOGGLES.find('content_caching'):
return factory()
try:
mtime = int(self.connector.mtime(unique_id, suffix))
except (ValueError, TypeError):
mtime = None
if mtime is None:
return factory()
obj = cache[unique_id]
obj['used'] += 1
obj['last'] = time()
if mtime != obj['mtimes'].get(suffix):
obj['data'].clear()
obj['mtimes'][suffix] = mtime
cache = obj['data']
if key not in cache:
cache[key] = factory()
self.misses += 1
log.debug('added %s (%s)', key, mtime)
if self.misses % self.check == 0:
self.cleanup()
else:
self.hits += 1
return cache[key]
def cleanup(self):
cache = self.cache
over = len(cache) - self.size
log.info('size: %d/%d, hits: %d, misses: %d',
over + self.size, self.size, self.hits, self.misses)
if over > 0:
log.debug('removing %d items', over)
last = sorted((cache[uid]['last'], uid) for uid in cache)
for _, (_, uid) in zip(range(over), last):
del cache[uid]
@property
def usage(self):
cache = self.cache
stats = (dict(uid=uid, used=cache[uid]['used']) for uid in cache)
return sorted(stats, key=itemgetter('used'))
def info(self):
cache = self.cache
usage = {info['uid']: info['used'] for info in reversed(self.usage)}
return dict(
size=self.size,
count=len(cache),
hits=self.hits,
misses=self.misses,
usage=usage)
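# Illustrative sketch (not part of the original module) of the module-level ``get``
# helper exported below: the factory callable is only invoked on a cache miss or when
# the connector reports a newer mtime for the resource; ``expensive_parse`` is a
# hypothetical stand-in for the caller's real factory.
#   body = get(unique_id, 'body', lambda: expensive_parse(unique_id))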
__cache = ContentCache()
get = __cache.get
info = __cache.info
| 1.914063 | 2 |
genesis/project.py | genialis/genesis-genapi | 3 | 3671 | <reponame>genialis/genesis-genapi
"""Project"""
from __future__ import absolute_import, division, print_function, unicode_literals
class GenProject(object):
"""Genesais project annotation."""
def __init__(self, data, gencloud):
for field in data:
setattr(self, field, data[field])
self.gencloud = gencloud
self.id = getattr(self, 'id', None) # pylint: disable=invalid-name
self.name = getattr(self, 'name', None)
def data_types(self):
"""Return a list of data types."""
data = self.gencloud.project_data(self.id)
return sorted(set(d.type for d in data))
def data(self, **query):
"""Query for Data object annotation."""
data = self.gencloud.project_data(self.id)
query['case_ids__contains'] = self.id
ids = set(d['id'] for d in self.gencloud.api.dataid.get(**query)['objects'])
return [d for d in data if d.id in ids]
def find(self, filter_str):
"""Filter Data object annotation."""
raise NotImplementedError()
def __str__(self):
return self.name or 'n/a'
def __repr__(self):
return u"GenProject: {} - {}".format(self.id, self.name)
| 2.53125 | 3 |
account/views.py | KimSoungRyoul/drf_unitteset_study_project | 0 | 3672 | <reponame>KimSoungRyoul/drf_unitteset_study_project<gh_stars>0
# Create your views here.
from django.db.models import QuerySet
from django.utils.decorators import method_decorator
from drf_yasg.utils import swagger_auto_schema
from rest_framework import viewsets, status
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import mixins
from account.documents import DjangoFilterDescriptionInspector
from account.models import Customer
from account.serializers import CustomerInfoSerializer, SignUpFormSerializer
@method_decorator(name='retrieve', decorator=swagger_auto_schema(
operation_description="회원 개인정보 조회 API",
filter_inspectors=[DjangoFilterDescriptionInspector],
))
@method_decorator(name='create', decorator=swagger_auto_schema(
operation_description="회원 가입 API",
))
@method_decorator(name='update', decorator=swagger_auto_schema(
operation_description="회원 정보 수정 API",
))
@method_decorator(name='destroy', decorator=swagger_auto_schema(
operation_description="회원 탈퇴 API",
))
class CustomerAPIViewSet(mixins.CreateModelMixin,
mixins.DestroyModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
queryset: QuerySet = Customer.objects
permission_classes = (IsAuthenticated,)
http_method_names = ['get', 'post', 'put', 'delete']
def get_serializer_class(self):
if self.request.method == 'POST':
return SignUpFormSerializer
elif self.request.method == 'GET':
return CustomerInfoSerializer
elif self.request.method == 'PUT':
return SignUpFormSerializer
elif self.request.method == 'DELETE':
return SignUpFormSerializer
    def get_permissions(self):
        if self.request.method == 'POST':
            # Anyone may sign up; all other actions require an authenticated user.
            permission_classes = [AllowAny]
        else:
            permission_classes = self.permission_classes
        return [permission() for permission in permission_classes]
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response({'id': serializer.data['id']}, status=status.HTTP_201_CREATED, headers=headers)
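# Illustrative wiring sketch (not part of the original file); this normally lives in
# urls.py rather than views.py, and the route prefix is a placeholder.
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'customers', CustomerAPIViewSet, basename='customer')
#   urlpatterns = router.urls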
| 2.03125 | 2 |
src/front-door/azext_front_door/_validators.py | Mannan2812/azure-cli-extensions | 207 | 3673 | <reponame>Mannan2812/azure-cli-extensions<gh_stars>100-1000
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse
def get_name_or_id_validator(dest, child_type=None, resource_type='Frontdoors', resource_namespace='Microsoft.Network',
resource_name_dest='front_door_name'):
def _validate_name_or_id(cmd, namespace):
from azure.cli.core.commands.client_factory import get_subscription_id
from msrestazure.tools import is_valid_resource_id, resource_id
subscription_id = get_subscription_id(cmd.cli_ctx)
resource_group = namespace.resource_group_name
names_or_ids = getattr(namespace, dest)
is_list = True
# treat single values as a list, but convert back in the end
if not isinstance(names_or_ids, list):
is_list = False
names_or_ids = [names_or_ids]
if names_or_ids == [None] or not names_or_ids:
return
ids = []
for val in names_or_ids:
id_params = {
'subscription': subscription_id,
'resource_group': resource_group,
'namespace': resource_namespace,
'type': resource_type,
'name': getattr(namespace, resource_name_dest) if child_type else val,
'child_type_1': child_type,
'child_name_1': val if child_type else None
}
if not is_valid_resource_id(val):
val = resource_id(**id_params)
ids.append(val)
setattr(namespace, dest, ids if is_list else ids[0])
return _validate_name_or_id
def validate_waf_policy(cmd, namespace):
get_name_or_id_validator(
dest='waf_policy',
resource_type='WebApplicationFirewallPolicy'
)(cmd, namespace)
def validate_keyvault(cmd, namespace):
get_name_or_id_validator(
dest='vault',
resource_type='vaults',
resource_namespace='Microsoft.Keyvault'
)(cmd, namespace)
def validate_load_balancing_settings(cmd, namespace):
get_name_or_id_validator('load_balancing_settings', 'loadBalancingSettings')(cmd, namespace)
def validate_probe_settings(cmd, namespace):
get_name_or_id_validator('probe_settings', 'healthProbeSettings')(cmd, namespace)
def validate_frontend_endpoints(cmd, namespace):
get_name_or_id_validator('frontend_endpoints', 'frontendEndpoints')(cmd, namespace)
def validate_backend_pool(cmd, namespace):
get_name_or_id_validator('backend_pool', 'backendPools')(cmd, namespace)
def validate_rules_engine(cmd, namespace):
get_name_or_id_validator('rules_engine', 'rulesEngines')(cmd, namespace)
# pylint: disable=protected-access
class MatchConditionAction(argparse._AppendAction):
# pylint: disable=no-self-use
def parse_match_condition(self, values):
from azext_front_door.vendored_sdks.models import MatchCondition
if not isinstance(values, list):
values = values.split(' ')
try:
return MatchCondition(
match_variable=values[0],
operator=values[1],
match_value=values[2:]
)
except IndexError:
from knack.util import CLIError
raise CLIError('usage error: --match-condition VARIABLE OPERATOR [VALUE [VALUE ...]]')
def __call__(self, parser, namespace, values, option_string=None):
match_condition = self.parse_match_condition(values)
super(MatchConditionAction, self).__call__(parser, namespace, match_condition, option_string)
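# Illustrative sketch (not part of the original file): on the CLI a match condition is
# passed as space-separated tokens, e.g.
#   --match-condition RequestHeader Contains windows firefox
# which parse_match_condition turns into
#   MatchCondition(match_variable='RequestHeader', operator='Contains',
#                  match_value=['windows', 'firefox'])
# The variable/operator literals above are examples, not an exhaustive list.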
| 1.992188 | 2 |
mimesis/data/int/development.py | DevAerial/mimesis | 0 | 3674 | <filename>mimesis/data/int/development.py
"""Provides all the data related to the development."""
LICENSES = [
"Apache License, 2.0 (Apache-2.0)",
"The BSD 3-Clause License",
"The BSD 2-Clause License",
"GNU General Public License (GPL)",
"General Public License (LGPL)",
"MIT License (MIT)",
"Mozilla Public License 2.0 (MPL-2.0)",
"Common Development and Distribution License (CDDL-1.0)",
"Eclipse Public License (EPL-1.0)",
]
PROGRAMMING_LANGS = [
"ASP",
"Assembly",
"AutoIt",
"Awk",
"Bash",
"C",
"C Shell",
"C#",
"C++",
"Caml",
"Ceylon",
"Clojure",
"CoffeeScript",
"Common Lisp",
"D",
"Dart",
"Delphi",
"Dylan",
"ECMAScript",
"Elixir",
"Emacs Lisp",
"Erlang",
"F#",
"Falcon",
"Fortran",
"GNU Octave",
"Go",
"Groovy",
"Haskell",
"haXe",
"Io",
"J#",
"Java",
"JavaScript",
"Julia",
"Kotlin",
"Lisp",
"Lua",
"Mathematica",
"Objective-C",
"OCaml",
"Perl",
"PHP",
"PL-I",
"PL-SQL",
"PowerShell",
"Prolog",
"Python",
"R",
"Racket",
"Ruby",
"Rust",
"Scala",
"Scheme",
"Smalltalk",
"Tcl",
"Tex",
"Transact-SQL",
"TypeScript",
"Z shell",
]
OS = [
"Arch",
"CentOS",
"Debian",
"Fedora",
"FreeBSD",
"Gentoo",
"Kali",
"Lubuntu",
"Manjaro",
"Mint",
"OS X",
"macOS",
"OpenBSD",
"PCLinuxOS",
"Slackware",
"Ubuntu",
"Windows 10",
"Windows 7",
"Windows 8",
"Windows 8.1",
"Zorin",
"elementaryOS",
"macOS",
"openSUSE",
]
FOLDERS = [
"Development",
"Downloads",
"Documents",
"Music",
"Video",
"Work",
"Pictures",
"Desktop",
"Study",
]
PROJECT_NAMES = [
"aardonyx",
"abelisaurus",
"achelousaurus",
"achillobator",
"acrocanthosaurus",
"aegyptosaurus",
"afrovenator",
"agilisaurus",
"alamosaurus",
"albertaceratops",
"albertosaurus",
"alectrosaurus",
"alioramus",
"allosaurus",
"alvarezsaurus",
"amargasaurus",
"ammosaurus",
"ampelosaurus",
"amygdalodon",
"anatotitan",
"anchiceratops",
"anchisaurus",
"ankylosaurus",
"anserimimus",
"antarctopelta",
"antarctosaurus",
"apatosaurus",
"aragosaurus",
"aralosaurus",
"archaeoceratops",
"archaeopteryx",
"archaeornithomimus",
"argentinosaurus",
"arrhinoceratops",
"atlascopcosaurus",
"aucasaurus",
"austrosaurus",
"avaceratops",
"avalonia",
"avimimus",
"azendohsaurus",
"bactrosaurus",
"bagaceratops",
"bambiraptor",
"barapasaurus",
"barosaurus",
"baryonyx",
"becklespinax",
"beipiaosaurus",
"bellusaurus",
"borogovia",
"brachiosaurus",
"brachyceratops",
"bugenasaura",
"buitreraptor",
"camarasaurus",
"camptosaurus",
"carnotaurus",
"caudipteryx",
"cedarpelta",
"centrosaurus",
"ceratosaurus",
"cetiosauriscus",
"cetiosaurus",
"chaoyangsaurus",
"chasmosaurus",
"chialingosaurus",
"chindesaurus",
"chinshakiangosaurus",
"chirostenotes",
"chubutisaurus",
"chungkingosaurus",
"citipati",
"coelophysis",
"coelurus",
"coloradisaurus",
"compsognathus",
"conchoraptor",
"confuciusornis",
"corythosaurus",
"cryolophosaurus",
"dacentrurus",
"daspletosaurus",
"datousaurus",
"deinocheirus",
"deinonychus",
"deltadromeus",
"diceratops",
"dicraeosaurus",
"dilophosaurus",
"diplodocus",
"dracorex",
"dravidosaurus",
"dromaeosaurus",
"dromiceiomimus",
"dryosaurus",
"dryptosaurus",
"dubreuillosaurus",
"edmontonia",
"edmontosaurus",
"einiosaurus",
"elaphrosaurus",
"emausaurus",
"eolambia",
"eoraptor",
"eotyrannus",
"equijubus",
"erketu",
"erlikosaurus",
"euhelopus",
"euoplocephalus",
"europasaurus",
"euskelosaurus",
"eustreptospondylus",
"fukuiraptor",
"fukuisaurus",
"gallimimus",
"gargoyleosaurus",
"garudimimus",
"gasosaurus",
"gasparinisaura",
"gastonia",
"giganotosaurus",
"gilmoreosaurus",
"giraffatitan",
"gobisaurus",
"gorgosaurus",
"goyocephale",
"graciliceratops",
"gryposaurus",
"guaibasaurus",
"guanlong",
"hadrosaurus",
"hagryphus",
"haplocanthosaurus",
"harpymimus",
"herrerasaurus",
"hesperosaurus",
"heterodontosaurus",
"homalocephale",
"huayangosaurus",
"hylaeosaurus",
"hypacrosaurus",
"hypselosaurus",
"hypsilophodon",
"iguanodon",
"indosuchus",
"ingenia",
"irritator",
"isisaurus",
"janenschia",
"jaxartosaurus",
"jingshanosaurus",
"jinzhousaurus",
"jobaria",
"juravenator",
"kentrosaurus",
"khaan",
"kotasaurus",
"kritosaurus",
"lamaceratops",
"lambeosaurus",
"lapparentosaurus",
"leaellynasaura",
"leptoceratops",
"lesothosaurus",
"lexovisaurus",
"liaoceratops",
"liaoxiornis",
"ligabuesaurus",
"liliensternus",
"lophorhothon",
"lophostropheus",
"lufengosaurus",
"lurdusaurus",
"lycorhinus",
"magyarosaurus",
"maiasaura",
"majungatholus",
"malawisaurus",
"mamenchisaurus",
"mapusaurus",
"marshosaurus",
"masiakasaurus",
"massospondylus",
"maxakalisaurus",
"megalosaurus",
"melanorosaurus",
"metriacanthosaurus",
"microceratops",
"micropachycephalosaurus",
"microraptor",
"minmi",
"monolophosaurus",
"mononykus",
"mussaurus",
"muttaburrasaurus",
"nanotyrannus",
"nanshiungosaurus",
"nemegtosaurus",
"neovenator",
"neuquenosaurus",
"nigersaurus",
"nipponosaurus",
"noasaurus",
"nodosaurus",
"nomingia",
"nothronychus",
"nqwebasaurus",
"omeisaurus",
"ornitholestes",
"ornithomimus",
"orodromeus",
"oryctodromeus",
"othnielia",
"ouranosaurus",
"oviraptor",
"rebbachisaurus",
"rhabdodon",
"rhoetosaurus",
"rinchenia",
"riojasaurus",
"rugops",
"saichania",
"saltasaurus",
"saltopus",
"sarcosaurus",
"saurolophus",
"sauropelta",
"saurophaganax",
"saurornithoides",
"scelidosaurus",
"scutellosaurus",
"secernosaurus",
"segisaurus",
"segnosaurus",
"seismosaurus",
"shamosaurus",
"shanag",
"shantungosaurus",
"shunosaurus",
"shuvuuia",
"silvisaurus",
"sinocalliopteryx",
"sinornithosaurus",
"sinosauropteryx",
"sinraptor",
"sinvenator",
"zalmoxes",
"zephyrosaurus",
"zuniceratops",
"byzantine",
"svengali",
"accolade",
"acrimony",
"angst",
"anomaly",
"antidote",
"baroque",
"bona_fide",
"bourgeois",
"bravado",
"brogue",
"brusque",
"cacophony",
"caustic",
"charisma",
"cloying",
"deja-vu",
"dichotomy",
"elan",
"ennui",
"epitome",
"esoteric",
"euphemism",
"faux pas",
"fiasco",
"finagle",
"glib",
"harbinger",
"hedonist",
"heresy",
"idyllic",
"insidious",
"junket",
"kitsch",
"litany",
"lurid",
"malaise",
"malinger",
"mantra",
"maudlin",
"mercenary",
"misnomer",
"nirvana",
"oblivion",
"ogle",
"ostracize",
"panacea",
"paradox",
"peevish",
"propriety",
"revel",
"rhetoric",
"spartan",
"stigma",
"stoic",
"suave",
"sycophant",
"tirade",
"tryst",
"untenable",
"vicarious",
"vile",
"waft",
"zealous",
]
| 1.515625 | 2 |
docs/mathparse.py | pcmoritz/flow | 16 | 3675 | <filename>docs/mathparse.py
"""
A preliminary attempt at parsing an RST file's math syntax
in order to make math render as inline rather than display
mode. This doesn't work as of yet but might be useful.
It could, however, be not useful if there's a pandoc option
for converting .md to .rst that makes math inline and not
display. Keeping it around, though.
"""
import re
s = """Define
.. math:: v_{des}
as the desired velocity,
.. math:: 1^k
a vector of ones of length"""
with open('/Users/nishant/Downloads/tutorialtest.rst', 'r') as myfile:
s = myfile.read()
print([elem[11:-2] for elem in re.findall('\n.. math:: *\S*\n\n', s)])
| 3.015625 | 3 |
lib/layout/primitives.py | tailhook/pyzza | 2 | 3676 | from layout import Shape, Widget
from flash.text.engine import TextBlock, TextElement
@package('layout')
class Poly(Shape):
__slots__ = ('fillcolor', 'sequence')
def __init__(self, name, fillcolor, seq, states):
super().__init__(name, states)
self.fillcolor = fillcolor
self.sequence = seq
def draw(self, w, h):
g = self.graphics
g.clear()
for line in values(self.sequence):
g.beginFill(self.fillcolor)
g.moveTo(int(line[0][0]*w), int(line[0][1]*h))
for idx in range(1, line.length):
g.lineTo(int(line[idx][0]*w), int(line[idx][1]*h))
g.endFill()
@package('layout')
class RoundRect(Shape):
__slots__ = ('fillcolor', 'radius')
def __init__(self, name, fillcolor, radius, states):
super().__init__(name, states)
self.fillcolor = fillcolor
self.radius = radius
def draw(self, width, height):
g = self.graphics
g.clear()
g.beginFill(self.fillcolor)
g.drawRoundRect(0, 0, width, height, self.radius, self.radius)
g.endFill()
@package('layout')
class TextLine(Widget):
__slots__ = ('format', 'text', 'textline')
def __init__(self, format, text, name, states):
self.format = format
self.text = text
super().__init__(name, states)
def draw(self, width, height):
if self.textline:
self.removeChild(self.textline)
tb = TextBlock()
tb.content = TextElement(self.text, self.format)
self.textline = tb.createTextLine(None, width)
self.addChild(self.textline)
@package('layout')
class CenteredLine(TextLine):
def __init__(self, format, text, name, states):
super().__init__(format, text, name, states)
def draw(self, width, height):
super().draw(width, height)
self.textline.x = int((width - self.textline.width)/2)
self.textline.y = int((height - self.textline.height)/2)
| 2.765625 | 3 |
tests/testing_server.py | ImportTaste/WebRequest | 0 | 3677 | <gh_stars>0
import traceback
import uuid
import socket
import logging
import os
import base64
import zlib
import gzip
import time
import datetime
from http import cookies
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from threading import Thread
import WebRequest
def capture_expected_headers(expected_headers, test_context, is_chromium=False, is_selenium_garbage_chromium=False, is_annoying_pjs=False, skip_header_checks=False):
# print("Capturing expected headers:")
# print(expected_headers)
    assert isinstance(expected_headers, dict), "expected_headers must be a dict. Passed a %s" % type(expected_headers)
for key, val in expected_headers.items():
assert isinstance(key, str)
assert isinstance(val, str)
cookie_key = <KEY>
log = logging.getLogger("Main.TestServer")
sucuri_reqs_1 = 0
sucuri_reqs_2 = 0
sucuri_reqs_3 = 0
class MockServerRequestHandler(BaseHTTPRequestHandler):
def log_message(self, format, *args):
return
def validate_headers(self):
for key, value in expected_headers.items():
if (is_annoying_pjs or is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Encoding':
# So PhantomJS monkeys with accept-encoding headers
# Just ignore that particular header, I guess.
pass
# Selenium is fucking retarded, and I can't override the user-agent
# and other assorted parameters via their API at all.
elif (is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Language':
pass
elif (is_annoying_pjs or is_chromium or is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept':
pass
elif not skip_header_checks:
v1 = value.replace(" ", "")
v2 = self.headers[key]
if v2 is None:
v2 = ""
v2 = v2.replace(" ", "")
test_context.assertEqual(v1, v2, msg="Mismatch in header parameter '{}' : '{}' -> '{}' ({})".format(
key,
value,
self.headers[key],
{
'is_annoying_pjs' : is_annoying_pjs,
'is_chromium' : is_chromium,
'is_selenium_garbage_chromium' : is_selenium_garbage_chromium,
'skip_header_checks' : skip_header_checks,
},
)
)
def _get_handler(self):
# Process an HTTP GET request and return a response with an HTTP 200 status.
# print("Path: ", self.path)
# print("Headers: ", self.headers)
# print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[]))
try:
self.validate_headers()
except Exception:
self.send_response(500)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Headers failed validation!")
raise
if self.path == "/":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/favicon.ico":
self.send_response(404)
self.end_headers()
elif self.path == "/raw-txt":
self.send_response(200)
self.send_header('Content-type', "text/plain")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/html-decode":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/html/real":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Root OK?</body></html>")
elif self.path == "/compressed/deflate":
self.send_response(200)
self.send_header('Content-Encoding', 'deflate')
self.send_header('Content-type', "text/html")
self.end_headers()
inb = b"Root OK?"
cobj = zlib.compressobj(wbits=-zlib.MAX_WBITS)
t1 = cobj.compress(inb) + cobj.flush()
self.wfile.write(t1)
elif self.path == "/compressed/gzip":
self.send_response(200)
self.send_header('Content-Encoding', 'gzip')
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(gzip.compress(b"Root OK?"))
elif self.path == "/json/invalid":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"LOLWAT")
elif self.path == "/json/valid":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b'{"oh" : "hai"}')
elif self.path == "/json/no-coding":
self.send_response(200)
self.end_headers()
self.wfile.write(b'{"oh" : "hai"}')
elif self.path == "/filename/path-only.txt":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename/path-only-trailing-slash/":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename/content-disposition":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.txt")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/path-only.txt":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.txt")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-html-suffix":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.html")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-1":
self.send_response(200)
self.send_header('Content-Disposition', "filename='lolercoaster.html'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-2":
self.send_response(200)
self.send_header('Content-Disposition', "filename=\'lolercoaster.html\'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-spaces-1":
self.send_response(200)
self.send_header('Content-Disposition', "filename='loler coaster.html'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-spaces-2":
self.send_response(200)
self.send_header('Content-Disposition', "filename=\"loler coaster.html\"")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/explicit-html-mime":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.html")
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/redirect/bad-1":
self.send_response(302)
self.end_headers()
elif self.path == "/redirect/bad-2":
self.send_response(302)
self.send_header('location', "bad-2")
self.end_headers()
elif self.path == "/redirect/bad-3":
self.send_response(302)
self.send_header('location', "gopher://www.google.com")
self.end_headers()
elif self.path == "/redirect/from-1":
self.send_response(302)
self.send_header('location', "to-1")
self.end_headers()
elif self.path == "/redirect/to-1":
self.send_response(200)
self.end_headers()
self.wfile.write(b"Redirect-To-1")
elif self.path == "/redirect/from-2":
self.send_response(302)
self.send_header('uri', "to-2")
self.end_headers()
elif self.path == "/redirect/to-2":
self.send_response(200)
self.end_headers()
self.wfile.write(b"Redirect-To-2")
elif self.path == "/redirect/from-3":
self.send_response(302)
newurl = "http://{}:{}".format(self.server.server_address[0], self.server.server_address[1])
self.send_header('uri', newurl)
self.end_headers()
elif self.path == "/password/expect":
# print("Password")
# print(self.headers)
self.send_response(200)
self.end_headers()
if not 'Authorization' in self.headers:
self.wfile.write(b"Password not sent!!")
return
val = self.headers['Authorization']
passval = val.split(" ")[-1]
passstr = base64.b64decode(passval)
if passstr == b'lol:<PASSWORD>':
self.wfile.write(b"Password Ok?")
else:
self.wfile.write(b"Password Bad!")
elif self.path == "/content/have-title":
self.send_response(200)
self.end_headers()
self.wfile.write(b"<html><head><title>I can haz title?</title></head><body>This page has a title!</body></html>")
elif self.path == "/content/no-title":
self.send_response(200)
self.end_headers()
self.wfile.write(b"<html><head></head><body>This page has no title. Sadface.jpg</body></html>")
elif self.path == "/binary_ctnt":
self.send_response(200)
self.send_header('Content-type', "image/jpeg")
self.end_headers()
self.wfile.write(b"Binary!\x00\x01\x02\x03")
elif self.path == "/binary_ctnt":
self.send_response(200)
self.send_header('Content-type', "image/jpeg")
self.end_headers()
self.wfile.write(b"Binary!\x00\x01\x02\x03")
##################################################################################################################################
# Cookie stuff
##################################################################################################################################
elif self.path == '/cookie_test':
cook = cookies.SimpleCookie()
cook['cookie_test_key'] = cookie_key
cook['cookie_test_key']['path'] = "/"
cook['cookie_test_key']['domain'] = ""
expiration = datetime.datetime.now() + datetime.timedelta(days=30)
cook['cookie_test_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST")
self.send_response(200)
self.send_header('Content-type', "text/html")
self.send_header('Set-Cookie', cook['cookie_test_key'].OutputString())
self.end_headers()
self.wfile.write(b"<html><body>CF Cookie Test</body></html>")
elif self.path == '/cookie_require':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cookie_test_key' and cook_value == cookie_key:
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Cookie forwarded properly!</body></html>")
return
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Cookie is missing</body></html>")
##################################################################################################################################
# Sucuri validation
##################################################################################################################################
elif self.path == '/sucuri_shit_3':
# I'd like to get this down to just 2 requests (cookie bounce, and fetch).
# Doing that requires pulling html content out of chromium, though.
# Annoying.
nonlocal sucuri_reqs_3
sucuri_reqs_3 += 1
if sucuri_reqs_3 > 3:
raise RuntimeError("Too many requests to sucuri_shit_3 (%s)!" % sucuri_reqs_3)
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '<KEY>':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p3)?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/sucuri_shit_2':
# This particular path is the one we should already have a cookie for.
# As such, we expect one request only
nonlocal sucuri_reqs_2
sucuri_reqs_2 += 1
if sucuri_reqs_2 > 1:
raise RuntimeError("Too many requests to sucuri_shit_2 (%s)!" % sucuri_reqs_2)
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p2)?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/sucuri_shit':
nonlocal sucuri_reqs_1
sucuri_reqs_1 += 1
if sucuri_reqs_1 > 4:
raise RuntimeError("Too many requests to sucuri_shit (%s)!" % sucuri_reqs_1)
# print("Fetch for ", self.path)
# print("Cookies:", self.headers.get_all('Cookie', failobj=[]))
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '<KEY>':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target Sucuri page!</title></head><body>Sucuri Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
##################################################################################################################################
# Cloudflare validation
##################################################################################################################################
elif self.path == '/cloudflare_under_attack_shit_2':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key:
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.server_version = "cloudflare is garbage"
self.send_response(503)
self.send_header('Server', "cloudflare is garbage")
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/cloudflare_under_attack_shit':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key:
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.server_version = "cloudflare is garbage"
self.send_response(503)
self.send_header('Server', "cloudflare is garbage")
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/cdn-cgi/l/chk_jschl?jschl_vc=427c2b1cd4fba29608ee81b200e94bfa&pass=<PASSWORD>&jschl_answer=<PASSWORD>':
cook = cookies.SimpleCookie()
cook['cloudflare_validate_key'] = cookie_key
cook['cloudflare_validate_key']['path'] = "/"
cook['cloudflare_validate_key']['domain'] = ""
expiration = datetime.datetime.now() + datetime.timedelta(days=30)
cook['cloudflare_validate_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST")
self.send_response(200)
self.send_header('Content-type', "text/html")
self.send_header('Set-Cookie', cook['cloudflare_validate_key'].OutputString())
self.end_headers()
body = "<html><body>Setting cookies.<script>window.location.href='/cloudflare_under_attack_shit'</script></body></html>"
self.wfile.write(body.encode("utf-8"))
##################################################################################################################################
# Handle requests for an unknown path
##################################################################################################################################
else:
test_context.assertEqual(self.path, "This shouldn't happen!")
def do_GET(self):
# Process an HTTP GET request and return a response with an HTTP 200 status.
log.info("Request for URL path: '%s'", self.path)
# print("Headers: ", self.headers)
# print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[]))
try:
return self._get_handler()
except Exception as e:
log.error("Exception in handler!")
for line in traceback.format_exc().split("\n"):
log.error(line)
raise e
return MockServerRequestHandler
def get_free_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
def start_server(assertion_class,
from_wg,
port_override = None,
is_chromium = None,
is_selenium_garbage_chromium = False,
is_annoying_pjs = False,
skip_header_checks = False
):
# Configure mock server.
if port_override:
mock_server_port = port_override
else:
mock_server_port = get_free_port()
expected_headers = dict(from_wg.browserHeaders)
print(from_wg)
print(expected_headers)
assert isinstance(expected_headers, dict)
captured_server = capture_expected_headers(
expected_headers = expected_headers,
test_context = assertion_class,
is_chromium = is_chromium,
is_selenium_garbage_chromium = is_selenium_garbage_chromium,
is_annoying_pjs = is_annoying_pjs,
skip_header_checks = skip_header_checks
)
retries = 4
for x in range(retries + 1):
try:
mock_server = HTTPServer(('0.0.0.0', mock_server_port), captured_server)
break
except OSError:
time.sleep(0.2)
if x >= retries:
raise
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
mock_server_thread = Thread(target=mock_server.serve_forever)
    mock_server_thread.daemon = True
mock_server_thread.start()
return mock_server_port, mock_server, mock_server_thread
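# A rough usage sketch for this helper (assumptions: plain urllib against the
# mock server, and skip_header_checks=True so header validation is bypassed):
#
#     wg = WebRequest.WebGetRobust()
#     port, server, thread = start_server(None, wg, skip_header_checks=True)
#     import urllib.request
#     body = urllib.request.urlopen("http://localhost:%s/raw-txt" % port).read()
#     assert body == b"Root OK?"
#     server.shutdown()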
if __name__ == '__main__':
wg = WebRequest.WebGetRobust()
srv = start_server(
assertion_class = None,
from_wg = wg,
skip_header_checks = True)
print("running server on port: ", srv)
while 1:
time.sleep(1)
| 2.28125 | 2 |
calcgrades.py | qrowsxi/calcgrades | 0 | 3678 | import csv
import math
import numpy as np
import pandas
import scipy.optimize
import sys
import argparse
def ineq_constraint_1(v):
return np.array([vi for vi in v])
def ineq_constraint_2(v):
return np.array([-vi + 30 for vi in v])
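# The model solved below, in brief: let g_i/c_i be the grades/credits already
# earned (rows with grade > 0) and v_j/k_j the unknown grades/credits of the
# remaining courses (rows with grade == 0). The solver looks for v such that
#
#     (sum_i g_i*c_i + sum_j v_j*k_j) / (sum_i c_i + sum_j k_j) == expected_mean
#
# subject to 0 <= v_j <= 30 (the two inequality constraints above). The input
# CSV is expected to provide at least the columns: name, credits, grade.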
class WeightAverage:
def __init__(self, mean, csv):
self.df = pandas.read_csv(csv)
self.course = self.df['name']
self.expected_mean = mean
self.credits = self.df[['credits', 'grade']].query('grade == 0')[['credits']].transpose().to_numpy()[0]
self.grade_initial_sol = np.array([mean for _ in range(0, len(self.credits))])
self.owned_credits = self.df[['credits', 'grade']].query('grade > 0')[['credits']].transpose().to_numpy()[0]
self.owned_grades = self.df[['grade']].query('grade > 0').transpose().to_numpy()[0]
self.tot_credits = sum(self.owned_credits) + sum(self.credits)
def weight_average(self, v):
term1 = 0
term2 = 0
for i in range(0, len(self.owned_grades)):
term1 = term1 + self.owned_grades[i] * self.owned_credits[i]
for i in range(0, len(v)):
term2 = term2 + v[i] * self.credits[i]
return (term1 + term2) / self.tot_credits
def eq_constraint(self, v):
return self.weight_average(v) - self.expected_mean
def solve(self):
cons = (
{'type': 'eq', 'fun': self.eq_constraint},
{'type': 'ineq', 'fun': ineq_constraint_1},
{'type': 'ineq', 'fun': ineq_constraint_2})
res = scipy.optimize.minimize(self.weight_average, self.grade_initial_sol, method='SLSQP', constraints=cons)
if not res.success:
return None
return res.x
def error_no_solution():
print("Mean not possible with current vote :(")
exit(0)
def output_result(solver, sol):
    if sol is None:
        print("Not possible with current grades :(")
        exit()
    avg = solver.weight_average(sol)
    df = solver.df
    print(f"Expected mean: {avg} -> {int(round(avg / 30 * 110, 0))} / 110")
for index, row in df.query('grade > 0').iterrows():
print(f"'{row['name']}', credits: {row['credits']}, grade {row['grade']}")
i = 0
for index, row in df.query('grade == 0').iterrows():
print(f"'{row['name']}', credits: {row['credits']}, grade {int(sol[i])}")
i += 1
return 0
def main():
name = "calcGrades"
description = """CalcGrades is an utility which purpose is to compute the minimum
grades required to get a certain weight average of the grades over the credits,
given the desired output and the grades already owned."""
parser = argparse.ArgumentParser(name, description=description)
parser.add_argument('mean', metavar='M', type=float, nargs='+', help='The expected mean')
parser.add_argument('--file',dest='file', default='courses.csv', type=str,
help='path to the csv file containing the courses (default: courses.csv)')
parser.add_argument('--floor', default=False, action='store_true',
help='apply floor operation instead of round to solution')
parser.add_argument('--ceil', default=False, action='store_true',
help='apply ceil operation instead of round to solution')
args = parser.parse_args()
    # args.mean is a list because of nargs='+'; the solver expects a single float
    mean = args.mean[0]
    courses = args.file
solver = WeightAverage(mean, courses)
sol = solver.solve()
if sol is None:
error_no_solution()
if args.ceil:
sol = [math.ceil(x) for x in sol]
elif args.floor:
sol = [math.floor(x) for x in sol]
else:
sol = [round(x) for x in sol]
output_result(solver, sol)
return 0
if __name__ == '__main__':
main()
| 3.03125 | 3 |
sdk/python/pulumi_google_native/testing/v1/test_matrix.py | AaronFriel/pulumi-google-native | 44 | 3679 | <reponame>AaronFriel/pulumi-google-native
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['TestMatrixArgs', 'TestMatrix']
@pulumi.input_type
class TestMatrixArgs:
def __init__(__self__, *,
environment_matrix: pulumi.Input['EnvironmentMatrixArgs'],
result_storage: pulumi.Input['ResultStorageArgs'],
test_specification: pulumi.Input['TestSpecificationArgs'],
client_info: Optional[pulumi.Input['ClientInfoArgs']] = None,
fail_fast: Optional[pulumi.Input[bool]] = None,
flaky_test_attempts: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a TestMatrix resource.
:param pulumi.Input['EnvironmentMatrixArgs'] environment_matrix: The devices the tests are being executed on.
:param pulumi.Input['ResultStorageArgs'] result_storage: Where the results for the matrix are written.
:param pulumi.Input['TestSpecificationArgs'] test_specification: How to run the test.
:param pulumi.Input['ClientInfoArgs'] client_info: Information about the client which invoked the test.
:param pulumi.Input[bool] fail_fast: If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
:param pulumi.Input[int] flaky_test_attempts: The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
:param pulumi.Input[str] project: The cloud project that owns the test matrix.
"""
pulumi.set(__self__, "environment_matrix", environment_matrix)
pulumi.set(__self__, "result_storage", result_storage)
pulumi.set(__self__, "test_specification", test_specification)
if client_info is not None:
pulumi.set(__self__, "client_info", client_info)
if fail_fast is not None:
pulumi.set(__self__, "fail_fast", fail_fast)
if flaky_test_attempts is not None:
pulumi.set(__self__, "flaky_test_attempts", flaky_test_attempts)
if project is not None:
pulumi.set(__self__, "project", project)
if request_id is not None:
pulumi.set(__self__, "request_id", request_id)
@property
@pulumi.getter(name="environmentMatrix")
def environment_matrix(self) -> pulumi.Input['EnvironmentMatrixArgs']:
"""
The devices the tests are being executed on.
"""
return pulumi.get(self, "environment_matrix")
@environment_matrix.setter
def environment_matrix(self, value: pulumi.Input['EnvironmentMatrixArgs']):
pulumi.set(self, "environment_matrix", value)
@property
@pulumi.getter(name="resultStorage")
def result_storage(self) -> pulumi.Input['ResultStorageArgs']:
"""
Where the results for the matrix are written.
"""
return pulumi.get(self, "result_storage")
@result_storage.setter
def result_storage(self, value: pulumi.Input['ResultStorageArgs']):
pulumi.set(self, "result_storage", value)
@property
@pulumi.getter(name="testSpecification")
def test_specification(self) -> pulumi.Input['TestSpecificationArgs']:
"""
How to run the test.
"""
return pulumi.get(self, "test_specification")
@test_specification.setter
def test_specification(self, value: pulumi.Input['TestSpecificationArgs']):
pulumi.set(self, "test_specification", value)
@property
@pulumi.getter(name="clientInfo")
def client_info(self) -> Optional[pulumi.Input['ClientInfoArgs']]:
"""
Information about the client which invoked the test.
"""
return pulumi.get(self, "client_info")
@client_info.setter
def client_info(self, value: Optional[pulumi.Input['ClientInfoArgs']]):
pulumi.set(self, "client_info", value)
@property
@pulumi.getter(name="failFast")
def fail_fast(self) -> Optional[pulumi.Input[bool]]:
"""
If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
"""
return pulumi.get(self, "fail_fast")
@fail_fast.setter
def fail_fast(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "fail_fast", value)
@property
@pulumi.getter(name="flakyTestAttempts")
def flaky_test_attempts(self) -> Optional[pulumi.Input[int]]:
"""
The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
"""
return pulumi.get(self, "flaky_test_attempts")
@flaky_test_attempts.setter
def flaky_test_attempts(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "flaky_test_attempts", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The cloud project that owns the test matrix.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="requestId")
def request_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "request_id")
@request_id.setter
def request_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "request_id", value)
class TestMatrix(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_info: Optional[pulumi.Input[pulumi.InputType['ClientInfoArgs']]] = None,
environment_matrix: Optional[pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']]] = None,
fail_fast: Optional[pulumi.Input[bool]] = None,
flaky_test_attempts: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
result_storage: Optional[pulumi.Input[pulumi.InputType['ResultStorageArgs']]] = None,
test_specification: Optional[pulumi.Input[pulumi.InputType['TestSpecificationArgs']]] = None,
__props__=None):
"""
Creates and runs a matrix of tests according to the given specifications. Unsupported environments will be returned in the state UNSUPPORTED. A test matrix is limited to use at most 2000 devices in parallel. May return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed or if the matrix tries to use too many simultaneous devices.
Auto-naming is currently not supported for this resource.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ClientInfoArgs']] client_info: Information about the client which invoked the test.
:param pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']] environment_matrix: The devices the tests are being executed on.
:param pulumi.Input[bool] fail_fast: If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
:param pulumi.Input[int] flaky_test_attempts: The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
:param pulumi.Input[str] project: The cloud project that owns the test matrix.
:param pulumi.Input[pulumi.InputType['ResultStorageArgs']] result_storage: Where the results for the matrix are written.
:param pulumi.Input[pulumi.InputType['TestSpecificationArgs']] test_specification: How to run the test.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TestMatrixArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates and runs a matrix of tests according to the given specifications. Unsupported environments will be returned in the state UNSUPPORTED. A test matrix is limited to use at most 2000 devices in parallel. May return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed or if the matrix tries to use too many simultaneous devices.
Auto-naming is currently not supported for this resource.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param TestMatrixArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TestMatrixArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_info: Optional[pulumi.Input[pulumi.InputType['ClientInfoArgs']]] = None,
environment_matrix: Optional[pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']]] = None,
fail_fast: Optional[pulumi.Input[bool]] = None,
flaky_test_attempts: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
result_storage: Optional[pulumi.Input[pulumi.InputType['ResultStorageArgs']]] = None,
test_specification: Optional[pulumi.Input[pulumi.InputType['TestSpecificationArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TestMatrixArgs.__new__(TestMatrixArgs)
__props__.__dict__["client_info"] = client_info
if environment_matrix is None and not opts.urn:
raise TypeError("Missing required property 'environment_matrix'")
__props__.__dict__["environment_matrix"] = environment_matrix
__props__.__dict__["fail_fast"] = fail_fast
__props__.__dict__["flaky_test_attempts"] = flaky_test_attempts
__props__.__dict__["project"] = project
__props__.__dict__["request_id"] = request_id
if result_storage is None and not opts.urn:
raise TypeError("Missing required property 'result_storage'")
__props__.__dict__["result_storage"] = result_storage
if test_specification is None and not opts.urn:
raise TypeError("Missing required property 'test_specification'")
__props__.__dict__["test_specification"] = test_specification
__props__.__dict__["invalid_matrix_details"] = None
__props__.__dict__["outcome_summary"] = None
__props__.__dict__["state"] = None
__props__.__dict__["test_executions"] = None
__props__.__dict__["test_matrix_id"] = None
__props__.__dict__["timestamp"] = None
super(TestMatrix, __self__).__init__(
'google-native:testing/v1:TestMatrix',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'TestMatrix':
"""
Get an existing TestMatrix resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = TestMatrixArgs.__new__(TestMatrixArgs)
__props__.__dict__["client_info"] = None
__props__.__dict__["environment_matrix"] = None
__props__.__dict__["fail_fast"] = None
__props__.__dict__["flaky_test_attempts"] = None
__props__.__dict__["invalid_matrix_details"] = None
__props__.__dict__["outcome_summary"] = None
__props__.__dict__["project"] = None
__props__.__dict__["result_storage"] = None
__props__.__dict__["state"] = None
__props__.__dict__["test_executions"] = None
__props__.__dict__["test_matrix_id"] = None
__props__.__dict__["test_specification"] = None
__props__.__dict__["timestamp"] = None
return TestMatrix(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="clientInfo")
def client_info(self) -> pulumi.Output['outputs.ClientInfoResponse']:
"""
Information about the client which invoked the test.
"""
return pulumi.get(self, "client_info")
@property
@pulumi.getter(name="environmentMatrix")
def environment_matrix(self) -> pulumi.Output['outputs.EnvironmentMatrixResponse']:
"""
The devices the tests are being executed on.
"""
return pulumi.get(self, "environment_matrix")
@property
@pulumi.getter(name="failFast")
def fail_fast(self) -> pulumi.Output[bool]:
"""
If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
"""
return pulumi.get(self, "fail_fast")
@property
@pulumi.getter(name="flakyTestAttempts")
def flaky_test_attempts(self) -> pulumi.Output[int]:
"""
The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
"""
return pulumi.get(self, "flaky_test_attempts")
@property
@pulumi.getter(name="invalidMatrixDetails")
def invalid_matrix_details(self) -> pulumi.Output[str]:
"""
Describes why the matrix is considered invalid. Only useful for matrices in the INVALID state.
"""
return pulumi.get(self, "invalid_matrix_details")
@property
@pulumi.getter(name="outcomeSummary")
def outcome_summary(self) -> pulumi.Output[str]:
"""
Output Only. The overall outcome of the test. Only set when the test matrix state is FINISHED.
"""
return pulumi.get(self, "outcome_summary")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The cloud project that owns the test matrix.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter(name="resultStorage")
def result_storage(self) -> pulumi.Output['outputs.ResultStorageResponse']:
"""
Where the results for the matrix are written.
"""
return pulumi.get(self, "result_storage")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
Indicates the current progress of the test matrix.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="testExecutions")
def test_executions(self) -> pulumi.Output[Sequence['outputs.TestExecutionResponse']]:
"""
The list of test executions that the service creates for this matrix.
"""
return pulumi.get(self, "test_executions")
@property
@pulumi.getter(name="testMatrixId")
def test_matrix_id(self) -> pulumi.Output[str]:
"""
Unique id set by the service.
"""
return pulumi.get(self, "test_matrix_id")
@property
@pulumi.getter(name="testSpecification")
def test_specification(self) -> pulumi.Output['outputs.TestSpecificationResponse']:
"""
How to run the test.
"""
return pulumi.get(self, "test_specification")
@property
@pulumi.getter
def timestamp(self) -> pulumi.Output[str]:
"""
The time this test matrix was initially created.
"""
return pulumi.get(self, "timestamp")
| 1.859375 | 2 |
View/View.py | MoriokaReimen/ConfigHeaderGenerator | 0 | 3680 | import tkinter as tk
import tkinter.messagebox
from Control import Control
class View:
def __init__(self, control : Control.Control):
self.control = control
# Init Window
self.root = tk.Tk()
self.root.title(u"Header File Generator")
self.root.geometry("700x800")
self.config_frame = tk.Frame(self.root)
# Config Table
lb_symbol = tk.Label(self.config_frame, width = 20)
lb_symbol["text"] = "Symbol"
lb_symbol.grid(row = 0, column = 0)
lb_description = tk.Label(self.config_frame, width = 40)
lb_description["text"] = "Detail"
lb_description.grid(row = 0, column = 1)
lb_enable = tk.Label(self.config_frame, width = 10)
lb_enable["text"] = "Enable"
lb_enable.grid(row = 0, column = 2)
for i, config in enumerate(self.control.getConfigs()):
symbol_entry = tk.Entry(self.config_frame, width=20)
symbol_entry.insert(tk.END, config.symbol)
symbol_entry.config(state = tk.DISABLED)
symbol_entry.config(disabledforeground = "black", disabledbackground = "white")
symbol_entry.grid(row= i + 1, column = 0)
detail_entry = tk.Entry(self.config_frame, width=40)
detail_entry.insert(tk.END, config.detail)
detail_entry.config(state = tk.DISABLED)
detail_entry.config(disabledforeground = "black", disabledbackground = "white")
detail_entry.grid(row= i + 1, column = 1)
bt_enable = tk.Button(self.config_frame, text="ON", width= 5)
bt_enable["text"] = "ON" if config.enable else "OFF"
color = "green" if config.enable else "red"
bt_enable.config(bg=color, activebackground = color)
bt_enable["command"] = lambda id = i, button = bt_enable : self.toggle_config_enable(id, button)
bt_enable.grid(row = i + 1, column = 2)
self.config_frame.pack(side=tk.TOP, anchor=tk.NW)
self.value_config_frame = tk.Frame(self.root)
# Config Table
lb_symbol = tk.Label(self.value_config_frame, width = 20)
lb_symbol["text"] = "Symbol"
lb_symbol.grid(row = 0, column = 0)
lb_description = tk.Label(self.value_config_frame, width = 40)
lb_description["text"] = "Detail"
lb_description.grid(row = 0, column = 1)
lb_value = tk.Label(self.value_config_frame, width = 10)
lb_value["text"] = "Value"
lb_value.grid(row = 0, column = 2)
lb_enable = tk.Label(self.value_config_frame, width = 10)
lb_enable["text"] = "Enable"
lb_enable.grid(row = 0, column = 3)
for i, val_config in enumerate(self.control.getValConfigs()):
symbol_entry = tk.Entry(self.value_config_frame, width=20)
symbol_entry.insert(tk.END, val_config.symbol)
symbol_entry.config(state = tk.DISABLED)
symbol_entry.config(disabledforeground = "black", disabledbackground = "white")
symbol_entry.grid(row= i + 1, column = 0)
detail_entry = tk.Entry(self.value_config_frame, width=40)
detail_entry.insert(tk.END, val_config.detail)
detail_entry.config(state = tk.DISABLED)
detail_entry.config(disabledforeground = "black", disabledbackground = "white")
detail_entry.grid(row= i + 1, column = 1)
value_entry = tk.Entry(self.value_config_frame, width=10)
value_entry.insert(tk.END, val_config.value)
value_entry.config(state = tk.DISABLED)
value_entry.config(disabledforeground = "black", disabledbackground = "white")
value_entry.grid(row= i + 1, column = 2)
bt_enable = tk.Button(self.value_config_frame, text="ON", width= 5)
bt_enable["text"] = "ON" if val_config.enable else "OFF"
color = "green" if val_config.enable else "red"
bt_enable.config(bg=color, activebackground = color)
bt_enable["command"] = lambda id = i, button = bt_enable : self.toggle_val_config_enable(id, button)
bt_enable.grid(row = i + 1, column = 3)
self.value_config_frame.pack(side=tk.TOP, anchor=tk.W)
# Generator Button
self.bt_generate = tk.Button(self.root)
self.bt_generate["text"] = "Generate Header"
self.bt_generate["command"] = self.generateHeader
self.bt_generate.pack(side=tk.BOTTOM, anchor=tk.SE)
def start(self):
self.root.mainloop()
def generateHeader(self):
self.control.generateHeader()
tk.messagebox.showinfo("Header Generator Info", "Generated:{0}".format(self.control.header_config.path))
def update(self):
pass
def toggle_config_enable(self, id, button : tk.Button):
config = self.control.getConfigs()[id]
config.enable = not config.enable
button["text"] = "ON" if config.enable else "OFF"
color = "green" if config.enable else "red"
button.config(bg=color, activebackground = color)
def toggle_val_config_enable(self, id, button : tk.Button):
val_config = self.control.getValConfigs()[id]
val_config.enable = not val_config.enable
button["text"] = "ON" if val_config.enable else "OFF"
color = "green" if val_config.enable else "red"
button.config(bg=color, activebackground = color)
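# For reference, the duck-typed interface this view relies on from the injected
# Control object, as used above: getConfigs() and getValConfigs() returning
# items with .symbol, .detail, .enable (plus .value for value configs),
# generateHeader(), and a header_config.path attribute shown in the info dialog.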
| 2.859375 | 3 |
tests/bugs/core_3355_test.py | FirebirdSQL/firebird-qa | 1 | 3681 | <filename>tests/bugs/core_3355_test.py
#coding:utf-8
#
# id: bugs.core_3355
# title: Wrong comparsion of DATE and TIMESTAMP if index is used
# decription:
# tracker_id: CORE-3355
# min_versions: ['2.1.5']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """create table tdate (id integer not null primary key, val date);
create index tdateix1 on tdate (val);
commit;
insert into tdate values (0, '1997-12-31');
insert into tdate values (1, '1998-01-01');
insert into tdate values (2, '1998-01-02');
insert into tdate values (3, '1998-01-03');
insert into tdate values (4, '1998-01-04');
insert into tdate values (5, '1998-01-05');
commit;
"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """select count(*) from tdate where val >= timestamp'1998-01-04 12:00:00.0000';
select count(*) from tdate where val < timestamp'1998-01-04 12:00:00.0000';
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
COUNT
=====================
1
COUNT
=====================
5
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
| 1.84375 | 2 |
dags/download_decrypt_transfer_files.py | hms-dbmi/bch-pic-sure-airflow-dags | 0 | 3682 | <reponame>hms-dbmi/bch-pic-sure-airflow-dags
"""
@author: anilkdegala
"""
import os
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from datetime import date, timedelta, datetime
from collections import OrderedDict
from scripts.dag_pebbles import DagPebbles
from airflow.configuration import conf
from scripts.configurations import *
from airflow.operators.dummy_operator import DummyOperator
default_args = {
"owner": "anilkdegala",
"depends_on_past": True,
"max_active_runs": 1,
"start_date": datetime(2015, 6, 1),
"is_active": True,
"is_paused_upon_creation": False,
}
def begin_pipeline(**kwargs):
print("begin_pipeline:")
files = kwargs['dag_run'].conf.get('files')
download_decrypt_arguments = ''
transfer_arguments_list = []
for f in files:
print("download_decrypt_transfer_files: file: ", f['name'], ', location: ', f['path'])
output = f['name']+','+f['path']+','+f['final_name']
download_decrypt_arguments = download_decrypt_arguments + " " + output
transfer_arguments_list.append(DATA_LOCATION + "/"+f['final_name'])
transfer_arguments = ",".join(transfer_arguments_list)
print("final download_decrypt_arguments: ",download_decrypt_arguments)
print("final transfer_arguments: ",transfer_arguments)
kwargs["ti"].xcom_push(key="download_decrypt_arguments", value=download_decrypt_arguments)
kwargs["ti"].xcom_push(key="transfer_arguments", value=transfer_arguments)
def pipeline_enable_check(**kwargs):
dp = DagPebbles()
if dp.pipeline_enable_check('DATA_LOAD'):
return "pipeline_check_passed"
else:
return "pipeline_check_skipped"
def pipeline_check_passed(**kwargs):
print("pipeline_check_passed:")
def end_pipeline(**kwargs):
print("end_pipeline:")
def pipeline_check_skipped(**kwargs):
print("pipeline_check_skipped:")
def cleanup(**kwargs):
dp = DagPebbles()
print("cleanup")
def notify(**kwargs):
dp = DagPebbles()
print("notify")
def end(**kwargs):
dp = DagPebbles()
print("end")
with DAG( "DOWNLOAD_DECRYPT_TRANSFER",
description="Download, Decrypt, Transfer files (Source: S3, Staging: EC2: Target: RDS Oracle)",
default_args=default_args,
schedule_interval=None,
catchup=False,
orientation="TB",
tags=['Utils'],
dagrun_timeout=timedelta(hours=240)
) as dag:
t_pipeline_begin = PythonOperator(
task_id="begin_pipeline",
python_callable=begin_pipeline,
provide_context=True,
dag=dag,
)
t_check_pipeline = BranchPythonOperator(
task_id="check_pipeline",
python_callable=pipeline_enable_check,
provide_context=True,
dag=dag,
)
t_pipeline_check_passed = PythonOperator(
task_id="pipeline_check_passed",
python_callable=pipeline_check_passed,
provide_context=True,
dag=dag,
)
t_pipeline_check_skipped = PythonOperator(
task_id="pipeline_check_skipped",
python_callable=pipeline_check_skipped,
provide_context=True,
dag=dag,
)
download_files_cmd = "/opt/bitnami/airflow/airflow-data/scripts/download_files.sh "+"{{ ti.xcom_pull(key='download_decrypt_arguments')}}"
t_download_files = BashOperator(
task_id='download_files',
bash_command=download_files_cmd,
dag=dag)
decrypt_files_cmd = "/opt/bitnami/airflow/airflow-data/scripts/decrypt_files.sh "+"{{ ti.xcom_pull(key='download_decrypt_arguments')}} "
t_decrypt_files = BashOperator(
task_id='decrypt_files',
bash_command=decrypt_files_cmd,
dag=dag)
transfer_files_cmd = "/opt/bitnami/airflow/airflow-data/scripts/transfer_files_rds.pl "+"{{ ti.xcom_pull(key='transfer_arguments')}} "
t_transfer_files = BashOperator(
task_id='transfer_files',
bash_command=transfer_files_cmd,
dag=dag)
t_end_pipeline = PythonOperator(
task_id="end_pipeline",
python_callable=end_pipeline,
provide_context=True,
trigger_rule="none_failed",
dag=dag,
)
t_notify = PythonOperator(
task_id="send_notifications",
python_callable=notify,
provide_context=True,
trigger_rule="none_failed",
dag=dag,
)
t_cleanup = PythonOperator(
task_id="cleanup",
python_callable=cleanup,
provide_context=True,
trigger_rule="none_failed",
dag=dag,
)
t_end = PythonOperator(
task_id="end",
python_callable=end,
provide_context=True,
trigger_rule="none_failed",
dag=dag,
)
t_pipeline_begin >> t_check_pipeline
t_check_pipeline >> t_pipeline_check_skipped >> t_end_pipeline
t_check_pipeline >> t_pipeline_check_passed >> t_download_files >> t_decrypt_files >> t_transfer_files >> t_end_pipeline
t_end_pipeline >> t_cleanup >> t_notify >> t_end
| 2.1875 | 2 |
keystone-moon/keystone/endpoint_policy/controllers.py | hashnfv/hashnfv-moon | 0 | 3683 | <reponame>hashnfv/hashnfv-moon<gh_stars>0
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import controller
from keystone.common import dependency
from keystone import notifications
@dependency.requires('policy_api', 'catalog_api', 'endpoint_policy_api')
class EndpointPolicyV3Controller(controller.V3Controller):
collection_name = 'endpoints'
member_name = 'endpoint'
def __init__(self):
super(EndpointPolicyV3Controller, self).__init__()
notifications.register_event_callback(
'deleted', 'endpoint', self._on_endpoint_delete)
notifications.register_event_callback(
'deleted', 'service', self._on_service_delete)
notifications.register_event_callback(
'deleted', 'region', self._on_region_delete)
notifications.register_event_callback(
'deleted', 'policy', self._on_policy_delete)
def _on_endpoint_delete(self, service, resource_type, operation, payload):
self.endpoint_policy_api.delete_association_by_endpoint(
payload['resource_info'])
def _on_service_delete(self, service, resource_type, operation, payload):
self.endpoint_policy_api.delete_association_by_service(
payload['resource_info'])
def _on_region_delete(self, service, resource_type, operation, payload):
self.endpoint_policy_api.delete_association_by_region(
payload['resource_info'])
def _on_policy_delete(self, service, resource_type, operation, payload):
self.endpoint_policy_api.delete_association_by_policy(
payload['resource_info'])
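    # The four callbacks above keep the endpoint-policy association table
    # consistent: whenever an endpoint, service, region or policy is deleted,
    # any associations that reference it are removed as well.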
@controller.protected()
def create_policy_association_for_endpoint(self, context,
policy_id, endpoint_id):
"""Create an association between a policy and an endpoint."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_endpoint(endpoint_id)
self.endpoint_policy_api.create_policy_association(
policy_id, endpoint_id=endpoint_id)
@controller.protected()
def check_policy_association_for_endpoint(self, context,
policy_id, endpoint_id):
"""Check an association between a policy and an endpoint."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_endpoint(endpoint_id)
self.endpoint_policy_api.check_policy_association(
policy_id, endpoint_id=endpoint_id)
@controller.protected()
def delete_policy_association_for_endpoint(self, context,
policy_id, endpoint_id):
"""Delete an association between a policy and an endpoint."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_endpoint(endpoint_id)
self.endpoint_policy_api.delete_policy_association(
policy_id, endpoint_id=endpoint_id)
@controller.protected()
def create_policy_association_for_service(self, context,
policy_id, service_id):
"""Create an association between a policy and a service."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_service(service_id)
self.endpoint_policy_api.create_policy_association(
policy_id, service_id=service_id)
@controller.protected()
def check_policy_association_for_service(self, context,
policy_id, service_id):
"""Check an association between a policy and a service."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_service(service_id)
self.endpoint_policy_api.check_policy_association(
policy_id, service_id=service_id)
@controller.protected()
def delete_policy_association_for_service(self, context,
policy_id, service_id):
"""Delete an association between a policy and a service."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_service(service_id)
self.endpoint_policy_api.delete_policy_association(
policy_id, service_id=service_id)
@controller.protected()
def create_policy_association_for_region_and_service(
self, context, policy_id, service_id, region_id):
"""Create an association between a policy and region+service."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_service(service_id)
self.catalog_api.get_region(region_id)
self.endpoint_policy_api.create_policy_association(
policy_id, service_id=service_id, region_id=region_id)
@controller.protected()
def check_policy_association_for_region_and_service(
self, context, policy_id, service_id, region_id):
"""Check an association between a policy and region+service."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_service(service_id)
self.catalog_api.get_region(region_id)
self.endpoint_policy_api.check_policy_association(
policy_id, service_id=service_id, region_id=region_id)
@controller.protected()
def delete_policy_association_for_region_and_service(
self, context, policy_id, service_id, region_id):
"""Delete an association between a policy and region+service."""
self.policy_api.get_policy(policy_id)
self.catalog_api.get_service(service_id)
self.catalog_api.get_region(region_id)
self.endpoint_policy_api.delete_policy_association(
policy_id, service_id=service_id, region_id=region_id)
@controller.protected()
def get_policy_for_endpoint(self, context, endpoint_id):
"""Get the effective policy for an endpoint."""
self.catalog_api.get_endpoint(endpoint_id)
ref = self.endpoint_policy_api.get_policy_for_endpoint(endpoint_id)
# NOTE(henry-nash): since the collection and member for this class is
# set to endpoints, we have to handle wrapping this policy entity
# ourselves.
self._add_self_referential_link(context, ref)
return {'policy': ref}
# NOTE(henry-nash): As in the catalog controller, we must ensure that the
# legacy_endpoint_id does not escape.
@classmethod
def filter_endpoint(cls, ref):
if 'legacy_endpoint_id' in ref:
ref.pop('legacy_endpoint_id')
return ref
@classmethod
def wrap_member(cls, context, ref):
ref = cls.filter_endpoint(ref)
return super(EndpointPolicyV3Controller, cls).wrap_member(context, ref)
@controller.protected()
def list_endpoints_for_policy(self, context, policy_id):
"""List endpoints with the effective association to a policy."""
self.policy_api.get_policy(policy_id)
refs = self.endpoint_policy_api.list_endpoints_for_policy(policy_id)
return EndpointPolicyV3Controller.wrap_collection(context, refs)
| 1.6875 | 2 |
src/nibetaseries/cli/run.py | ipacheco-uy/NiBetaSeries | 1 | 3684 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -m nibetaseries` python will execute
``__main__.py`` as a script. That means there won't be any
``nibetaseries.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``nibetaseries.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
from __future__ import absolute_import
import os
import argparse
from argparse import RawTextHelpFormatter
from glob import glob
from multiprocessing import cpu_count
from nipype import config as ncfg
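# An illustrative command line for the parser defined below (all paths, labels
# and confound names here are hypothetical):
#
#     nibs /data/bids fmriprep /data/out participant \
#         -a atlas.nii.gz -l atlas.tsv -c white_matter csf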
def get_parser():
"""Build parser object"""
from ..__init__ import __version__
import sys
verstr = 'nibs v{}'.format(__version__)
parser = argparse.ArgumentParser(description='NiBetaSeries BIDS arguments',
formatter_class=RawTextHelpFormatter)
parser.add_argument('bids_dir', help='The directory with the input dataset '
'formatted according to the BIDS standard.')
parser.add_argument('derivatives_pipeline', help='The pipeline that contains '
'minimally preprocessed img, brainmask, and confounds.tsv')
parser.add_argument('output_dir', help='The directory where the output directory '
'and files should be stored. If you are running group level analysis '
                        'this folder should be prepopulated with the results of the '
'participant level analysis.')
parser.add_argument('analysis_level', choices=['participant', 'group'],
help='Level of the analysis that will be performed '
'Multiple participant level analyses can be run independently '
'(in parallel) using the same output_dir')
parser.add_argument('-v', '--version', action='version',
version=verstr)
# Atlas Arguments (Required Options)
atlas_args = parser.add_argument_group('Required Atlas Arguments')
atlas_args.add_argument('-a', '--atlas-img', action='store',
required=('-l' in sys.argv or '--atlas-lut' in sys.argv),
help='input atlas nifti where each voxel within a "region" '
'is labeled with the same integer and there is a unique '
'integer associated with each region of interest.')
atlas_args.add_argument('-l', '--atlas-lut', action='store',
required=('-a' in sys.argv or '--atlas-img' in sys.argv),
help='atlas look up table (tsv) formatted with the columns: '
'index, regions which correspond to the regions in the '
'nifti file specified by --atlas-img.')
# preprocessing options
proc_opts = parser.add_argument_group('Options for processing')
proc_opts.add_argument('--estimator', default='lss',
choices=['lss', 'lsa'],
help='beta series modeling method')
proc_opts.add_argument('-sm', '--smoothing-kernel', action='store', type=float, default=6.0,
help='select a smoothing kernel (mm)')
proc_opts.add_argument('-hp', '--high-pass', action='store', type=float,
default=0.0078125, help='high pass filter (Hz)')
proc_opts.add_argument('-c', '--confounds', help='The confound column names '
'that are to be included in nuisance regression. '
'write the confounds you wish to include separated by a space',
nargs="+")
proc_opts.add_argument('--hrf-model', default='glover',
choices=['glover', 'spm', 'fir',
'glover + derivative',
'glover + derivative + dispersion',
'spm + derivative',
'spm + derivative + dispersion'],
help='convolve your regressors '
'with one of the following hemodynamic response functions')
proc_opts.add_argument('--fir-delays', default=None,
nargs='+', type=int, help='FIR delays in volumes',
metavar='VOL')
proc_opts.add_argument('-w', '--work-dir', help='directory where temporary files '
'are stored (i.e. non-essential files). '
'This directory can be deleted once you are reasonably '
'certain nibs finished as expected.')
# Image Selection options
image_opts = parser.add_argument_group('Options for selecting images')
parser.add_argument('--participant-label', nargs="+",
help='The label(s) of the participant(s) '
'that should be analyzed. The label '
'corresponds to sub-<participant_label> from the BIDS spec '
'(so it does not include "sub-"). If this parameter is not '
'provided all subjects should be analyzed. Multiple '
'participants can be specified with a space separated list.')
image_opts.add_argument('--session-label', action='store',
default=None, help='select a session to analyze')
image_opts.add_argument('-t', '--task-label', action='store',
default=None, help='select a specific task to be processed')
image_opts.add_argument('--run-label', action='store',
default=None, help='select a run to analyze')
image_opts.add_argument('-sp', '--space-label', action='store', default='MNI152NLin2009cAsym',
choices=['MNI152NLin2009cAsym'],
help='select a bold derivative in a specific space to be used')
image_opts.add_argument('--description-label', action='store',
default=None, help='select a bold file with particular '
'`desc` label to process')
image_opts.add_argument('--exclude-description-label', action='store_true',
default=False, help='exclude this `desc` label from nibetaseries')
# performance options
g_perfm = parser.add_argument_group('Options to handle performance')
g_perfm.add_argument('--nthreads', '-n-cpus', action='store', type=int,
help='maximum number of threads across all processes')
g_perfm.add_argument('--use-plugin', action='store', default=None,
help='nipype plugin configuration file')
# misc options
misc = parser.add_argument_group('misc options')
misc.add_argument('--graph', action='store_true', default=False,
help='generates a graph png of the workflow')
return parser
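# Illustrative sketch (not part of the original CLI): how a typical `nibs`
# invocation maps onto this parser. The paths and labels below are hypothetical;
# only the argument structure comes from get_parser() above.
def _example_parse_nibs_args():
    parser = get_parser()
    return parser.parse_args([
        '/data/bids',           # bids_dir (hypothetical path)
        'fmriprep',             # derivatives_pipeline
        '/data/out',            # output_dir (hypothetical path)
        'participant',          # analysis_level
        '-a', 'atlas.nii.gz',   # atlas image (hypothetical file)
        '-l', 'atlas.tsv',      # atlas look up table (hypothetical file)
        '-c', 'white_matter', 'csf',
    ])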
def main():
from ..workflows.base import init_nibetaseries_participant_wf
# get commandline options
opts = get_parser().parse_args()
# check inputs
if (opts.hrf_model == 'fir') and (opts.fir_delays is None):
raise ValueError('If the FIR HRF model is selected, '
'FIR delays must be provided.')
# Set up directories
# TODO: set up some sort of versioning system
bids_dir = os.path.abspath(opts.bids_dir)
derivatives_pipeline_dir = os.path.join(bids_dir, 'derivatives', opts.derivatives_pipeline)
output_dir = os.path.abspath(opts.output_dir)
os.makedirs(output_dir, exist_ok=True)
log_dir = os.path.join(output_dir, 'logs')
os.makedirs(log_dir, exist_ok=True)
if opts.work_dir:
work_dir = os.path.abspath(opts.work_dir)
else:
work_dir = os.path.join(os.getcwd(), 'nibetaseries_work')
os.makedirs(work_dir, exist_ok=True)
# only for a subset of subjects
if opts.participant_label:
subject_list = opts.participant_label
# for all subjects
else:
subject_dirs = glob(os.path.join(bids_dir, "sub-*"))
subject_list = [subject_dir.split("-")[-1] for subject_dir in subject_dirs]
# Nipype plugin configuration
# Load base plugin_settings from file if --use-plugin
if opts.use_plugin is not None:
from yaml import load as loadyml
with open(opts.use_plugin) as f:
plugin_settings = loadyml(f)
plugin_settings.setdefault('plugin_args', {})
else:
# Defaults
plugin_settings = {
'plugin': 'MultiProc',
'plugin_args': {
'raise_insufficient': False,
'maxtasksperchild': 1,
}
}
# Resource management options
# Note that we're making strong assumptions about valid plugin args
# This may need to be revisited if people try to use batch plugins
nthreads = plugin_settings['plugin_args'].get('n_procs')
# Permit overriding plugin config with specific CLI options
if nthreads is None or opts.nthreads is not None:
nthreads = opts.nthreads
if nthreads is None or nthreads < 1:
nthreads = cpu_count()
plugin_settings['plugin_args']['n_procs'] = nthreads
# Nipype config (logs and execution)
ncfg.update_config({
'logging': {'log_directory': log_dir,
'log_to_file': True},
'execution': {'crashdump_dir': log_dir,
'crashfile_format': 'txt',
'parameterize_dirs': False},
})
# running participant level
if opts.analysis_level == "participant":
nibetaseries_participant_wf = init_nibetaseries_participant_wf(
estimator=opts.estimator,
atlas_img=os.path.abspath(opts.atlas_img),
atlas_lut=os.path.abspath(opts.atlas_lut),
bids_dir=bids_dir,
derivatives_pipeline_dir=derivatives_pipeline_dir,
exclude_description_label=opts.exclude_description_label,
fir_delays=opts.fir_delays,
hrf_model=opts.hrf_model,
high_pass=opts.high_pass,
output_dir=output_dir,
run_label=opts.run_label,
selected_confounds=opts.confounds,
session_label=opts.session_label,
smoothing_kernel=opts.smoothing_kernel,
space_label=opts.space_label,
subject_list=subject_list,
task_label=opts.task_label,
description_label=opts.description_label,
work_dir=work_dir,
)
if opts.graph:
nibetaseries_participant_wf.write_graph(graph2use='colored',
format='svg',
simple_form=True)
try:
nibetaseries_participant_wf.run(**plugin_settings)
except RuntimeError as e:
if "Workflow did not execute cleanly" in str(e):
print("Workflow did not execute cleanly")
else:
raise e
elif opts.analysis_level == "group":
raise NotImplementedError('group analysis not currently implemented')
def init():
if __name__ == "__main__":
raise RuntimeError("NiBetaSeries/cli/run.py should not be run directly;\n"
"Please `pip install` NiBetaSeries and use the `nibs` command")
init()
| 1.65625 | 2 |
custom_components/senz/config_flow.py | astrandb/senz_hass | 2 | 3685 | <gh_stars>1-10
"""Config flow for SENZ WiFi."""
from __future__ import annotations
import logging
from typing import Any
import voluptuous as vol
from homeassistant.components import persistent_notification
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_entry_oauth2_flow
from .const import DOMAIN
from .pysenz import PreAPI
class OAuth2FlowHandler(
config_entry_oauth2_flow.AbstractOAuth2FlowHandler, domain=DOMAIN
):
"""Config flow to handle SENZ WiFi OAuth2 authentication."""
DOMAIN = DOMAIN
@property
def logger(self) -> logging.Logger:
"""Return logger."""
return logging.getLogger(__name__)
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
return {
"scope": "restapi offline_access",
}
async def async_step_reauth(
self, entry: dict[str, Any] | None = None
) -> FlowResult:
"""Perform reauth upon an API authentication error."""
self.entry = entry
persistent_notification.async_create(
self.hass,
f"Senz integration for account {entry['auth_implementation']} needs to be re-authenticated. Please go to the [integrations page](/config/integrations) to re-configure it.",
"Senz re-authentication",
"senz_reauth",
)
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Dialog that informs the user that reauth is required."""
if user_input is None:
return self.async_show_form(
step_id="reauth_confirm",
description_placeholders={"account": self.entry["auth_implementation"]},
data_schema=vol.Schema({}),
errors={},
)
persistent_notification.async_dismiss(self.hass, "senz_reauth")
return await self.async_step_user()
async def async_oauth_create_entry(self, data: dict) -> dict:
"""Create an oauth config entry or update existing entry for reauth."""
pre_api = PreAPI(self.hass)
resp = await pre_api.getAccount(data["token"]["access_token"])
account = resp["userName"]
existing_entry = await self.async_set_unique_id(account)
if existing_entry:
self.hass.config_entries.async_update_entry(existing_entry, data=data)
await self.hass.config_entries.async_reload(existing_entry.entry_id)
return self.async_abort(reason="reauth_successful")
return self.async_create_entry(title=account, data=data)
| 2.296875 | 2 |
astropy_helpers/git_helpers.py | bsipocz/astropy-helpers | 9 | 3686 | <reponame>bsipocz/astropy-helpers
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for retrieving revision information from a project's git repository.
"""
# Do not remove the following comment; it is used by
# astropy_helpers.version_helpers to determine the beginning of the code in
# this module
# BEGIN
import locale
import os
import subprocess
import warnings
def _decode_stdio(stream):
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'utf-8'
except ValueError:
stdio_encoding = 'utf-8'
try:
text = stream.decode(stdio_encoding)
except UnicodeDecodeError:
# Final fallback
text = stream.decode('latin1')
return text
def update_git_devstr(version, path=None):
"""
Updates the git revision string if and only if the path is being imported
directly from a git working copy. This ensures that the revision number in
the version string is accurate.
"""
try:
# Quick way to determine if we're in git or not - returns '' if not
devstr = get_git_devstr(sha=True, show_warning=False, path=path)
except OSError:
return version
if not devstr:
# Probably not in git so just pass silently
return version
if 'dev' in version: # update to the current git revision
version_base = version.split('.dev', 1)[0]
devstr = get_git_devstr(sha=False, show_warning=False, path=path)
return version_base + '.dev' + devstr
else:
# otherwise it's already the true/release version
return version
def get_git_devstr(sha=False, show_warning=True, path=None):
"""
Determines the number of revisions in this repository.
Parameters
----------
sha : bool
If True, the full SHA1 hash will be returned. Otherwise, the total
count of commits in the repository will be used as a "revision
number".
show_warning : bool
If True, issue a warning if git returns an error code, otherwise errors
pass silently.
path : str or None
If a string, specifies the directory to look in to find the git
repository. If `None`, the current working directory is used, and must
be the root of the git repository.
If given a filename it uses the directory containing that file.
Returns
-------
devversion : str
Either a string with the revision number (if `sha` is False), the
SHA1 hash of the current commit (if `sha` is True), or an empty string
if git version info could not be identified.
"""
if path is None:
path = os.getcwd()
if not os.path.isdir(path):
path = os.path.abspath(os.path.dirname(path))
if sha:
# Faster for getting just the hash of HEAD
cmd = ['rev-parse', 'HEAD']
else:
cmd = ['rev-list', '--count', 'HEAD']
def run_git(cmd):
try:
p = subprocess.Popen(['git'] + cmd, cwd=path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
except OSError as e:
if show_warning:
warnings.warn('Error running git: ' + str(e))
return (None, b'', b'')
if p.returncode == 128:
if show_warning:
warnings.warn('No git repository present at {0!r}! Using '
'default dev version.'.format(path))
return (p.returncode, b'', b'')
if p.returncode == 129:
if show_warning:
warnings.warn('Your git looks old (does it support {0}?); '
'consider upgrading to v1.7.2 or '
'later.'.format(cmd[0]))
return (p.returncode, stdout, stderr)
elif p.returncode != 0:
if show_warning:
warnings.warn('Git failed while determining revision '
'count: {0}'.format(_decode_stdio(stderr)))
return (p.returncode, stdout, stderr)
return p.returncode, stdout, stderr
returncode, stdout, stderr = run_git(cmd)
if not sha and returncode == 128:
# git returns 128 if the command is not run from within a git
# repository tree. In this case, a warning is produced above but we
# return the default dev version of '0'.
return '0'
elif not sha and returncode == 129:
# git returns 129 if a command option failed to parse; in
# particular this could happen in git versions older than 1.7.2
# where the --count option is not supported
# Also use --abbrev-commit and --abbrev=0 to display the minimum
# number of characters needed per-commit (rather than the full hash)
cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD']
returncode, stdout, stderr = run_git(cmd)
# Fall back on the old method of getting all revisions and counting
# the lines
if returncode == 0:
return str(stdout.count(b'\n'))
else:
return ''
elif sha:
return _decode_stdio(stdout)[:40]
else:
return _decode_stdio(stdout).strip()
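# Illustrative sketch (not part of the original helpers): typical use is to
# refine a development version string with the current revision count, or to
# grab the full SHA1 of HEAD. The version string '1.2.dev' is hypothetical.
def _example_git_devstr_usage():
    dev_version = update_git_devstr('1.2.dev', path=__file__)  # e.g. '1.2.dev123'
    head_sha = get_git_devstr(sha=True, show_warning=False, path=__file__)
    return dev_version, head_sha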
# This function is tested but it is only ever executed within a subprocess when
# creating a fake package, so it doesn't get picked up by coverage metrics.
def _get_repo_path(pathname, levels=None): # pragma: no cover
"""
Given a file or directory name, determine the root of the git repository
this path is under. If given, this won't look any higher than ``levels``
(that is, if ``levels=0`` then the given path must be the root of the git
repository and is returned if so.
Returns `None` if the given path could not be determined to belong to a git
repo.
"""
if os.path.isfile(pathname):
current_dir = os.path.abspath(os.path.dirname(pathname))
elif os.path.isdir(pathname):
current_dir = os.path.abspath(pathname)
else:
return None
current_level = 0
while levels is None or current_level <= levels:
if os.path.exists(os.path.join(current_dir, '.git')):
return current_dir
current_level += 1
if current_dir == os.path.dirname(current_dir):
break
current_dir = os.path.dirname(current_dir)
return None
| 2.109375 | 2 |
src/sot_talos_balance/test/test_feet_admittance.py | imaroger/sot-talos-balance | 0 | 3687 | <reponame>imaroger/sot-talos-balance
'''Test feet admittance control'''
from sot_talos_balance.utils.run_test_utils import run_ft_calibration, run_test, runCommandClient
try:
# Python 2
input = raw_input # noqa
except NameError:
pass
run_test('appli_feet_admittance.py')
run_ft_calibration('robot.ftc')
input("Wait before running the test")
print('Set saturation value')
runCommandClient('robot.admBF_dqSaturation.sin.value = [0.0, 0.0, 0.01, 0.0, 0.0, 0.0]')
input("Wait before dumping the data")
runCommandClient('dump_tracer(robot.tracer)')
| 2.046875 | 2 |
tests/test_db.py | davebryson/py-tendermint | 24 | 3688 | <reponame>davebryson/py-tendermint
import os
from tendermint.db import VanillaDB
from tendermint.utils import home_dir
def test_database():
dbfile = home_dir('temp', 'test.db')
db = VanillaDB(dbfile)
db.set(b'dave',b'one')
result = db.get(b'dave')
assert(b'one' == result)
db.set(b'dave',b'two')
result = db.get(b'dave')
assert(b'two' == result)
assert(None == db.get(b'doesntexist'))
assert(db.exists(b'dave'))
db.delete(b'dave')
assert(db.exists(b'dave') == False)
if os.path.exists(dbfile):
os.remove(dbfile)
| 2.46875 | 2 |
auth/tests/test_views.py | asb29/Redundant | 0 | 3689 | from django.test import TestCase
from django.test import Client
class RegisterTestCase(TestCase):
def test_register(self):
c = Client()
# on success redirects to /
response = c.post('/accounts/register/', {
'username': 'asdas',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>'
})
self.assertRedirects(response, '/')
# passwords don't match
response = c.post('/accounts/register/', {
'username': 'asdasdasd1',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>'
})
self.assertEquals(response.status_code, 200)
# username is empty
response = c.post('/accounts/register/', {
'username': '',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>'
})
self.assertEquals(response.status_code, 200)
# no password
response = c.post('/accounts/register/', {
'username': 'asdasdasd',
'password1': '',
'password2': ''
})
self.assertEquals(response.status_code, 200)
# username and password are similar
response = c.post('/accounts/register/', {
'username': 'asdasdasd0',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>'
})
self.assertEquals(response.status_code, 200)
| 2.875 | 3 |
projects/OneNet/onenet/head.py | iFighting/OneNet | 2 | 3690 | #
# Modified by <NAME>
# Contact: <EMAIL>
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
OneNet Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
import math
from typing import Optional, List
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from detectron2.modeling.poolers import ROIPooler, cat
from detectron2.structures import Boxes
from .deconv import CenternetDeconv
class Head(nn.Module):
def __init__(self, cfg, backbone_shape=[2048, 1024, 512, 256]):
super().__init__()
# Build heads.
num_classes = cfg.MODEL.OneNet.NUM_CLASSES
d_model = cfg.MODEL.OneNet.DECONV_CHANNEL[-1]
activation = cfg.MODEL.OneNet.ACTIVATION
self.deconv = CenternetDeconv(cfg, backbone_shape)
        self.num_classes = num_classes
        self.d_model = d_model
self.activation = _get_activation_fn(activation)
self.feat1 = nn.Conv2d(self.d_model, self.d_model, kernel_size=3, stride=1, padding=1)
self.cls_score = nn.Conv2d(d_model, num_classes, kernel_size=3, stride=1, padding=1)
self.ltrb_pred = nn.Conv2d(d_model, 4, kernel_size=3, stride=1, padding=1)
# Init parameters.
prior_prob = cfg.MODEL.OneNet.PRIOR_PROB
self.bias_value = -math.log((1 - prior_prob) / prior_prob)
self._reset_parameters()
def _reset_parameters(self):
# init all parameters.
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
# initialize the bias for focal loss.
if p.shape[-1] == self.num_classes:
nn.init.constant_(p, self.bias_value)
def forward(self, features_list):
features = self.deconv(features_list)
locations = self.locations(features)[None]
feat = self.activation(self.feat1(features))
class_logits = self.cls_score(feat)
pred_ltrb = F.relu(self.ltrb_pred(feat))
pred_bboxes = self.apply_ltrb(locations, pred_ltrb)
return class_logits, pred_bboxes
def apply_ltrb(self, locations, pred_ltrb):
"""
:param locations: (1, 2, H, W)
:param pred_ltrb: (N, 4, H, W)
"""
pred_boxes = torch.zeros_like(pred_ltrb)
pred_boxes[:,0,:,:] = locations[:,0,:,:] - pred_ltrb[:,0,:,:] # x1
pred_boxes[:,1,:,:] = locations[:,1,:,:] - pred_ltrb[:,1,:,:] # y1
pred_boxes[:,2,:,:] = locations[:,0,:,:] + pred_ltrb[:,2,:,:] # x2
pred_boxes[:,3,:,:] = locations[:,1,:,:] + pred_ltrb[:,3,:,:] # y2
return pred_boxes
@torch.no_grad()
def locations(self, features, stride=4):
"""
Arguments:
features: (N, C, H, W)
Return:
locations: (2, H, W)
"""
h, w = features.size()[-2:]
device = features.device
shifts_x = torch.arange(
0, w * stride, step=stride,
dtype=torch.float32, device=device
)
shifts_y = torch.arange(
0, h * stride, step=stride,
dtype=torch.float32, device=device
)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
locations = torch.stack((shift_x, shift_y), dim=1) + stride // 2
locations = locations.reshape(h, w, 2).permute(2, 0, 1)
return locations
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
| 2.328125 | 2 |
mermaid/utils.py | HastingsGreer/mermaid | 120 | 3691 | <gh_stars>100-1000
"""Various utility functions.
.. todo::
Reorganize this package in a more meaningful way.
"""
from __future__ import print_function
from __future__ import absolute_import
# from builtins import str
# from builtins import range
import torch
from torch.nn.parameter import Parameter
from torch.autograd import Variable
from .libraries.modules.stn_nd import STN_ND_BCXYZ
from .data_wrapper import AdaptVal
from .data_wrapper import MyTensor
from . import smoother_factory as sf
from .data_wrapper import USE_CUDA
import numpy as np
from . import finite_differences as fd
import torch.nn as nn
import torch.nn.init as init
from . import module_parameters as pars
from .spline_interpolation import SplineInterpolation_ND_BCXYZ
import os
try:
from .libraries.functions.nn_interpolation import get_nn_interpolation
except ImportError:
print('WARNING: nn_interpolation could not be imported (only supported in CUDA at the moment). '
'Some functionality may not be available.')
def my_hasnan(x):
"""Check if any input elements are NaNs.
:param x: numpy array
:return: True if NaNs are present, False else
"""
return (x != x).any()
def create_symlink_with_correct_ext(sf, tf):
abs_s = os.path.abspath(sf)
ext_s = os.path.splitext(abs_s)[1]
abs_t = os.path.abspath(tf)
root_t,ext_t = os.path.splitext(abs_t)
abs_t_with_right_ext = root_t + ext_s
if os.path.isfile(abs_t_with_right_ext):
if os.path.samefile(abs_s,abs_t_with_right_ext):
# nothing to do here, these are already the same file
return
else:
os.remove(abs_t_with_right_ext)
# now we can do the symlink
os.symlink(abs_s,abs_t_with_right_ext)
def combine_dict(d1,d2):
"""Creates a dictionary which has entries from both of them.
:param d1: dictionary 1
:param d2: dictionary 2
:return: resulting dictionary
"""
d = d1.copy()
d.update(d2)
return d
def get_parameter_list_from_parameter_dict(pd):
"""Takes a dictionary which contains key value pairs for model parameters and converts it into a list of
parameters that can be used as an input to an optimizer.
:param pd: parameter dictionary
:return: list of parameters
"""
pl = []
for key in pd:
pl.append(pd[key])
return pl
def get_parameter_list_and_par_to_name_dict_from_parameter_dict(pd):
"""Same as get_parameter_list_from_parameter_dict; but also returns a dictionary which keeps track of the keys
based on memory id.
:param pd: parameter dictionary
:return: tuple of (parameter_list, name_dictionary)
"""
par_to_name_dict = dict()
pl = []
for key in pd:
pl.append(pd[key])
par_to_name_dict[pd[key]] = key
return pl, par_to_name_dict
def remove_infs_from_variable(v):
# 32 - bit floating point: torch.FloatTensor, torch.cuda.FloatTensor
# 64 - bit floating point: torch.DoubleTensor, torch.cuda.DoubleTensor
# 16 - bit floating point: torch.HalfTensor, torch.cuda.HalfTensor
# todo: maybe find a cleaner way of handling this
    # this is to make sure that subsequent sums work (hence the clamp range is
    # smaller than it could be); values of this size should not occur in practice anyway
sz = v.size()
reduction_factor = np.prod(np.array(sz))
condition = True
if type(v.data) == torch.cuda.FloatTensor or v.data.dtype==torch.float32:
return torch.clamp(v,
min=(np.asscalar(np.finfo('float32').min))/reduction_factor,
max=(np.asscalar(np.finfo('float32').max))/reduction_factor)
    elif v.data.dtype == torch.float64 or type(v.data) == torch.cuda.DoubleTensor:
return torch.clamp(v,
min=(np.asscalar(np.finfo('float64').min))/reduction_factor,
max=(np.asscalar(np.finfo('float64').max))/reduction_factor)
    elif v.data.dtype == torch.float16 or type(v.data) == torch.cuda.HalfTensor:
return torch.clamp(v,
min=(np.asscalar(np.finfo('float16').min))/reduction_factor,
max=(np.asscalar(np.finfo('float16').max))/reduction_factor)
else:
raise ValueError('Unknown data type: ' + str( type(v.data)))
def lift_to_dimension(A, dim):
"""Creates a view of A of dimension dim (by adding dummy dimensions if necessary).
:param A: numpy array
:param dim: desired dimension of view
:return: returns view of A of appropriate dimension
"""
current_dim = len(A.shape)
if current_dim > dim:
raise ValueError('Can only add dimensions, but not remove them')
if current_dim == dim:
return A
else:
return A.reshape([1]*(dim-current_dim)+list(A.shape))
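# Illustrative sketch (not part of the original utilities): lifting a 2D array
# to a 4D view only prepends singleton dimensions; the data itself is unchanged.
def _example_lift_to_dimension():
    A = np.ones([5, 10])
    return lift_to_dimension(A, 4).shape  # (1, 1, 5, 10)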
def get_dim_of_affine_transform(Ab):
"""Returns the number of dimensions corresponding to an affine transformation of the
form y=Ax+b stored in a column vector. For A =[a1,a2,a3], the parameter vector is simply
[a1;a2;a3;b], i.e., all columns stacked on top of each other.
:param Ab: parameter vector
:return: dimensionality of transform (1,2,or 3)
"""
nr = len(Ab)
if nr==2:
return 1
elif nr==6:
return 2
elif nr==12:
return 3
else:
raise ValueError('Only supports dimensions 1, 2, and 3.')
def set_affine_transform_to_identity(Ab):
"""Sets the affine transformation as given by the column vector Ab to the identity transform.
:param Ab: Affine parameter vector (will be overwritten with the identity transform)
:return:
"""
dim = get_dim_of_affine_transform(Ab)
if dim==1:
Ab.zero_()
Ab[0]=1.
elif dim==2:
Ab.zero_()
Ab[0]=1.
Ab[3]=1.
elif dim==3:
Ab.zero_()
Ab[0]=1.
Ab[4]=1.
Ab[8]=1.
else:
raise ValueError('Only supports dimensions 1, 2, and 3.')
def set_affine_transform_to_identity_multiN(Ab):
"""Set the affine transforms to the identity (in the case of arbitrary batch size).
:param Ab: Parameter vectors B x pars (batch size x param. vector); will be overwritten with identity trans.
:return:
"""
sz = Ab.size()
nr_of_images = sz[0]
for nrI in range(nr_of_images):
set_affine_transform_to_identity(Ab[nrI, :])
def get_inverse_affine_param(Ab):
"""Computes inverse of affine transformation.
Formally: C(Ax+b)+d = CAx+Cb+d = x; C = inv(A), d = -Cb
:param Ab: B x pars (batch size x param. vector)
:return: Inverse of affine parameters
"""
dim =0
if Ab.shape[1] == 2:
dim = 1
elif Ab.shape[1] == 6:
dim = 2
elif Ab.shape[1] == 12:
dim = 3
if dim not in [1, 2, 3]:
raise ValueError('Only supports dimensions 1, 2, and 3.')
Ab = Ab.view(Ab.shape[0], dim+1, dim).transpose(1,2)
Ab_inv = torch.zeros_like(Ab)
for n in range(Ab.shape[0]):
tm_inv = torch.inverse(Ab[n, :, :dim])
Ab_inv[n, :, :dim] = tm_inv
Ab_inv[n, :, dim] = - torch.matmul(tm_inv, Ab[n,:,dim])
inv_affine_param = Ab_inv.transpose(1, 2).contiguous().view(Ab.shape[0], -1)
return inv_affine_param
def update_affine_param(Ab, Cd):
"""Update affine parameters.
Formally: C(Ax+b)+d = CAx+Cb+d
:param Ab: B x pars (batch size x param. vector)
:return: Updated affine parameters
"""
dim = 0
if Ab.shape[1]==2:
dim = 1
elif Ab.shape[1]==6:
dim = 2
elif Ab.shape[1]==12:
dim = 3
if dim not in [1, 2, 3]:
raise ValueError('Only supports dimensions 1, 2, and 3.')
Ab = Ab.view(Ab.shape[0], dim+1, dim).transpose(1, 2)
Cd = Cd.view(Cd.shape[0], dim+1, dim).transpose(1, 2)
updated_param = torch.zeros_like(Ab)
for n in range(Ab.shape[0]):
tm_param = torch.matmul(Cd[n,:,:dim],Ab[n,:,:dim])
updated_param[n,:,:dim] = tm_param
updated_param[n,:,dim] = torch.matmul(Cd[n,:,:dim], Ab[n,:,dim]) +Cd[n,:,dim]
updated_param = updated_param.transpose(1,2).contiguous().view(Ab.shape[0],-1)
return updated_param
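# Illustrative sketch (not part of the original utilities): composing a 2D
# affine parameter vector with its inverse recovers the identity parameters
# [1, 0, 0, 1, 0, 0] (columns of A stacked on top of each other, then b).
def _example_affine_inverse_roundtrip():
    Ab = torch.tensor([[1.0, 0.5, -0.5, 1.0, 2.0, -1.0]])
    Ab_inv = get_inverse_affine_param(Ab)
    return update_affine_param(Ab, Ab_inv)  # approximately the identity transform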
def apply_affine_transform_to_map(Ab,phi):
"""Applies an affine transform to a map.
:param Ab: affine transform parameter column vector
:param phi: map; format nrCxXxYxZ (nrC corresponds to dimension)
:return: returns transformed map
"""
sz = phi.size()
dim = len(sz) - 1
if dim not in [1,2,3]:
raise ValueError('Only supports dimensions 1, 2, and 3.')
phiR = MyTensor(sz).zero_().type_as(phi)
if dim == 1:
phiR = phi * Ab[0] + Ab[1]
elif dim == 2:
phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[2] * phi[1, ...] + Ab[4] # a_11x+a_21y+b1
phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[5] # a_12x+a_22y+b2
elif dim == 3:
phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[6] * phi[2, ...] + Ab[9]
phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[4] * phi[1, ...] + Ab[7] * phi[2, ...] + Ab[10]
phiR[2, ...] = Ab[2] * phi[0, ...] + Ab[5] * phi[1, ...] + Ab[8] * phi[2, ...] + Ab[11]
else:
raise ValueError('Only supports dimensions 1, 2, and 3.')
return phiR
def apply_affine_transform_to_map_multiNC(Ab,phi):
"""Applies an affine transform to maps (for arbitrary batch size).
:param Ab: affine transform parameter column vectors (batch size x param. vector)
:param phi: maps; format batchxnrCxXxYxZ (nrC corresponds to dimension)
:return: returns transformed maps
"""
sz = phi.size()
dim = get_dim_of_affine_transform(Ab[0,:])
nr_of_images = Ab.size()[0]
if nr_of_images != sz[0]:
raise ValueError('Incompatible number of affine transforms')
if dim != len(sz)-2:
raise ValueError('Incompatible number of affine transforms')
phiR = MyTensor(sz).zero_().type_as(phi)
for nrI in range(nr_of_images):
phiR[nrI, ...] = apply_affine_transform_to_map(Ab[nrI, :], phi[nrI, ...])
return phiR
def compute_normalized_gaussian(X, mu, sig):
"""Computes a normalized Gaussian.
:param X: map with coordinates at which to evaluate
:param mu: array indicating the mean
:param sig: array indicating the standard deviations for the different dimensions
:return: Normalized Gaussian evaluated at coordinates in X
Example::
>>> mu, sig = [1,1], [1,1]
>>> X = [0,0]
    >>> print(compute_normalized_gaussian(X, mu, sig))
"""
dim = len(mu)
if dim == 1:
g = np.exp(-np.power(X[0, :] - mu[0], 2.)/(2*np.power(sig[0], 2.)))
g = g/g.sum()
return g
elif dim == 2:
g = np.exp(-np.power(X[0,:,:]-mu[0],2.)/(2*np.power(sig[0],2.))
- np.power(X[1,:, :] - mu[1], 2.) / (2 * np.power(sig[1], 2.)))
g = g/g.sum()
return g
elif dim == 3:
g = np.exp(-np.power(X[0,:, :, :] - mu[0], 2.) / (2 * np.power(sig[0], 2.))
-np.power(X[1,:, :, :] - mu[1], 2.) / (2 * np.power(sig[1], 2.))
-np.power(X[2,:, :, :] - mu[2], 2.) / (2 * np.power(sig[2], 2.)))
g = g / g.sum()
return g
else:
raise ValueError('Can only compute Gaussians in dimensions 1-3')
def _compute_warped_image_multiNC_1d(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):
if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
raise ValueError('Currently only orders 0 to 9 are supported')
if spline_order == 0:
stn = STN_ND_BCXYZ(spacing,
zero_boundary,
use_bilinear=False,
use_01_input=use_01_input)
elif spline_order == 1:
stn = STN_ND_BCXYZ(spacing,
zero_boundary,
use_bilinear=True,
use_01_input=use_01_input)
else:
stn = SplineInterpolation_ND_BCXYZ(spacing,
spline_order)
I1_warped = stn(I0, phi)
return I1_warped
def _compute_warped_image_multiNC_2d(I0, phi, spacing, spline_order,zero_boundary=False,use_01_input=True):
if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
raise ValueError('Currently only orders 0 to 9 are supported')
if spline_order == 0:
stn = STN_ND_BCXYZ(spacing,
zero_boundary,
use_bilinear=False,
use_01_input=use_01_input)
elif spline_order == 1:
stn = STN_ND_BCXYZ(spacing,
zero_boundary,
use_bilinear=True,
use_01_input=use_01_input)
else:
stn = SplineInterpolation_ND_BCXYZ(spacing,
spline_order)
I1_warped = stn(I0, phi)
return I1_warped
def _compute_warped_image_multiNC_3d(I0, phi, spacing, spline_order,zero_boundary=False,use_01_input=True):
if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
raise ValueError('Currently only orders 0 to 9 are supported')
if spline_order == 0:
# return get_warped_label_map(I0,phi,spacing)
stn = STN_ND_BCXYZ(spacing,
zero_boundary,
use_bilinear=False,
use_01_input=use_01_input)
elif spline_order == 1:
stn = STN_ND_BCXYZ(spacing,zero_boundary,
use_bilinear=True,
use_01_input=use_01_input)
else:
stn = SplineInterpolation_ND_BCXYZ(spacing,
spline_order)
I1_warped = stn(I0, phi)
return I1_warped
def compute_warped_image(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):
"""Warps image.
:param I0: image to warp, image size XxYxZ
:param phi: map for the warping, size dimxXxYxZ
:param spacing: image spacing [dx,dy,dz]
:return: returns the warped image of size XxYxZ
"""
# implements this by creating a different view (effectively adding dimensions)
Iw = compute_warped_image_multiNC(I0.view(torch.Size([1, 1] + list(I0.size()))),
phi.view(torch.Size([1] + list(phi.size()))),
spacing,
spline_order,
zero_boundary,
use_01_input)
return Iw.view(I0.size())
def compute_warped_image_multiNC(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):
"""Warps image.
:param I0: image to warp, image size BxCxXxYxZ
:param phi: map for the warping, size BxdimxXxYxZ
:param spacing: image spacing [dx,dy,dz]
:return: returns the warped image of size BxCxXxYxZ
"""
dim = I0.dim()-2
if dim == 1:
return _compute_warped_image_multiNC_1d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input)
elif dim == 2:
return _compute_warped_image_multiNC_2d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input)
elif dim == 3:
return _compute_warped_image_multiNC_3d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input)
else:
raise ValueError('Images can only be warped in dimensions 1 to 3')
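# Illustrative sketch (not part of the original utilities): warping an image
# with the identity map returns (up to interpolation error) the image itself.
def _example_warp_with_identity_map():
    sz = [1, 1, 32, 32]
    spacing = 1. / (np.array(sz[2:]) - 1)
    I = AdaptVal(torch.rand(*sz))
    phi = AdaptVal(torch.from_numpy(identity_map_multiN(sz, spacing)))
    return compute_warped_image_multiNC(I, phi, spacing, spline_order=1)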
def _get_low_res_spacing_from_spacing(spacing, sz, lowResSize):
"""Computes spacing for the low-res parametrization from image spacing.
:param spacing: image spacing
:param sz: size of image
:param lowResSize: size of low re parameterization
:return: returns spacing of low res parameterization
"""
#todo: check that this is the correct way of doing it
return spacing * (np.array(sz[2::])-1) / (np.array(lowResSize[2::])-1)
def _get_low_res_size_from_size(sz, factor):
"""Returns the corresponding low-res size from a (high-res) sz.
:param sz: size (high-res)
:param factor: low-res factor (needs to be <1)
:return: low res size
"""
if (factor is None) or (factor >= 1):
print('WARNING: Could not compute low_res_size as factor was ' + str(factor))
return np.array(sz)
else:
low_res_sz = np.array(sz)
low_res_sz[2::] = (np.ceil((np.array(sz[2::]) * factor))).astype('int16')
return low_res_sz
def _compute_low_res_image(I, spacing, low_res_size, spline_order):
import mermaid.image_sampling as IS
sampler = IS.ResampleImage()
low_res_image, _ = sampler.downsample_image_to_size(I, spacing, low_res_size[2::],spline_order)
return low_res_image
def individual_parameters_to_model_parameters(ind_pars):
model_pars = dict()
if type(ind_pars) == type(dict()):
# should already be in the right format
model_pars = ind_pars
else:
# if ind_pars is not a dictionary assume that they come from the optimizer
# (i.e., list and each list element has a dictionary with keys 'name' and 'model_params'
for par in ind_pars:
model_pars[par['name']] = par['model_params']
return model_pars
def compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, sz, spacing):
"""Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`.
:param lam: scalar momentum, BxCxXxYxZ
:param I: image, BxCxXxYxZ
:param sz: size of image
:param spacing: spacing of image
:return: returns the vector momentum
"""
nrOfI = sz[0] # number of images
m = create_ND_vector_field_variable_multiN(sz[2::], nrOfI) # attention that the second dimension here is image dim, not nrOfC
nrOfC = sz[1]
for c in range(nrOfC): # loop over all the channels and add the results
m = m + compute_vector_momentum_from_scalar_momentum_multiN(lam[:, c, ...],
I[:, c, ...],
nrOfI,
sz[2::],
spacing)
return m
def compute_vector_momentum_from_scalar_momentum_multiN(lam, I, nrOfI, sz, spacing):
"""Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`.
:param lam: scalar momentum, batchxXxYxZ
:param I: image, batchXxYxZ
:param sz: size of image
:param spacing: spacing of image
:return: returns the vector momentum
"""
fdt = fd.FD_torch(spacing)
dim = len(sz)
m = create_ND_vector_field_variable_multiN(sz, nrOfI)
if dim == 1:
m[:, 0, :] = fdt.dXc(I)*lam
elif dim == 2:
m[:, 0, :, :] = fdt.dXc(I)*lam
m[:, 1, :, :] = fdt.dYc(I)*lam
elif dim == 3:
m[:, 0, :, :, :] = fdt.dXc(I)*lam
m[:, 1, :, :, :] = fdt.dYc(I)*lam
m[:, 2, :, :, :] = fdt.dZc(I)*lam
else:
raise ValueError('Can only convert scalar to vector momentum in dimensions 1-3')
return m
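# Illustrative sketch (not part of the original utilities): for a single-channel
# 1D image the vector momentum is simply the scalar momentum times the spatial
# image gradient, m = lambda * dI/dx.
def _example_vector_momentum_1d():
    sz = np.array([1, 1, 16])
    spacing = np.array([1. / (sz[2] - 1)])
    I = AdaptVal(torch.rand(1, 1, 16))
    lam = AdaptVal(torch.ones(1, 1, 16))
    return compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, sz, spacing)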
def create_ND_vector_field_variable_multiN(sz, nr_of_images=1):
"""
Create vector field torch Variable of given size
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
    :param nr_of_images: number of images
:return: returns vector field of size nrOfIxdimxXxYxZ
"""
dim = len(sz)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nr_of_images, dim]+list(csz))
return MyTensor(*(csz.tolist())).normal_(0., 1e-7)
def create_ND_vector_field_variable(sz):
"""Create vector field torch Variable of given size.
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:return: returns vector field of size dimxXxYxZ
"""
dim = len(sz)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([dim]+list(csz))
return MyTensor(*(csz.tolist())).normal_(0.,1e-7)
def create_vector_parameter(nr_of_elements):
"""Creates a vector parameters with a specified number of elements.
:param nr_of_elements: number of vector elements
:return: returns the parameter vector
"""
return Parameter(MyTensor(nr_of_elements).normal_(0., 1e-7))
def create_ND_vector_field_parameter_multiN(sz, nrOfI=1,get_field_from_external_network=False):
"""Create vector field torch Parameter of given size.
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:param nrOfI: number of images
:return: returns vector field of size nrOfIxdimxXxYxZ
"""
dim = len(sz)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nrOfI, dim]+list(csz))
if get_field_from_external_network:
tmp = MyTensor(*(csz.tolist())).normal_(0.,1e-7)
tmp.requires_grad = True
else:
tmp = Parameter(MyTensor(*(csz.tolist())).normal_(0.,1e-7))
return tmp
def create_local_filter_weights_parameter_multiN(sz,gaussian_std_weights, nrOfI=1,sched='w_K_w',get_preweight_from_network=False):
"""
    Create multi-Gaussian smoother pre-weights as a torch Parameter of given size
    :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
    :param nrOfI: number of images
    :return: returns weight field of size nrOfI x nr_of_gaussians x XxYxZ
"""
nr_of_mg_weights = len(gaussian_std_weights)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nrOfI,nr_of_mg_weights]+list(csz))
weights = torch.empty(*csz)
# set the default
if sched =='w_K_w':
gaussian_std_weights = [torch.sqrt(std_w) for std_w in gaussian_std_weights]
for g in range(nr_of_mg_weights):
weights[:, g, ...] = gaussian_std_weights[g]
tmp = AdaptVal(weights)
if get_preweight_from_network:
tmp.requires_grad = True
else:
tmp = Parameter(tmp)
return tmp
def create_ND_scalar_field_parameter_multiNC(sz, nrOfI=1, nrOfC=1):
"""
    Create scalar field torch Parameter of given size
    :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
    :param nrOfI: number of images
    :param nrOfC: number of channels
    :return: returns scalar field of size nrOfIxnrOfCxXxYxZ
"""
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nrOfI,nrOfC]+list(csz))
return Parameter(MyTensor(*(csz.tolist())).normal_(0.,1e-7))
def centered_identity_map_multiN(sz, spacing, dtype='float32'):
"""
Create a centered identity map (shifted so it is centered around 0)
:param sz: size of an image in BxCxXxYxZ format
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map
"""
dim = len(sz) - 2
nrOfI = sz[0]
if dim == 1:
id = np.zeros([nrOfI, 1, sz[2]], dtype=dtype)
elif dim == 2:
id = np.zeros([nrOfI, 2, sz[2], sz[3]], dtype=dtype)
elif dim == 3:
id = np.zeros([nrOfI, 3, sz[2], sz[3], sz[4]], dtype=dtype)
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
for n in range(nrOfI):
id[n, ...] = centered_identity_map(sz[2::], spacing,dtype=dtype)
return id
def identity_map_multiN(sz,spacing,dtype='float32'):
"""
Create an identity map
:param sz: size of an image in BxCxXxYxZ format
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map
"""
dim = len(sz)-2
nrOfI = int(sz[0])
if dim == 1:
id = np.zeros([nrOfI,1,sz[2]],dtype=dtype)
elif dim == 2:
id = np.zeros([nrOfI,2,sz[2],sz[3]],dtype=dtype)
elif dim == 3:
id = np.zeros([nrOfI,3,sz[2],sz[3],sz[4]],dtype=dtype)
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
for n in range(nrOfI):
id[n,...] = identity_map(sz[2::],spacing,dtype=dtype)
return id
def centered_identity_map(sz, spacing, dtype='float32'):
"""
Returns a centered identity map (with 0 in the middle) if the sz is odd
Otherwise shifts everything by 0.5*spacing
:param sz: just the spatial dimensions, i.e., XxYxZ
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map of dimension dimxXxYxZ
"""
dim = len(sz)
if dim == 1:
id = np.mgrid[0:sz[0]]
elif dim == 2:
id = np.mgrid[0:sz[0], 0:sz[1]]
elif dim == 3:
id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
# now get it into range [0,(sz-1)*spacing]^d
id = np.array(id.astype(dtype))
if dim == 1:
id = id.reshape(1, sz[0]) # add a dummy first index
for d in range(dim):
id[d] *= spacing[d]
if sz[d]%2==0:
#even
id[d] -= spacing[d]*(sz[d]//2)
else:
#odd
id[d] -= spacing[d]*((sz[d]+1)//2)
# and now store it in a dim+1 array
if dim == 1:
idnp = np.zeros([1, sz[0]], dtype=dtype)
idnp[0, :] = id[0]
elif dim == 2:
idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
idnp[0, :, :] = id[0]
idnp[1, :, :] = id[1]
elif dim == 3:
idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype)
idnp[0, :, :, :] = id[0]
idnp[1, :, :, :] = id[1]
idnp[2, :, :, :] = id[2]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map')
return idnp
#
# def centered_min_normalized_identity_map(sz, spacing, dtype='float32'):
# """
# Returns a centered identity map (with 0 in the middle) if the sz is odd
# Otherwise shifts everything by 0.5*spacing
#
# :param sz: just the spatial dimensions, i.e., XxYxZ
# :param spacing: list with spacing information [sx,sy,sz]
# :param dtype: numpy data-type ('float32', 'float64', ...)
# :return: returns the identity map of dimension dimxXxYxZ
# """
# dim = len(sz)
# if dim == 1:
# id = np.mgrid[0:sz[0]]
# elif dim == 2:
# id = np.mgrid[0:sz[0], 0:sz[1]]
# elif dim == 3:
# id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
# else:
# raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
#
# min_spacing = np.min(spacing)
# spacing_ratio = spacing/min_spacing
#
#
# # now get it into range [0,(sz-1)*spacing]^d
# id = np.array(id.astype(dtype))
# if dim == 1:
# id = id.reshape(1, sz[0]) # add a dummy first index
#
# for d in range(dim):
# id[d] *= spacing[d]
# if sz[d]%2==0:
# #even
# id[d] -= spacing[d]*(sz[d]//2)
# else:
# #odd
# id[d] -= spacing[d]*((sz[d]+1)//2)
#
# # and now store it in a dim+1 array and rescale by the ratio
# if dim == 1:
# idnp = np.zeros([1, sz[0]], dtype=dtype)
# idnp[0, :] = id[0] * spacing_ratio[0]
# elif dim == 2:
# idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
# idnp[0, :, :] = id[0] * spacing_ratio[0]
# idnp[1, :, :] = id[1] * spacing_ratio[1]
# elif dim == 3:
# idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype)
# idnp[0, :, :, :] = id[0] * spacing_ratio[0]
# idnp[1, :, :, :] = id[1] * spacing_ratio[1]
# idnp[2, :, :, :] = id[2] * spacing_ratio[2]
# else:
# raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map')
#
# return idnp
#
# def tranfrom_var_list_into_min_normalized_space(var_list,spacing,do_transform=True):
# if do_transform:
# min_spacing = np.min(spacing)
# spacing_ratio =min_spacing/spacing
# dim = spacing.size
# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))
# sp_sz = [1]+[dim] +[1]*dim
# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)
# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]
# else:
# new_var_list = var_list
# return new_var_list
# def recover_var_list_from_min_normalized_space(var_list,spacing,do_transform=True):
# if do_transform:
# min_spacing = np.min(spacing)
# spacing_ratio =spacing/min_spacing
# dim = spacing.size
# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))
# sp_sz = [1]+[dim] +[1]*dim
# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)
# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]
# else:
# new_var_list = var_list
# return new_var_list
#
def identity_map(sz,spacing,dtype='float32'):
"""
Returns an identity map.
:param sz: just the spatial dimensions, i.e., XxYxZ
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map of dimension dimxXxYxZ
"""
dim = len(sz)
if dim==1:
id = np.mgrid[0:sz[0]]
elif dim==2:
id = np.mgrid[0:sz[0],0:sz[1]]
elif dim==3:
id = np.mgrid[0:sz[0],0:sz[1],0:sz[2]]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
# now get it into range [0,(sz-1)*spacing]^d
id = np.array( id.astype(dtype) )
if dim==1:
id = id.reshape(1,sz[0]) # add a dummy first index
for d in range(dim):
id[d]*=spacing[d]
#id[d]*=2./(sz[d]-1)
#id[d]-=1.
# and now store it in a dim+1 array
if dim==1:
idnp = np.zeros([1, sz[0]], dtype=dtype)
idnp[0,:] = id[0]
elif dim==2:
idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
idnp[0,:, :] = id[0]
idnp[1,:, :] = id[1]
elif dim==3:
idnp = np.zeros([3,sz[0], sz[1], sz[2]], dtype=dtype)
idnp[0,:, :, :] = id[0]
idnp[1,:, :, :] = id[1]
idnp[2,:, :, :] = id[2]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
return idnp
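# Illustrative sketch (not part of the original utilities): a 1D identity map
# over 5 points with spacing 0.25 is simply [[0., 0.25, 0.5, 0.75, 1.0]].
def _example_identity_map_1d():
    return identity_map([5], np.array([0.25]))  # shape (1, 5)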
def omt_boundary_weight_mask(img_sz,spacing,mask_range=5,mask_value=5,smoother_std =0.05):
"""generate a smooth weight mask for the omt """
dim = len(img_sz)
mask_sz = [1,1]+ list(img_sz)
mask = AdaptVal(torch.ones(*mask_sz))*mask_value
if dim ==2:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range]=1
elif dim==3:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range,mask_range:-mask_range ]=1
sm = get_single_gaussian_smoother(smoother_std,img_sz,spacing)
mask = sm.smooth(mask)
return mask.detach()
def momentum_boundary_weight_mask(img_sz,spacing,mask_range=5,smoother_std =0.05,pow=2):
"""generate a smooth weight mask for the omt """
dim = len(img_sz)
mask_sz = [1,1]+ list(img_sz)
mask = AdaptVal(torch.zeros(*mask_sz))
if dim ==2:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range]=1
elif dim==3:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range,mask_range:-mask_range ]=1
sm = get_single_gaussian_smoother(smoother_std,img_sz,spacing)
mask = sm.smooth(mask)
if pow ==2:
mask = mask**2
if pow ==3:
mask = mask*mask*mask
return mask
# def compute_omt_const(stds,param,dim):
# omt_power = param['forward_model']['smoother']['omt_power']
# omt_weight_penalty = param['forward_model']['smoother']['omt_weight_penalty']
# min_std = torch.min(stds)
# max_std = torch.max(stds)
# omt_const = torch.abs(torch.log(max_std/stds))**omt_power
# omt_const = omt_const/(torch.abs(torch.log(max_std / min_std)) ** omt_power)
# omt_const = omt_const*omt_weight_penalty/(EV.reg_factor_in_mermaid*2)
# sz = [1]+ [len(stds)] +[1]*(dim+1)
# return omt_const.view(*sz)
def get_single_gaussian_smoother(gaussian_std,sz,spacing):
s_m_params = pars.ParameterDict()
s_m_params['smoother']['type'] = 'gaussian'
s_m_params['smoother']['gaussian_std'] = gaussian_std
s_m = sf.SmootherFactory(sz, spacing).create_smoother(s_m_params)
return s_m
def get_warped_label_map(label_map, phi, spacing, sched='nn'):
if sched == 'nn':
warped_label_map = compute_warped_image_multiNC(label_map, phi, spacing,spline_order=0,zero_boundary=True)
# check if here should be add assert
assert abs(torch.sum(warped_label_map.data -warped_label_map.data.round()))< 0.1, "nn interpolation is not precise"
else:
raise ValueError(" the label warping method is not implemented")
return warped_label_map
def t2np(v):
"""
Takes a torch array and returns it as a numpy array on the cpu
:param v: torch array
:return: numpy array
"""
return (v.detach()).cpu().numpy()
def cxyz_to_xyzc( v ):
"""
    Permutes a tensor from channels-first (B x C x X x Y x Z) to channels-last (B x X x Y x Z x C) layout
    :param v: torch tensor in channels-first layout
    :return: torch tensor in channels-last layout
"""
dim = len(v.shape)-2
if dim ==2:
v = v.permute(0,2,3,1)
if dim ==3:
v = v.permute(0,2,3,4,1)
return v
def get_scalar(v):
if isinstance(v, float):
return v
elif isinstance(v, np.ndarray) and v.size == 1:
return float(v)
def checkNan(x):
""""
input should be list of Variable
"""
return [len(np.argwhere(np.isnan(elem.detach().cpu().numpy()))) for elem in x]
def noramlized_spacing_to_smallest(spacing):
min_sp = np.min(spacing)
spacing[spacing>min_sp]=min_sp
return spacing
def time_warped_function(f):
def __time_warped_function(input=None):
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
output = f(input)
end.record()
# Waits for everything to finish running
torch.cuda.synchronize()
print(start.elapsed_time(end))
return output
return __time_warped_function
def interoplate_boundary_right(tensor):
    # linearly extrapolate the right-most entry along each spatial axis:
    # boundary = 2 * second-to-last - third-to-last
    dim = len(tensor.shape) - 2
    if dim == 1:
        tensor[:, :, -1] = 2 * tensor[:, :, -2] - tensor[:, :, -3]
    if dim == 2:
        tensor[:, :, -1, :] = 2 * tensor[:, :, -2, :] - tensor[:, :, -3, :]
        tensor[:, :, :, -1] = 2 * tensor[:, :, :, -2] - tensor[:, :, :, -3]
    if dim == 3:
        tensor[:, :, -1, :, :] = 2 * tensor[:, :, -2, :, :] - tensor[:, :, -3, :, :]
        tensor[:, :, :, -1, :] = 2 * tensor[:, :, :, -2, :] - tensor[:, :, :, -3, :]
        tensor[:, :, :, :, -1] = 2 * tensor[:, :, :, :, -2] - tensor[:, :, :, :, -3]
def get_resampled_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None):
"""
:param I: B C X Y Z
:param spacing: spx spy spz
:param desiredSize: B C X Y Z
:param spline_order:
:param zero_boundary:
:param identity_map:
:return:
"""
if spacing is None:
img_sz = I.shape[2:]
spacing = 1. / (np.array(img_sz) - 1)
if identity_map is not None: # todo will remove, currently fix for symmetric training
if I.shape[0] != identity_map.shape[0]:
n_batch = I.shape[0]
desiredSize = desiredSize.copy()
desiredSize[0] = n_batch
identity_map = identity_map[:n_batch]
resampled, new_spacing = resample_image(I, spacing, desiredSize, spline_order=spline_order,
zero_boundary=zero_boundary, identity_map=identity_map)
return resampled
def resample_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None):
"""
Resample an image to a given desired size
:param I: Input image (expected to be of BxCxXxYxZ format)
:param spacing: array describing the spatial spacing
:param desiredSize: array for the desired size (excluding B and C, i.e, 1 entry for 1D, 2 for 2D, and 3 for 3D)
:return: returns a tuple: the downsampled image, the new spacing after downsampling
"""
desiredSize = desiredSize[2:]
is_numpy = False
if not isinstance(I, torch.Tensor):
I = torch.Tensor(I)
is_numpy = True
sz = np.array(list(I.size()))
# check that the batch size and the number of channels is the same
nrOfI = sz[0]
nrOfC = sz[1]
desiredSizeNC = np.array([nrOfI, nrOfC] + list(desiredSize))
newspacing = spacing * ((sz[2::].astype('float') - 1.) / (
desiredSizeNC[2::].astype('float') - 1.)) ###########################################
if identity_map is not None:
idDes = identity_map
else:
idDes = AdaptVal(torch.from_numpy(identity_map_multiN(desiredSizeNC, newspacing)))
# now use this map for resampling
ID = compute_warped_image_multiNC(I, idDes, newspacing, spline_order, zero_boundary)
return ID if not is_numpy else ID.numpy(), newspacing
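# Illustrative sketch (not part of the original utilities): downsampling a
# 1x1x64x64 image to 1x1x32x32 roughly doubles the spacing along each axis.
def _example_resample_image():
    sz = [1, 1, 64, 64]
    spacing = 1. / (np.array(sz[2:]) - 1)
    I = AdaptVal(torch.rand(*sz))
    I_low, new_spacing = resample_image(I, spacing, [1, 1, 32, 32])
    return I_low.shape, new_spacing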
def get_res_size_from_size(sz, factor):
"""
Returns the corresponding low-res size from a (high-res) sz
:param sz: size (high-res)
:param factor: low-res factor (needs to be <1)
:return: low res size
"""
if (factor is None):
print('WARNING: Could not compute low_res_size as factor was ' + str(factor))
return sz
else:
lowResSize = np.array(sz)
if not isinstance(factor, list):
lowResSize[2::] = (np.ceil((np.array(sz[2:]) * factor))).astype('int16')
else:
lowResSize[2::] = (np.ceil((np.array(sz[2:]) * np.array(factor)))).astype('int16')
if lowResSize[-1] % 2 != 0:
lowResSize[-1] -= 1
print(
'\n\nWARNING: forcing last dimension to be even: fix properly in the Fourier transform later!\n\n')
return lowResSize
def get_res_spacing_from_spacing(spacing, sz, lowResSize):
"""
Computes spacing for the low-res parameterization from image spacing
:param spacing: image spacing
:param sz: size of image
:param lowResSize: size of low re parameterization
:return: returns spacing of low res parameterization
"""
# todo: check that this is the correct way of doing it
return spacing * (np.array(sz[2::]) - 1) / (np.array(lowResSize[2::]) - 1)
########################################## Adaptive Net ###################################################3
def space_normal(tensors, std=0.1):
"""
space normalize for the net kernel
:param tensor:
:param mean:
:param std:
:return:
"""
if isinstance(tensors, Variable):
space_normal(tensors.data, std=std)
return tensors
for n in range(tensors.size()[0]):
for c in range(tensors.size()[1]):
dim = tensors[n][c].dim()
sz = tensors[n][c].size()
mus = np.zeros(dim)
stds = std * np.ones(dim)
print('WARNING: What should the spacing be here? Needed for new identity map code')
raise ValueError('Double check the spacing here before running this code')
spacing = np.ones(dim)
centered_id = centered_identity_map(sz,spacing)
g = compute_normalized_gaussian(centered_id, mus, stds)
tensors[n,c] = torch.from_numpy(g)
def weights_init_uniform(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.uniform(m.weight.data, 0.038, 0.042)
elif classname.find('Linear') != -1:
init.uniform(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_normal(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
space_normal(m.weight.data)
elif classname.find('Linear') != -1:
space_normal(m.weight.data)
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_rd_normal(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.normal(m.weight.data)
elif classname.find('Linear') != -1:
init.normal(m.weight.data)
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_xavier(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.xavier_normal(m.weight.data, gain=1)
elif classname.find('Linear') != -1:
init.xavier_normal(m.weight.data, gain=1)
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_orthogonal(m):
classname = m.__class__.__name__
print(classname)
if classname.find('Conv') != -1:
init.orthogonal(m.weight.data, gain=1)
elif classname.find('Linear') != -1:
init.orthogonal(m.weight.data, gain=1)
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
print('initialization method [%s]' % init_type)
if init_type == 'rd_normal':
net.apply(weights_init_rd_normal)
elif init_type == 'normal':
net.apply(weights_init_normal)
elif init_type == 'uniform':
net.apply(weights_init_uniform)
elif init_type == 'xavier':
net.apply(weights_init_xavier)
elif init_type == 'kaiming':
net.apply(weights_init_kaiming)
elif init_type == 'orthogonal':
net.apply(weights_init_orthogonal)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
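# --- Illustrative sketch (added for clarity; layer sizes are arbitrary) ---
# init_weights() dispatches to one of the weights_init_* helpers via net.apply(),
# which visits every sub-module and matches on its class name.
def _example_init_weights():
    demo_net = nn.Sequential(
        nn.Conv2d(3, 16, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
    )
    init_weights(demo_net, init_type='kaiming')
    return demo_net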
def organize_data(moving, target, sched='depth_concat'):
if sched == 'depth_concat':
input = torch.cat([moving, target], dim=1)
elif sched == 'width_concat':
input = torch.cat((moving, target), dim=3)
elif sched == 'list_concat':
input = torch.cat((moving.unsqueeze(0),target.unsqueeze(0)),dim=0)
elif sched == 'difference':
input = moving-target
return input
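# --- Illustrative sketch (added for clarity; tensor sizes are arbitrary) ---
# 'depth_concat' stacks the moving and target images along the channel dimension,
# which is the layout expected by the 2D networks below.
def _example_organize_data():
    moving = torch.zeros(1, 1, 32, 32)
    target = torch.ones(1, 1, 32, 32)
    paired = organize_data(moving, target, sched='depth_concat')
    return paired.size()  # torch.Size([1, 2, 32, 32])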
def bh(m,gi,go):
print("Grad Input")
print((torch.sum(gi[0].data), torch.sum(gi[1].data)))
print("Grad Output")
print(torch.sum(go[0].data))
return gi[0], gi[1], gi[2]
class ConvBnRel(nn.Module):
# conv + bn (optional) + relu
def __init__(self, in_channels, out_channels, kernel_size, stride=1, active_unit='relu', same_padding=False,
bn=False, reverse=False, bias=False):
super(ConvBnRel, self).__init__()
padding = int((kernel_size - 1) // 2) if same_padding else 0
if not reverse:
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias)
else:
self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding=padding,bias=bias)
#y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
#When affine=False the output of BatchNorm is equivalent to considering gamma=1 and beta=0 as constants.
self.bn = nn.BatchNorm2d(out_channels, eps=0.0001, momentum=0, affine=True) if bn else None
if active_unit == 'relu':
self.active_unit = nn.ReLU(inplace=True)
elif active_unit == 'elu':
self.active_unit = nn.ELU(inplace=True)
else:
self.active_unit = None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.active_unit is not None:
x = self.active_unit(x)
return x
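# --- Illustrative sketch (added for clarity; channel counts are arbitrary) ---
# ConvBnRel chains conv (or transposed conv) -> optional BatchNorm -> activation.
def _example_conv_bn_rel():
    block = ConvBnRel(3, 8, kernel_size=3, same_padding=True, bn=True)
    x = torch.zeros(1, 3, 16, 16)
    return block(x).size()  # torch.Size([1, 8, 16, 16])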
class FcRel(nn.Module):
# fc+ relu(option)
def __init__(self, in_features, out_features, active_unit='relu'):
super(FcRel, self).__init__()
self.fc = nn.Linear(in_features, out_features)
if active_unit == 'relu':
self.active_unit = nn.ReLU(inplace=True)
elif active_unit == 'elu':
self.active_unit = nn.ELU(inplace=True)
else:
self.active_unit = None
def forward(self, x):
x = self.fc(x)
if self.active_unit is not None:
x = self.active_unit(x)
return x
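# --- Illustrative sketch (added for clarity; feature sizes are arbitrary) ---
def _example_fc_rel():
    layer = FcRel(10, 4, active_unit='elu')
    return layer(torch.zeros(2, 10)).size()  # torch.Size([2, 4])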
class AdpSmoother(nn.Module):
"""
a simple conv. implementation, generate displacement field
"""
def __init__(self, inputs, dim, net_sched=None):
# settings should include [using_bias, using bn, using elu]
# inputs should be a dictionary could contain ['s'],['t']
super(AdpSmoother, self).__init__()
self.dim = dim
self.net_sched = 'm_only'
self.s = inputs['s'].detach()
self.t = inputs['t'].detach()
self.mask = Parameter(torch.cat([torch.ones(inputs['s'].size())]*dim, 1), requires_grad = True)
self.get_net_sched()
#self.net.register_backward_hook(bh)
def get_net_sched(self, debugging=True, using_bn=True, active_unit='relu', using_sigmoid=False , kernel_size=5):
# return the self.net and self.net_input
padding_size = (kernel_size-1)//2
if self.net_sched == 'm_only':
if debugging:
self.net = nn.Conv2d(2, 2, kernel_size, 1, padding=padding_size, bias=False,groups=2)
else:
net = \
[ConvBnRel(self.dim, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
ConvBnRel(20,self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
if using_sigmoid:
net += [nn.Sigmoid()]
self.net = nn.Sequential(*net)
elif self.net_sched =='m_f_s':
if debugging:
self.net = nn.Conv2d(self.dim+1, self.dim, kernel_size, 1, padding=padding_size, bias=False)
else:
net = \
[ConvBnRel(self.dim +1, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
if using_sigmoid:
net += [nn.Sigmoid()]
self.net = nn.Sequential(*net)
elif self.net_sched == 'm_d_s':
if debugging:
self.net = nn.Conv2d(self.dim+1, self.dim, kernel_size, 1, padding=padding_size, bias=False)
else:
net = \
[ConvBnRel(self.dim + 1, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
if using_sigmoid:
net += [nn.Sigmoid()]
self.net = nn.Sequential(*net)
elif self.net_sched == 'm_f_s_t':
if debugging:
self.net = nn.Conv2d(self.dim+2, self.dim, kernel_size, 1, padding=padding_size, bias=False)
else:
net = \
[ConvBnRel(self.dim + 2, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
if using_sigmoid:
net += [nn.Sigmoid()]
self.net = nn.Sequential(*net)
elif self.net_sched == 'm_d_s_f_t':
if debugging:
self.net = nn.Conv2d(self.dim + 2, self.dim, kernel_size, 1, padding=padding_size, bias=False)
else:
net = \
[ConvBnRel(self.dim + 2, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
if using_sigmoid:
net += [nn.Sigmoid()]
self.net = nn.Sequential(*net)
def prepare_data(self, m, new_s):
input=None
if self.net_sched == 'm_only':
input = m
elif self.net_sched == 'm_f_s':
input = organize_data(m,self.s,sched='depth_concat')
elif self.net_sched == 'm_d_s':
input = organize_data(m, new_s, sched='depth_concat')
elif self.net_sched == 'm_f_s_t':
input = organize_data(m, self.s, sched='depth_concat')
input = organize_data(input, self.t, sched='depth_concat')
elif self.net_sched == 'm_f_s_t':
input = organize_data(m, self.s, sched='depth_concat')
input = organize_data(input, self.t, sched='depth_concat')
elif self.net_sched == 'm_d_s_f_t':
input = organize_data(m, new_s, sched='depth_concat')
input = organize_data(input, self.t, sched='depth_concat')
return input
def forward(self, m,new_s=None):
m = m * self.mask
input = self.prepare_data(m,new_s)
x= input
x = self.net(x)
return x
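# --- Illustrative sketch (added for clarity; sizes are arbitrary and PyTorch >= 0.4
# tensor semantics are assumed) ---
# AdpSmoother expects a dict holding the source image 's' and target image 't'; with
# the 'm_only' schedule it only smooths the momentum/displacement field m.
def _example_adp_smoother():
    inputs = {'s': torch.zeros(1, 1, 32, 32), 't': torch.zeros(1, 1, 32, 32)}
    smoother = AdpSmoother(inputs, dim=2)
    m = torch.zeros(1, 2, 32, 32)
    return smoother(m).size()  # torch.Size([1, 2, 32, 32])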
| 1.851563 | 2 |
examples/io/plot_read_evoked.py | fmamashli/mne-python | 3 | 3692 | <filename>examples/io/plot_read_evoked.py
"""
==================================
Reading and writing an evoked file
==================================
This script shows how to read and write evoked datasets.
"""
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
from mne import read_evokeds
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
# Reading
condition = 'Left Auditory'
evoked = read_evokeds(fname, condition=condition, baseline=(None, 0),
proj=True)
###############################################################################
# Show result as a butterfly plot:
# By using exclude=[] bad channels are not excluded and are shown in red
evoked.plot(exclude=[], time_unit='s')
# Show result as a 2D image (x: time, y: channels, color: amplitude)
evoked.plot_image(exclude=[], time_unit='s')
###############################################################################
# Use :func:`mne.Evoked.save` or :func:`mne.write_evokeds` to write the evoked
# responses to a file.
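###############################################################################
# A minimal sketch of the save step (added for illustration; the output name is
# arbitrary, and MNE expects evoked filenames to end in '-ave.fif'):
#
# evoked.save('sample_audvis-copy-ave.fif')
# # or equivalently (after ``import mne``):
# # mne.write_evokeds('sample_audvis-copy-ave.fif', evoked)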
| 2.8125 | 3 |
source/monkeyPatches/__init__.py | lukaszgo1/nvda | 19 | 3693 | <filename>source/monkeyPatches/__init__.py
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2021 NV Access Limited
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
from . import wxMonkeyPatches
applyWxMonkeyPatches = wxMonkeyPatches.apply
def applyMonkeyPatches():
# Apply several monkey patches to comtypes
# F401 - imported but unused: Patches are applied during import
from . import comtypesMonkeyPatches # noqa: F401
# Apply patches to Enum, prevent cyclic references on ValueError during construction
from . import enumPatches
enumPatches.replace__new__()
| 1.539063 | 2 |
yt_dlp/extractor/ninenow.py | nxtreaming/yt-dlp | 11 | 3694 | <reponame>nxtreaming/yt-dlp<gh_stars>10-100
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
smuggle_url,
str_or_none,
try_get,
unified_strdate,
unified_timestamp,
)
class NineNowIE(InfoExtractor):
IE_NAME = '9now.com.au'
_VALID_URL = r'https?://(?:www\.)?9now\.com\.au/(?:[^/]+/){2}(?P<id>[^/?#]+)'
_GEO_COUNTRIES = ['AU']
_TESTS = [{
# clip
'url': 'https://www.9now.com.au/afl-footy-show/2016/clip-ciql02091000g0hp5oktrnytc',
'md5': '17cf47d63ec9323e562c9957a968b565',
'info_dict': {
'id': '16801',
'ext': 'mp4',
'title': '<NAME>\'s Joey Montagna on the potential for a player\'s strike',
'description': 'Is a boycott of the NAB Cup "on the table"?',
'uploader_id': '4460760524001',
'upload_date': '20160713',
'timestamp': 1468421266,
},
'skip': 'Only available in Australia',
}, {
# episode
'url': 'https://www.9now.com.au/afl-footy-show/2016/episode-19',
'only_matching': True,
}, {
# DRM protected
'url': 'https://www.9now.com.au/andrew-marrs-history-of-the-world/season-1/episode-1',
'only_matching': True,
}, {
# episode of series
'url': 'https://www.9now.com.au/lego-masters/season-3/episode-3',
'info_dict': {
'id': '6249614030001',
'title': 'Episode 3',
'ext': 'mp4',
'season_number': 3,
'episode_number': 3,
'description': 'In the first elimination of the competition, teams will have 10 hours to build a world inside a snow globe.',
'uploader_id': '4460760524001',
'timestamp': 1619002200,
'upload_date': '20210421',
},
'expected_warnings': ['Ignoring subtitle tracks'],
'params':{
'skip_download': True,
}
}]
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/4460760524001/default_default/index.html?videoId=%s'
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
page_data = self._parse_json(self._search_regex(
r'window\.__data\s*=\s*({.*?});', webpage,
'page data', default='{}'), display_id, fatal=False)
if not page_data:
page_data = self._parse_json(self._parse_json(self._search_regex(
r'window\.__data\s*=\s*JSON\.parse\s*\(\s*(".+?")\s*\)\s*;',
webpage, 'page data'), display_id), display_id)
for kind in ('episode', 'clip'):
current_key = page_data.get(kind, {}).get(
'current%sKey' % kind.capitalize())
if not current_key:
continue
cache = page_data.get(kind, {}).get('%sCache' % kind, {})
if not cache:
continue
common_data = {
'episode': (cache.get(current_key) or list(cache.values())[0])[kind],
'season': (cache.get(current_key) or list(cache.values())[0]).get('season', None)
}
break
else:
raise ExtractorError('Unable to find video data')
if not self.get_param('allow_unplayable_formats') and try_get(common_data, lambda x: x['episode']['video']['drm'], bool):
self.report_drm(display_id)
brightcove_id = try_get(
common_data, lambda x: x['episode']['video']['brightcoveId'], compat_str) or 'ref:%s' % common_data['episode']['video']['referenceId']
video_id = str_or_none(try_get(common_data, lambda x: x['episode']['video']['id'])) or brightcove_id
title = try_get(common_data, lambda x: x['episode']['name'], compat_str)
season_number = try_get(common_data, lambda x: x['season']['seasonNumber'], int)
episode_number = try_get(common_data, lambda x: x['episode']['episodeNumber'], int)
timestamp = unified_timestamp(try_get(common_data, lambda x: x['episode']['airDate'], compat_str))
release_date = unified_strdate(try_get(common_data, lambda x: x['episode']['availability'], compat_str))
thumbnails_data = try_get(common_data, lambda x: x['episode']['image']['sizes'], dict) or {}
thumbnails = [{
'id': thumbnail_id,
'url': thumbnail_url,
'width': int_or_none(thumbnail_id[1:]),
} for thumbnail_id, thumbnail_url in thumbnails_data.items()]
return {
'_type': 'url_transparent',
'url': smuggle_url(
self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
{'geo_countries': self._GEO_COUNTRIES}),
'id': video_id,
'title': title,
'description': try_get(common_data, lambda x: x['episode']['description'], compat_str),
'duration': float_or_none(try_get(common_data, lambda x: x['episode']['video']['duration'], float), 1000),
'thumbnails': thumbnails,
'ie_key': 'BrightcoveNew',
'season_number': season_number,
'episode_number': episode_number,
'timestamp': timestamp,
'release_date': release_date,
}
| 2.0625 | 2 |
apex/fp16_utils/fused_weight_norm.py | mcarilli/apex | 1 | 3695 | <gh_stars>1-10
import torch
from torch.autograd import Variable
from torch.autograd.function import Function, once_differentiable
import apex_C
def check_contig_cuda(tensors, names):
for tensor, name in zip(tensors, names):
if not tensor.is_contiguous():
raise RuntimeError(name+" with size {} is not contiguous"
.format(tensor.size()))
if not tensor.is_cuda:
raise RuntimeError(name+".is_cuda = False."
"Currently, only cuda tensors are supported.")
class Fused_Weight_Norm(Function):
"""
Custom autograd function that implements weight norm, as presented in
`<https://arxiv.org/abs/1602.07868>`_,
along a tensor's slowest or
fastest dimension using fused kernel launches for the forward and backward passes.
Accepts fp32 or fp16 input; the output type will match the input type.
Within the kernels, all calculations are performed in fp32 for numerical stability, regardless
of input/output precision.
"""
@staticmethod
def forward(ctx, input, g, dim=0):
"""
Args:
input(torch.cuda.FloatTensor or torch.cuda.HalfTensor): input tensor corresponding to **v** in the paper. ``input`` should be contiguous.
g(torch.cuda.FloatTensor or torch.cuda.HalfTensor): input tensor corresponding to **g** in the paper. ``g`` should be the same type as ``input``.
dim(int, optional, default=0): Dimension across which to perform weightnorm. Currently, only the first or last dimension of the input tensor is supported.
Returns:
Output tensor corresponding to **w** in the paper. Output type and precision will match
type and precision of ``input``.
"""
# torch.cuda.nvtx.range_push("FusedNorm.forward, input.size() = {}"
# .format(input.size()))
check_contig_cuda((input,g),("input","g"))
"""
This is ok, new() treats a torch.Size object properly.
No need to unpack with an asterisk via new(*input.size()).
"""
output = input.new(input.size()).contiguous()
"""
For output with size (slow, faster, faster, ...fastest), we want
norms with size (slow, 1, 1, ...1), so that if you want retrieve norms
and apply the same normalizing factors to another Tensor "t" with the
same size as output, "t/norms" will broadcast each element of norms
across the corresponding slowest dim of t.
"""
if dim == 0:
norm_size = (output.size(0),) + (1,)*(output.dim() - 1)
elif dim == output.dim() - 1:
norm_size = (1,)*(output.dim() - 1) + (output.size(-1),)
else:
raise RuntimeError("Currently, Fused_Weight_Norm only supports first or last dimension.")
norms = torch.cuda.FloatTensor(*norm_size).contiguous()
"""
Beware: If you call the following:
norms = torch.cuda.FloatTensor(norm_size).contiguous()
the constructor sees a tuple:
FloatTensor( (output_size(0),1,1,...) )
and creates a 1D tensor with values from the tuple:
[output_size(0),1,1,...].
"""
apex_C.weight_norm_fwd(output, norms, input, g, dim)
ctx.save_for_backward(input, g)
# save_for_backward can only save input or output tensors,
# use ctx state to save the norms and dimension:
ctx.norms = norms
ctx.dim = dim
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
"""
Args:
grad_output(torch.cuda.FloatTensor or torch.cuda.HalfTensor): Gradient of loss with respect to output **w**. ``grad_output`` should be contiguous for performance.
Returns:
            Gradient of loss with respect to ``input`` and ``g``. The precision of these gradients will match the precision of ``grad_output``.
"""
check_contig_cuda((grad_output), ("grad_output"))
savedInput, savedg = ctx.saved_tensors
savedNorms = ctx.norms
# We expect that these .contiguous() calls will be no-ops. They're present for safety.
grad_output_contig = grad_output.contiguous()
grad_input = grad_output_contig.new(grad_output.size()).contiguous()
grad_g = savedg.new(savedg.size()).contiguous()
apex_C.weight_norm_bwd(grad_input,
grad_g,
grad_output_contig,
savedInput,
savedg,
savedNorms,
ctx.dim)
return grad_input, grad_g, None
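# --- Illustrative usage sketch (added for clarity) ---
# A custom autograd Function is invoked through .apply(); both tensors must be
# contiguous CUDA tensors of the same type, and ``dim`` defaults to 0. The shapes
# sketched below (a (out_features, in_features) weight "v" with one gain per output
# row) are assumptions, not taken from the original documentation.
#
#   w = Fused_Weight_Norm.apply(v, g)        # or .apply(v, g, 0) to be explicit
#   loss = some_loss(w); loss.backward()     # gradients flow back into v and g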
| 2.484375 | 2 |
bzt/modules/grinder.py | gerardorf/taurus | 1 | 3696 | """
Module holds all stuff regarding Grinder tool usage
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import time
from bzt import TaurusConfigError, ToolError
from bzt.engine import ScenarioExecutor, FileLister, HavingInstallableTools, SelfDiagnosable
from bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader
from bzt.modules.console import WidgetProvider, ExecutorWidget
from bzt.modules.java import TaurusJavaHelper
from bzt.requests_model import HTTPRequest
from bzt.six import iteritems
from bzt.utils import MirrorsManager, dehumanize_time, get_full_path, PythonGenerator, CALL_PROBLEMS
from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, TclLibrary, FileReader, RESOURCES_DIR
class GrinderExecutor(ScenarioExecutor, WidgetProvider, FileLister, HavingInstallableTools, SelfDiagnosable):
"""
Grinder executor module
"""
def __init__(self):
super(GrinderExecutor, self).__init__()
self.script = None
self.exec_id = "grinder-bzt-%s" % id(self)
self.properties_file = None
self.kpi_file = None
self.cmd_line = None
self.process = None
self.end_time = None
self.retcode = None
self.java_helper = None
def __write_base_props(self, fds):
"""
write base properties and base properties file contents to fds
:param fds: fds
:return:
"""
base_props_file = self.settings.get("properties-file")
if base_props_file:
fds.write("# Base Properies File Start: %s\n" % base_props_file)
with open(base_props_file) as bpf:
fds.write(bpf.read())
fds.write("# Base Properies File End: %s\n\n" % base_props_file)
# base props
base_props = self.settings.get("properties")
if base_props:
fds.write("# Base Properies Start\n")
for key, val in iteritems(base_props):
fds.write("%s=%s\n" % (key, val))
fds.write("# Base Properies End\n\n")
def __write_scenario_props(self, fds, scenario):
"""
Write scenario props and scenario file props to fds
:param fds:
:param scenario: dict
:return:
"""
script_props_file = scenario.get("properties-file")
if script_props_file:
fds.write("# Script Properies File Start: %s\n" % script_props_file)
with open(script_props_file) as spf:
fds.write(spf.read())
fds.write("# Script Properies File End: %s\n\n" % script_props_file)
# scenario props
local_props = scenario.get("properties")
if local_props:
fds.write("# Scenario Properies Start\n")
for key, val in iteritems(local_props):
fds.write("%s=%s\n" % (key, val))
fds.write("# Scenario Properies End\n\n")
def __write_bzt_props(self, fds):
"""
Write bzt properties to fds
:param fds:
:return:
"""
fds.write("# BZT Properies Start\n")
fds.write("grinder.hostID=%s\n" % self.exec_id)
fds.write("grinder.script=%s\n" % self.script.replace(os.path.sep, "/"))
fds.write("grinder.logDirectory=%s\n" % self.engine.artifacts_dir.replace(os.path.sep, "/"))
load = self.get_load()
if load.iterations or load.concurrency:
fds.write("grinder.runs=%s\n" % load.iterations or 0)
if load.concurrency:
fds.write("grinder.threads=%s\n" % load.concurrency)
if load.duration:
fds.write("grinder.duration=%s\n" % int(load.duration * 1000))
fds.write("# taurus load values in case you need them\n")
fds.write("taurus.concurrency=%s\n" % load.concurrency)
fds.write("taurus.throughput=%s\n" % load.throughput)
fds.write("taurus.ramp_up=%s\n" % load.ramp_up)
fds.write("taurus.steps=%s\n" % load.steps)
fds.write("taurus.hold_for=%s\n" % load.hold)
fds.write("taurus.iterations=%s\n" % load.iterations)
fds.write("# BZT Properies End\n")
def prepare(self):
self.stdout = open(self.engine.create_artifact("grinder", ".out"), "w")
self.stderr = open(self.engine.create_artifact("grinder", ".err"), "w")
self.install_required_tools()
scenario = self.get_scenario()
self.exec_id = self.label
self.script = self.get_script_path()
if not self.script:
if "requests" in scenario:
self.script = self.__scenario_from_requests()
else:
msg = "There must be a script file or requests for its generation "
msg += "to run Grinder tool (%s)" % self.execution.get('scenario')
raise TaurusConfigError(msg)
self.properties_file = self.engine.create_artifact("grinder", ".properties")
with open(self.properties_file, 'w') as fds:
self.__write_base_props(fds)
self.__write_scenario_props(fds, scenario)
self.__write_bzt_props(fds)
self.kpi_file = os.path.join(self.engine.artifacts_dir, self.exec_id + "-kpi.log")
self.reader = DataLogReader(self.kpi_file, self.log)
self.reader.report_by_url = self.settings.get("report-by-url", False)
if isinstance(self.engine.aggregator, ConsolidatingAggregator):
self.engine.aggregator.add_underling(self.reader)
# add logback configurations used by worker processes (logback-worker.xml)
self.env.add_path({"CLASSPATH": RESOURCES_DIR}, finish=True)
self.env.add_path({"CLASSPATH": self.java_helper.tool_path}, finish=True)
self.env.add_path({"CLASSPATH": self.settings.get("path", None)}, finish=True)
self.cmd_line = ["java", "net.grinder.Grinder", self.properties_file]
def startup(self):
"""
Should start the tool as fast as possible.
"""
self.env.set({"T_GRINDER_PREFIX": self.exec_id})
self.process = self.execute(self.cmd_line)
def check(self):
"""
        Checks if the tool is still running. Also checks whether the resulting log
        contains any data and throws an exception otherwise.
        :return: bool
        :raise ToolError:
"""
self.retcode = self.process.poll()
if self.retcode is not None:
if self.retcode != 0:
raise ToolError("Gatling tool exited with non-zero code: %s" % self.retcode,
self.get_error_diagnostics())
return True
return False
def shutdown(self):
"""
If tool is still running - let's stop it.
"""
shutdown_process(self.process, self.log)
if self.start_time:
self.end_time = time.time()
self.log.debug("Grinder worked for %s seconds", self.end_time - self.start_time)
def post_process(self):
"""
Collect data file artifact
"""
if self.kpi_file:
self.engine.existing_artifact(self.kpi_file)
super(GrinderExecutor, self).post_process()
def __scenario_from_requests(self):
"""
Generate grinder scenario from requests
:return: script
"""
script = self.engine.create_artifact("grinder_requests", ".py")
builder = GrinderScriptBuilder(self.get_scenario(), self.log)
builder.label = self.label
builder.build_source_code()
builder.save(script)
return script
def install_required_tools(self):
grinder = self._get_tool(Grinder, config=self.settings)
self.settings["path"] = grinder.tool_path
self.java_helper = self._get_tool(TaurusJavaHelper)
required_tools = [self._get_tool(TclLibrary),
self._get_tool(JavaVM),
self.java_helper,
grinder]
for tool in required_tools:
if not tool.check_if_installed():
tool.install()
def get_widget(self):
if not self.widget:
if self.script is not None:
label = "Grinder: %s" % os.path.basename(self.script)
else:
label = None
self.widget = ExecutorWidget(self, label)
if self.get_load().ramp_up:
self.widget.duration += self.get_load().ramp_up # because we have ramp-down equal to rampup
return self.widget
def resource_files(self):
resource_files = []
script_file_path = self.get_script_path()
if script_file_path:
resource_files.append(script_file_path)
prop_file = self.get_scenario().get("properties-file")
if prop_file:
resource_files.append(prop_file)
return resource_files
def get_error_diagnostics(self):
diagnostics = []
if self.stdout is not None:
with open(self.stdout.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Grinder STDOUT:\n" + contents)
if self.stderr is not None:
with open(self.stderr.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Grinder STDOUT:\n" + contents)
return diagnostics
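# Illustrative sketch (added for clarity): the grinder.properties file assembled in
# prepare() ends up looking roughly like the block below (values are examples only).
#
#   grinder.hostID=grinder-local
#   grinder.script=/path/to/artifacts/grinder_requests.py
#   grinder.logDirectory=/path/to/artifacts
#   grinder.runs=0
#   grinder.threads=10
#   grinder.duration=60000
#   taurus.concurrency=10
#   taurus.ramp_up=30
#   ...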
class DataLogReader(ResultsReader):
""" Class to read KPI from data log """
DELIMITER = ","
DETAILS_REGEX = re.compile(r"worker\.(\S+) (.+) -> (\S+) (.+), (\d+) bytes")
def __init__(self, filename, parent_logger):
super(DataLogReader, self).__init__()
self.report_by_url = False
self.log = parent_logger.getChild(self.__class__.__name__)
self.file = FileReader(filename=filename, parent_logger=self.log)
self.idx = {}
self.partial_buffer = ""
self.start_time = 0
self.end_time = 0
self.concurrency = 0
self.test_names = {}
self.known_threads = set()
def _read(self, last_pass=False):
"""
Generator method that returns next portion of data
:param last_pass:
"""
self.log.debug("Reading grinder results...")
self.lines = list(self.file.get_lines(size=1024 * 1024, last_pass=last_pass))
lnum = None
start = time.time()
for lnum, line in enumerate(self.lines):
if not self.idx:
if not line.startswith('data.'):
self.__split(line) # to capture early test name records
continue
line = line[line.find(' '):]
header_list = line.strip().split(self.DELIMITER)
for _ix, field in enumerate(header_list):
self.idx[field.strip()] = _ix
data_fields, worker_id = self.__split(line)
if not data_fields:
self.log.debug("Skipping line: %s", line.strip())
continue
yield self.parse_line(data_fields, worker_id, lnum)
if lnum is not None:
duration = time.time() - start
if duration < 0.001:
duration = 0.001
self.log.debug("Log reading speed: %s lines/s", (lnum + 1) / duration)
def parse_line(self, data_fields, worker_id, lnum):
worker_id = worker_id.split('.')[1]
t_stamp = int(int(data_fields[self.idx["Start time (ms since Epoch)"]]) / 1000.0)
r_time = int(data_fields[self.idx["Test time"]]) / 1000.0
latency = int(data_fields[self.idx["Time to first byte"]]) / 1000.0
r_code = data_fields[self.idx["HTTP response code"]].strip()
con_time = int(data_fields[self.idx["Time to resolve host"]]) / 1000.0
con_time += int(data_fields[self.idx["Time to establish connection"]]) / 1000.0
bytes_count = int(data_fields[self.idx["HTTP response length"]].strip())
test_id = data_fields[self.idx["Test"]].strip()
thread_id = worker_id + '/' + data_fields[self.idx["Thread"]].strip()
if thread_id not in self.known_threads:
self.known_threads.add(thread_id)
self.concurrency += 1
url, error_msg = self.__parse_prev_lines(worker_id, lnum, r_code, bytes_count)
if int(data_fields[self.idx["Errors"]]) or int(data_fields[self.idx['HTTP response errors']]):
if not error_msg:
if r_code != '0':
error_msg = "HTTP %s" % r_code
else:
error_msg = "Java exception calling TestRunner"
else:
error_msg = None # suppress errors
if self.report_by_url:
label = url
elif test_id in self.test_names:
label = self.test_names[test_id]
else:
label = "Test #%s" % test_id
source_id = '' # maybe use worker_id somehow?
return t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error_msg, source_id, bytes_count
def __split(self, line):
if not line.endswith("\n"):
self.partial_buffer += line
return None, None
line = "%s%s" % (self.partial_buffer, line)
self.partial_buffer = ""
line = line.strip()
if not line.startswith('data.'):
line_parts = line.split(' ')
if len(line_parts) > 1:
if line_parts[1] == 'starting,':
# self.concurrency += 1
pass
elif line_parts[1] == 'finished':
if self.concurrency > 0:
self.concurrency -= 1
elif set(line_parts[1:5]) == {'Test', 'name', 'for', 'ID'}:
test_id = line_parts[5][:-1]
test_name = ' '.join(line_parts[6:])
self.test_names[test_id] = test_name
self.log.debug("Recognized test id %s => %s", test_id, test_name)
return None, None
worker_id = line[:line.find(' ')]
line = line[line.find(' '):]
data_fields = line.split(self.DELIMITER)
if not data_fields[1].strip().isdigit():
return None, None
if len(data_fields) < max(self.idx.values()):
return None, None
return data_fields, worker_id
def __parse_prev_lines(self, worker_id, lnum, r_code, bytes_count):
url = ''
error_msg = None
for lineNo in reversed(range(max(lnum - 100, 0), lnum)): # looking max 100 lines back. TODO: parameterize?
line = self.lines[lineNo].strip()
matched = self.DETAILS_REGEX.match(line)
if not matched:
continue
if worker_id == matched.group(1) and r_code == matched.group(3) and str(bytes_count) == matched.group(5):
return matched.group(2), matched.group(4)
return url, error_msg
class Grinder(RequiredTool): # todo: take it from maven and convert to JarTool(?)
VERSION = "3.11"
LOCAL_PATH = "~/.bzt/grinder-taurus/lib/grinder.jar"
def __init__(self, config=None, **kwargs):
settings = config or {}
grinder_path = settings.get("path", self.LOCAL_PATH)
grinder_path = get_full_path(grinder_path)
download_link = settings.get("download-link", "")
super(Grinder, self).__init__(tool_path=grinder_path, download_link=download_link, **kwargs)
self.version = self.VERSION
self.mirror_manager = GrinderMirrorsManager(self.http_client, self.log, self.version)
def check_if_installed(self):
self.log.debug("Trying %s: %s", self.tool_name, self.tool_path)
try:
out, err = self.call(["java", "-classpath", self.tool_path, "net.grinder.Grinder"])
if err:
out += err
self.log.debug("%s stdout: %s", self.tool_name, out)
return True
except CALL_PROBLEMS as exc:
self.log.warning("%s check failed: %s", self.tool_name, exc)
return False
def install(self):
dest = get_full_path(self.tool_path, step_up=2)
self.log.info("Will install %s into %s", self.tool_name, dest)
grinder_dist = self._download(use_link=bool(self.download_link))
self.log.info("Unzipping %s", grinder_dist)
unzip(grinder_dist, dest, 'grinder-' + self.version)
os.remove(grinder_dist)
self.log.info("Installed grinder successfully")
if not self.check_if_installed():
raise ToolError("Unable to run %s after installation!" % self.tool_name)
class GrinderMirrorsManager(MirrorsManager):
MIRRORS_SOURCE = "https://sourceforge.net/settings/mirror_choices?projectname=grinder&filename=The%20Grinder" \
"%203/{version}/grinder-{version}-binary.zip&dialog=true"
DOWNLOAD_LINK = "https://downloads.sourceforge.net/project/grinder/The%20Grinder%203/{version}" \
"/grinder-{version}-binary.zip?r=&ts=" + str(int(time.time())) + "&use_mirror=autoselect"
def __init__(self, http_client, parent_logger, grinder_version):
self.grinder_version = grinder_version
base_link = self.MIRRORS_SOURCE.format(version=self.grinder_version)
super(GrinderMirrorsManager, self).__init__(http_client, base_link, parent_logger)
def _parse_mirrors(self):
links = []
if self.page_source is not None:
self.log.debug('Parsing mirrors...')
base_link = "http://sourceforge.net/projects/grinder/files/The%20Grinder%203/{version}/grinder-{version}" \
"-binary.zip/download?use_mirror={mirror}"
li_search_pattern = re.compile(r'<li id=".*?">')
li_elements = li_search_pattern.findall(self.page_source)
if li_elements:
links = [base_link.format(version=self.grinder_version, mirror=link.strip('<li id="').strip('">')) for
link in li_elements]
default_link = self.DOWNLOAD_LINK.format(version=self.grinder_version)
if default_link not in links:
links.append(default_link)
self.log.debug('Total mirrors: %d', len(links))
return links
class GrinderScriptBuilder(PythonGenerator):
IMPORTS = """
from net.grinder.script import Test
from net.grinder.script.Grinder import grinder
from net.grinder.plugin.http import HTTPRequest, HTTPPluginControl, HTTPUtilities
from HTTPClient import NVPair
"""
def __init__(self, scenario, parent_logger):
super(GrinderScriptBuilder, self).__init__(scenario, parent_logger)
self.label = "BZT Requests"
def build_source_code(self):
self.log.debug("Generating Python script for Grinder")
self.root.append(self.gen_comment("This script was generated by Taurus", indent=0))
self.root.append(self.add_imports())
self.root.append(self.gen_new_line())
default_address = self.scenario.get("default-address")
url_arg = "url=%r" % default_address if default_address else ""
self.root.append(self.gen_statement('request = HTTPRequest(%s)' % url_arg, indent=0))
self.root.append(self.gen_statement('test = Test(1, "%s")' % self.label, indent=0))
self.root.append(self.gen_statement('test.record(request)', indent=0))
self.root.append(self.gen_new_line())
self.root.append(self.gen_statement("defaults = HTTPPluginControl.getConnectionDefaults()", indent=0))
self.root.append(self.gen_statement("utilities = HTTPPluginControl.getHTTPUtilities()", indent=0))
headers = self.scenario.get_headers()
if not self.scenario.get("keepalive", True):
headers['Connection'] = 'close'
if headers:
self.root.append(self.gen_statement("defaults.setDefaultHeaders([", indent=0))
for header, value in iteritems(headers):
self.root.append(self.gen_statement("NVPair(%r, %r)," % (header, value), indent=4))
self.root.append(self.gen_statement("])", indent=0))
global_timeout = dehumanize_time(self.scenario.get("timeout", None))
if global_timeout:
self.root.append(self.gen_statement("defaults.setTimeout(%s)" % int(global_timeout * 1000), indent=0))
cookie_flag = int(self.scenario.get("store-cookie", True))
self.root.append(self.gen_statement("defaults.setUseCookies(%s)" % cookie_flag, indent=0))
self.root.append(self.gen_new_line())
self.root.append(self.gen_runner_class())
@staticmethod
def __list_to_nvpair_list(items):
return "[" + ",".join("NVPair(%r, %r)" % (header, value) for header, value in items) + "]"
def gen_runner_class(self):
runner_classdef = self.gen_class_definition("TestRunner", ["object"])
sleep_method = self.gen_method_definition("rampUpSleeper", ["self"])
sleep_method.append(self.gen_statement("if grinder.runNumber != 0: return"))
sleep_method.append(self.gen_statement("tprops = grinder.properties.getPropertySubset('taurus.')"))
sleep_method.append(self.gen_statement("inc = tprops.getDouble('ramp_up', 0)/tprops.getInt('concurrency', 1)"))
sleep_method.append(self.gen_statement("sleep_time = int(1000 * grinder.threadNumber * inc)"))
sleep_method.append(self.gen_statement("grinder.sleep(sleep_time, 0)"))
sleep_method.append(self.gen_statement("if sleep_time: grinder.logger.info('slept for %sms' % sleep_time)"))
sleep_method.append(self.gen_statement("else: grinder.logger.info('No sleep needed')"))
sleep_method.append(self.gen_new_line())
runner_classdef.append(sleep_method)
main_method = self.gen_method_definition("__call__", ["self"])
main_method.append(self.gen_statement("self.rampUpSleeper()"))
for req in self.scenario.get_requests():
if not isinstance(req, HTTPRequest):
msg = "Grinder script generator doesn't support '%s' blocks, skipping"
self.log.warning(msg, req.NAME)
continue
method = req.method.upper()
url = req.url
local_headers = req.headers
params = "[]"
headers = self.__list_to_nvpair_list(iteritems(local_headers))
main_method.append(self.gen_statement("request.%s(%r, %s, %s)" % (method, url, params, headers)))
think_time = dehumanize_time(req.priority_option('think-time'))
if think_time:
main_method.append(self.gen_statement("grinder.sleep(%s)" % int(think_time * 1000)))
runner_classdef.append(main_method)
return runner_classdef
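# Illustrative sketch (added for clarity): for a scenario with a single GET request,
# GrinderScriptBuilder emits a Jython script shaped roughly like this (URL and label
# are example values only):
#
#   # This script was generated by Taurus
#   from net.grinder.script import Test
#   from net.grinder.script.Grinder import grinder
#   from net.grinder.plugin.http import HTTPRequest, HTTPPluginControl, HTTPUtilities
#   from HTTPClient import NVPair
#
#   request = HTTPRequest(url='http://example.com')
#   test = Test(1, "BZT Requests")
#   test.record(request)
#   defaults = HTTPPluginControl.getConnectionDefaults()
#   utilities = HTTPPluginControl.getHTTPUtilities()
#   defaults.setUseCookies(1)
#
#   class TestRunner(object):
#       def rampUpSleeper(self):
#           ...
#       def __call__(self):
#           self.rampUpSleeper()
#           request.GET('http://example.com/', [], [])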
| 1.578125 | 2 |
test/Fortran/fixture/myfortran_flags.py | moroten/scons | 1,403 | 3697 | <reponame>moroten/scons<gh_stars>1000+
import getopt
import sys
comment = ('#' + sys.argv[1]).encode()
opts, args = getopt.getopt(sys.argv[2:], 'cf:o:xy')
optstring = ''
length = len(comment)
for opt, arg in opts:
if opt == '-o': out = arg
elif opt not in ('-f', '-K'): optstring = optstring + ' ' + opt
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
outfile.write((optstring + "\n").encode())
for l in infile.readlines():
if l[:length] != comment:
outfile.write(l)
sys.exit(0)
| 2.5 | 2 |
zen_knit/organizer/__init__.py | Zen-Reportz/zen_knit | 30 | 3698 | <gh_stars>10-100
import io
import os
import base64
from pathlib import Path
from nbconvert import filters
from pygments.formatters.latex import LatexFormatter
from zen_knit import formattor
from zen_knit.data_types import ChunkOption, ExecutedData, OrganizedChunk, OrganizedData
from zen_knit.formattor.html_formatter import HTMLFormatter
mime_extensions = {"image/png" : "png",
"image/jpg" : "jpg"}
class BaseOrganizer:
def __init__(self, executed_data: ExecutedData):
self.format_started = False
self.collected_string = ""
self.fig_folder = None
self.executed_data = executed_data
self.formatted_doc = []
self.organized_data = OrganizedData(
global_options = self.executed_data.global_options,
chunks = []
)
self._create_output_folder_name()
self._create_fig_folder()
self._organize_doc()
self._create_output_file_name()
def _create_output_file_name(self):
global_options = self.organized_data.global_options
global_options.output.file_name = global_options.input.file_name.split(".")[0] + "."+ global_options.output.format
def _create_output_folder_name(self):
global_options = self.organized_data.global_options
if global_options.output.dir is None:
global_options.output.dir = global_options.input.dir
def _create_fig_folder(self):
output_folder = self.organized_data.global_options.output.dir
Path(output_folder).mkdir(parents=True, exist_ok=True)
fig_folder = os.path.join(output_folder, self.organized_data.global_options.output.fig_dir)
self.fig_folder = fig_folder
Path(fig_folder).mkdir(parents=True, exist_ok=True)
def _parse_raw(self, data, output_type):
if data.get("code_text_raw") is not None:
if self._clean_up(data['code_text_raw']) is not None:
if output_type in ("code"):
t = {"type": "code", "str_data": data['code_text_raw'] }
elif output_type in ("sql"):
t = {"type": "sql", "str_data": data['code_text_raw'] }
else:
t = {"type": "markdown", "str_data": data['code_text_raw'] }
self.organized_data.chunks.append(OrganizedChunk(**t))
return True
else:
return False
def _coder_string(self, data):
list_ = ["stream", "error"]
if data["output_type"] is None:
return False
if data["output_type"] in list_:
if data["output_type"] == "stream":
if self._clean_up(data['text']) is not None:
t = {"type": "se_data", "str_data": data['text'] }
self.organized_data.chunks.append(OrganizedChunk(**t))
if data["output_type"] == "error":
t = {"type": "se_data", "str_data": data["evalue"] + filters.strip_ansi("".join(data["traceback"])) }
self.organized_data.chunks.append(OrganizedChunk(**t))
return True
return False
def _raw_string(self, data):
if data["output_type"] is None:
return False
if data["output_type"] == "execute_result":
if data.get("data") is not None:
if 'matplotlib' in data["data"]["text/plain"]:
# Doing nothing here
return True
else:
if ((data["data"]["text/plain"][0] == "'") or (data["data"]["text/plain"][0] == '"')):
temp = data["data"]["text/plain"][1:-1]
else:
temp = data["data"]["text/plain"]
if "<table" in temp:
t = {"type": "html_data", "str_data":temp.encode().decode() }
self.organized_data.chunks.append(OrganizedChunk(**t))
return True
# if "BokehJS" in temp:
# t = {"type": "html_data", "str_data": "<script type='text/javascript'>" + temp.encode().decode() + "</script>" }
# self.organized_data.chunks.append(OrganizedChunk(**t))
# return True
if self._clean_up(temp) is not None:
t = {"type": "e_data", "str_data":temp }
self.organized_data.chunks.append(OrganizedChunk(**t))
return True
return True
return False
def _raw_plots(self, data, chunk_option:ChunkOption):
if data["output_type"] is None:
return False
if data["output_type"] == "display_data":
plot_infos = self._save_plots(data, chunk_option)
t = {"type": "plot", "complex_data":{"plots": plot_infos, "options": chunk_option }}
self.organized_data.chunks.append(OrganizedChunk(**t))
return True
return False
def _save_plots(self, data, chunk_option:ChunkOption):
figs = []
i = 1
for m in mime_extensions:
if m in data["data"]:
fig_full_path, fig_relative_path = self._build_file(mime_extensions[m], i, chunk_option.fig_caption, chunk_option.name)
figs.append(fig_relative_path)
bfig = base64.b64decode(data["data"][m])
with open(fig_full_path, "wb") as f:
f.write(bfig)
i += 1
return figs
def _build_file(self, extension, index, fig_caption= None, name =None):
fig_name = ""
if fig_caption is not None:
fig_name = fig_name + "_" + fig_caption
if name is not None:
fig_name = fig_name + "_" + name
fig_name = fig_name + "_" + str(index)
fig_name = fig_name + "." + extension
return os.path.join(self.fig_folder, fig_name), os.path.join(self.fig_folder, fig_name)
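    # Illustrative note (added for clarity, example values only): with
    # fig_caption='scatter', name='chunk1', index=1 and extension 'png',
    # _build_file() produces '_scatter_chunk1_1.png' inside the configured
    # figure folder and returns that joined path for both values.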
def _interactive_plots(self, data):
if data["output_type"] is None:
return False
if data["output_type"] == "display_data":
if "text/html" in data["data"]:
print(self.executed_data.global_options.output.format)
if self.executed_data.global_options.output.format != "html":
raise Exception("output format is not HTML")
else:
t = {"type": "html_data", "str_data":data["data"]["text/html"].encode().decode() }
self.organized_data.chunks.append(OrganizedChunk(**t))
return True
return False
def _organize_doc(self):
for index, chunk in enumerate(self.executed_data.chunks):
chunk_option = chunk.chunk.options
if chunk_option.name:
print(f"organizing {chunk_option.name}")
else:
print(f"organizing index {index}")
results = chunk.results
for result in results:
data = result.data
present = self._parse_raw(data, result.output_type)
if present:
continue
present = self._coder_string(data)
if present:
continue
present = self._raw_string(data)
if present:
continue
present = self._interactive_plots(data)
if present:
continue
present = self._raw_plots(data, chunk_option)
if present:
continue
print("not supported format", data)
t = []
c: OrganizedChunk
for c in self.organized_data.chunks:
last_chank: OrganizedChunk
if len(t)> 0:
last_chank = t[-1]
else:
last_chank = None
if last_chank is None:
t.append(c)
else:
if (c.type == last_chank.type) & (c.type != "plot"):
last_chank.str_data = last_chank.str_data + "\n" + c.str_data
else:
t.append(c)
self.organized_data.chunks = t
@staticmethod
def _clean_up(doc):
d = doc.replace(" ", "").replace("\n", "")
if len(d) != 0:
return doc
else:
return None
# markdown_file = self.executed_data.global_options.input_file_name.split(".")[0] + ".md"
# markdown_file = os.path.join(self.executed_data.global_options.output_file_dir , markdown_file)
# with open(markdown_file, "w") as f:
# text = "\n".join(self.formatted_doc)
# f.write(text)
| 2.109375 | 2 |
qibullet/robot_virtual.py | mcaniot/qibullet | 0 | 3699 | <filename>qibullet/robot_virtual.py
#!/usr/bin/env python
# coding: utf-8
import sys
import pybullet
from qibullet.camera import *
from qibullet.link import Link
from qibullet.joint import Joint
IS_VERSION_PYTHON_3 = sys.version_info[0] >= 3
class RobotVirtual:
"""
Mother class representing a virtual robot
"""
def __init__(self, description_file):
"""
Constructor
Parameters:
description_file - The file giving the description of the virtual
robot. For now, only URDF is handled
"""
self.description_file = description_file
self.physics_client = 0
self.active_camera = None
self.camera_dict = dict()
self.joint_dict = dict()
self.link_dict = dict()
def loadRobot(self, translation, quaternion, physicsClientId=0):
"""
Loads the robot into a simulation, loads the joints and the links
descriptions. The joints are set to 0 rad.
Parameters:
translation - List containing 3 elements, the translation [x, y, z]
of the robot in the WORLD frame
quaternion - List containing 4 elements, the quaternion
[x, y, z, q] of the robot in the WORLD frame
physicsClientId - The id of the simulated instance in which the
robot is supposed to be loaded
Returns:
boolean - True if the method ran correctly, False otherwise
"""
try:
self.physics_client = physicsClientId
self.robot_model = pybullet.loadURDF(
self.description_file,
translation,
quaternion,
useFixedBase=False,
globalScaling=1.0,
physicsClientId=self.physics_client,
flags=pybullet.URDF_USE_SELF_COLLISION |
pybullet.URDF_USE_MATERIAL_COLORS_FROM_MTL)
except pybullet.error as e:
raise pybullet.error("Cannot load robot model: " + str(e))
for i in range(pybullet.getNumJoints(
self.robot_model,
physicsClientId=self.physics_client)):
if IS_VERSION_PYTHON_3:
# PYTHON 3 version needs a conversion bytes to str
joint_info = pybullet.getJointInfo(
self.robot_model,
i,
physicsClientId=self.physics_client)
self.link_dict[joint_info[12].decode('utf-8')] =\
Link(joint_info)
if joint_info[2] == pybullet.JOINT_PRISMATIC or\
joint_info[2] == pybullet.JOINT_REVOLUTE:
self.joint_dict[joint_info[1].decode('utf-8')] =\
Joint(joint_info)
else:
# PYTHON 2 Version
joint_info = pybullet.getJointInfo(
self.robot_model,
i,
physicsClientId=self.physics_client)
self.link_dict[joint_info[12]] = Link(joint_info)
if joint_info[2] == pybullet.JOINT_PRISMATIC or\
joint_info[2] == pybullet.JOINT_REVOLUTE:
self.joint_dict[joint_info[1]] = Joint(joint_info)
def getRobotModel(self):
"""
Returns the pybullet model to which the module is associated.
Returns:
robot_model - The pybullet model of the robot
"""
return self.robot_model
def getPhysicsClientId(self):
"""
Returns the id of the simulated instance in which the module is loaded.
Returns:
physics_client - The id of the simulation in which the robot
(possessing the module) is spawned
"""
return self.physics_client
def setAngles(self, joint_names, joint_values, percentage_speeds):
"""
Set angles on the robot's joints. Tests have to be performed by the
child class to guarantee the validity of the input parameters.
Parameters:
joint_names - List of string containing the name of the joints
to be controlled
joint_values - List of values corresponding to the angles in
radians to be applied
percentage_speeds - Percentages of the max speed to be used for
each joint, has to be strictly superior to 0 and inferior or equal
to 1
"""
try:
assert len(joint_names) ==\
len(joint_values) ==\
len(percentage_speeds)
assert all(
speed >= 0.0 and speed <= 1.0 for speed in percentage_speeds)
except AssertionError:
raise pybullet.error("Error in the setAngles parameters")
for joint_name, joint_value, percentage_speed in zip(
joint_names,
joint_values,
percentage_speeds):
joint_speed =\
self.joint_dict[joint_name].getMaxVelocity() *\
percentage_speed
pybullet.setJointMotorControl2(
self.robot_model,
self.joint_dict[joint_name].getIndex(),
pybullet.POSITION_CONTROL,
targetPosition=joint_value,
maxVelocity=joint_speed,
force=self.joint_dict[joint_name].getMaxEffort(),
physicsClientId=self.physics_client)
def getAnglesPosition(self, joint_names):
"""
        Gets the position of the robot's joints in radians. If one of the joints
        doesn't exist, the method will raise a KeyError.
Parameters:
joint_names - List of string containing the names of the joints
Returns:
joint_positions - List of floats containing the joint's positions
"""
joint_positions = list()
for joint_name in joint_names:
joint_positions.append(pybullet.getJointState(
self.robot_model,
self.joint_dict[joint_name].getIndex(),
physicsClientId=self.physics_client)[0])
return joint_positions
def getAnglesVelocity(self, joint_names):
"""
        Gets the velocity of the robot's joints in rad/s. If one of the joints
        doesn't exist, the method will raise a KeyError.
Parameters:
joint_names - List of string containing the names of the joints
Returns:
joint_velocities - List of floats containing the joint's velocities
"""
joint_velocities = list()
for joint_name in joint_names:
joint_velocities.append(pybullet.getJointState(
self.robot_model,
self.joint_dict[joint_name].getIndex(),
physicsClientId=self.physics_client)[1])
return joint_velocities
def subscribeCamera(self, camera_id, resolution=Camera.K_QVGA):
"""
Subscribe to the camera holding the camera id. WARNING: at the moment,
only one camera can be subscribed.
Parameters:
camera_id - The id of the camera to be subscribed
resolution - CameraResolution object, the resolution of the camera
"""
try:
self.active_camera = self.camera_dict[camera_id]
self.active_camera.subscribe(resolution=resolution)
except KeyError:
print("This camera does not exist, use a valid camera id")
def unsubscribeCamera(self, camera_id):
"""
Unsubscribe from a camera, the one holding the camera id.
Parameters:
camera_id - The id of the camera to be unsubscribed
"""
try:
# If no active camera is found, nothing is unsubscribed
assert self.active_camera is not None
if self.active_camera.getCameraId() == camera_id:
self.active_camera.unsubscribe()
self.active_camera = None
except KeyError:
print("This camera does not exist, use a valid camera id")
except AssertionError:
pass
def getCameraFrame(self):
"""
Returns a camera frame. Be advised that the subscribeCamera method
needs to be called beforehand, otherwise a pybullet error will be
raised.
Returns:
frame - The current camera frame as a formatted numpy array,
directly exploitable from OpenCV
"""
try:
assert self.active_camera is not None
return self.active_camera.getFrame()
except AssertionError:
raise pybullet.error("No active camera, cannot retrieve any frame")
def getCameraResolution(self):
"""
Returns the resolution of the active camera. Be advised that the
subscribeCamera method needs to be called beforehand, otherwise a
pybullet error will be raised.
Returns:
resolution - a CameraResolution object describing the resolution of
the active camera
"""
try:
assert self.active_camera is not None
return self.active_camera.getResolution()
except KeyError:
raise pybullet.error("No active camera, resolution unavailable")
def getCameraLink(self):
"""
Returns the link of the active camera. Be advised that the
subscribeCamera method needs to be called beforehand, otherwise a
pybullet error will be raised.
Returns:
            link - a Link object describing the link to which the active
            camera is attached
"""
try:
assert self.active_camera is not None
return self.active_camera.getCameraLink()
except KeyError:
raise pybullet.error("No active camera, cannot retrieve any link")
def getActiveCamera(self):
"""
Returns the active camera of the robot.
Returns:
active_camera - Camera (CameraRgb or CameraDepth) object, the
active camera of the robot. If there is no active camera, a None is
returned
"""
return self.active_camera
def getPosition(self):
"""
Gets the position of the robot's base in the world frame.
Returns:
            x - The position of the robot's base on the x axis, in meters
            y - The position of the robot's base on the y axis, in meters
            theta - The rotation of the robot's base around the z axis, in radians
"""
position, quaternions = pybullet.getBasePositionAndOrientation(
self.robot_model,
physicsClientId=self.physics_client)
theta = pybullet.getEulerFromQuaternion(quaternions)[2]
return position[0], position[1], theta
def isSelfColliding(self, link_names):
"""
Specifies if a link is colliding with the rest of the virtual robot.
Parameters:
link_names - String or list of string containing the names of the
links to be checked for self collision. WARNING: only the links
with corresponding meshes should be used, otherwise the link cannot
self collide
Returns:
self_colliding - Boolean, if True at least one of the links is self
colliding
"""
try:
if type(link_names) is str:
assert link_names in self.link_dict.keys()
names = [link_names]
else:
assert set(link_names).issubset(self.link_dict.keys())
names = list(link_names)
for name in names:
contact_tuple = pybullet.getContactPoints(
bodyA=self.robot_model,
bodyB=self.robot_model,
linkIndexA=self.link_dict[name].getIndex(),
physicsClientId=self.physics_client)
contact_tuple += pybullet.getContactPoints(
bodyA=self.robot_model,
bodyB=self.robot_model,
linkIndexB=self.link_dict[name].getIndex(),
physicsClientId=self.physics_client)
if len(contact_tuple) != 0:
return True
return False
except AssertionError:
raise pybullet.error(
"Unauthorized link checking for self collisions")
| 2.578125 | 3 |