"""
Xarray API for xhistogram.
"""
import xarray as xr
import numpy as np
from collections import OrderedDict
from .core import histogram as _histogram
def histogram(*args, bins=None, dim=None, weights=None, density=False,
block_size='auto', bin_dim_suffix='_bin',
bin_edge_suffix='_bin_edge'):
"""Histogram applied along specified dimensions.
Parameters
----------
args : xarray.DataArray objects
Input data. The number of input arguments determines the dimensionality of
the histogram. For example, two arguments produce a 2D histogram. All
args must be aligned and have the same dimensions.
bins : int or array_like or a list of ints or arrays, optional
If a list, there should be one entry for each item in ``args``.
The bin specification:
* If int, the number of bins for all arguments in ``args``.
* If array_like, the bin edges for all arguments in ``args``.
* If a list of ints, the number of bins for every argument in ``args``.
* If a list of arrays, the bin edges for each argument in ``args``
(required format for Dask inputs).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
A ``TypeError`` will be raised if ``args`` contains dask arrays and
``bins`` are not specified explicitly as a list of arrays.
dim : tuple of strings, optional
Dimensions over which the histogram is computed. The default is to
compute the histogram of the flattened array.
weights : array_like, optional
An array of weights, of the same shape as the input args. Each value
only contributes its associated weight towards the bin count
(instead of 1). If `density` is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
density : bool, optional
If ``False``, the result will contain the number of samples in
each bin. If ``True``, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
block_size : int or 'auto', optional
A parameter which governs the algorithm used to compute the histogram.
Using a nonzero value splits the histogram calculation over the
non-histogram axes into blocks of size ``block_size``, iterating over
them with a loop (numpy inputs) or in parallel (dask inputs). If
``'auto'``, blocks will be determined either by the underlying dask
chunks (dask inputs) or an experimental built-in heuristic (numpy inputs).
Returns
-------
hist : array
The values of the histogram.
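Examples
--------
A minimal usage sketch (the array name, sizes and bin edges below are
illustrative, not taken from real data):
>>> import numpy as np
>>> import xarray as xr
>>> temp = xr.DataArray(np.random.randn(100, 20), dims=['time', 'x'],
...                     name='temp')
>>> bins = np.linspace(-4, 4, 21)
>>> h = histogram(temp, bins=[bins], dim=['time'])
>>> h.dims
('x', 'temp_bin')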
"""
N_args = len(args)
# TODO: allow list of weights as well
N_weights = 1 if weights is not None else 0
# some sanity checks
# TODO: replace this with a more robust function
assert len(bins)==N_args
for bin in bins:
assert isinstance(bin, np.ndarray), 'all bins must be numpy arrays'
for a in args:
# TODO: make this a more robust check
assert a.name is not None, 'all arrays must have a name'
# we drop coords to simplify alignment
args = [da.reset_coords(drop=True) for da in args]
if N_weights:
args += [weights.reset_coords(drop=True)]
# explicitly broadcast so we understand what is going into apply_ufunc
# (apply_ufunc might be doing this by itself again)
args = list(xr.align(*args, join='exact'))
# what happens if we skip this?
#args = list(xr.broadcast(*args))
a0 = args[0]
a_dims = a0.dims
# roll our own broadcasting
# now manually expand the arrays
all_dims = [d for a in args for d in a.dims]
all_dims_ordered = list(OrderedDict.fromkeys(all_dims))
args_expanded = []
for a in args:
expand_keys = [d for d in all_dims_ordered if d not in a.dims]
a_expanded = a.expand_dims({k: 1 for k in expand_keys})
args_expanded.append(a_expanded)
# only transpose if necessary, to avoid creating unnecessary dask tasks
args_transposed = []
for a in args_expanded:
if a.dims != all_dims_ordered:
args_transposed.append(a.transpose(*all_dims_ordered))
else:
args_transposed.append(a)
args_data = [a.data for a in args_transposed]
if N_weights:
weights_data = args_data.pop()
else:
weights_data = None
if dim is not None:
dims_to_keep = [d for d in all_dims_ordered if d not in dim]
axis = [args_transposed[0].get_axis_num(d) for d in dim]
else:
dims_to_keep = []
axis = None
h_data = _histogram(*args_data, weights=weights_data, bins=bins, axis=axis,
block_size=block_size)
# create output dims
new_dims = [a.name + bin_dim_suffix for a in args[:N_args]]
output_dims = dims_to_keep + new_dims
# create new coords
bin_centers = [0.5*(bin[:-1] + bin[1:]) for bin in bins]
new_coords = {name: ((name,), bin_center, a.attrs)
for name, bin_center, a in zip(new_dims, bin_centers, args)}
old_coords = {name: a0[name]
for name in dims_to_keep if name in a0.coords}
all_coords = {}
all_coords.update(old_coords)
all_coords.update(new_coords)
# CF conventions tell us how to specify cell boundaries
# http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/cf-conventions.html#cell-boundaries
# However, they require introduction of an additional dimension.
# I don't like that.
edge_dims = [a.name + bin_edge_suffix for a in args[:N_args]]
edge_coords = {name: ((name,), bin_edge, a.attrs)
for name, bin_edge, a in zip(edge_dims, bins, args)}
output_name = '_'.join(['histogram'] + [a.name for a in args[:N_args]])
da_out = xr.DataArray(h_data, dims=output_dims, coords=all_coords,
name=output_name)
return da_out
# we need weights to be passed through apply_func's alignment algorithm,
# so we include it as an arg, so we create a wrapper function to do so
# this feels like a hack
# def _histogram_wrapped(*args, **kwargs):
# alist = list(args)
# weights = [alist.pop() for n in range(N_weights)]
# if N_weights == 0:
# weights = None
# elif N_weights == 1:
# weights = weights[0] # squeeze
# return _histogram(*alist, weights=weights, **kwargs)
|
from .skill import Trello
create_skill = Trello
|
########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import logging
import os
import sys
import threading
import traceback
from cloudify_rest_client.executions import Execution
from cloudify_rest_client.exceptions import InvalidExecutionUpdateStatus
from cloudify import logs
from cloudify import exceptions
from cloudify import state
from cloudify import context
from cloudify import utils
from cloudify import constants
from cloudify._compat import queue, StringIO
from cloudify.manager import update_execution_status, get_rest_client
from cloudify.constants import LOGGING_CONFIG_FILE
from cloudify.error_handling import serialize_known_exception
try:
from cloudify.workflows import api
from cloudify.workflows import workflow_context
except ImportError:
workflow_context = None
api = None
DISPATCH_LOGGER_FORMATTER = logging.Formatter(
'%(asctime)s [%(name)s] %(levelname)s: %(message)s')
class TaskHandler(object):
NOTSET = object()
def __init__(self, cloudify_context, args, kwargs, process_registry=None):
self.cloudify_context = cloudify_context
self.args = args
self.kwargs = kwargs
self._ctx = None
self._func = self.NOTSET
self._logfiles = {}
self._process_registry = process_registry
def handle(self):
raise NotImplementedError('Implemented by subclasses')
def setup_logging(self):
logs.setup_subprocess_logger()
self._update_logging_level()
@staticmethod
def _update_logging_level():
if not os.path.isfile(LOGGING_CONFIG_FILE):
return
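# Each non-comment, non-blank line of the config file is expected to hold
# a level name followed by a logger name, e.g. "DEBUG cloudify.plugins"
# (the logger name here is only illustrative).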
with open(LOGGING_CONFIG_FILE, 'r') as config_file:
config_lines = config_file.readlines()
for line in config_lines:
if not line.strip() or line.startswith('#'):
continue
level_name, logger_name = line.split()
level_id = logging.getLevelName(level_name.upper())
if not isinstance(level_id, int):
continue
logging.getLogger(logger_name).setLevel(level_id)
@property
def ctx_cls(self):
raise NotImplementedError('implemented by subclasses')
@property
def ctx(self):
if not self._ctx:
self._ctx = self.ctx_cls(self.cloudify_context)
return self._ctx
@property
def func(self):
if self._func is self.NOTSET:
try:
self._func = self.get_func()
except Exception:
self._func = None
return self._func
def get_func(self):
task_name = self.cloudify_context['task_name']
return utils.get_func(task_name)
class OperationHandler(TaskHandler):
@property
def ctx_cls(self):
return context.CloudifyContext
def _validate_operation_func(self):
if not self.func:
# if there is a problem importing/getting the operation function,
# this will raise and bubble up
self.get_func()
def _validate_operation_resumable(self):
if self.ctx.resume and not getattr(self._func, 'resumable', False):
raise exceptions.NonRecoverableError(
'Cannot resume - operation not resumable: {0}'
.format(self._func))
def handle(self):
self._validate_operation_func()
ctx = self.ctx
kwargs = self.kwargs
if not ctx.task_target:
# task is local (not through AMQP) so we need to clone kwargs
kwargs = copy.deepcopy(kwargs)
with state.current_ctx.push(ctx, kwargs):
if self.cloudify_context.get('has_intrinsic_functions'):
kwargs = ctx._endpoint.evaluate_functions(payload=kwargs)
if not self.cloudify_context.get('no_ctx_kwarg'):
kwargs['ctx'] = ctx
with state.current_ctx.push(ctx, kwargs):
self._validate_operation_resumable()
result = self._run_operation_func(ctx, kwargs)
if ctx.operation._operation_retry:
raise ctx.operation._operation_retry
return result
def _run_operation_func(self, ctx, kwargs):
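# Run the operation function itself and, whether it returns or raises,
# flush any node-instance changes made by the operation back to storage.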
try:
return self.func(*self.args, **kwargs)
finally:
if ctx.type == constants.NODE_INSTANCE:
ctx.instance.update()
elif ctx.type == constants.RELATIONSHIP_INSTANCE:
ctx.source.instance.update()
ctx.target.instance.update()
class WorkflowHandler(TaskHandler):
def __init__(self, *args, **kwargs):
if workflow_context is None or api is None:
raise RuntimeError('Dispatcher not installed')
super(WorkflowHandler, self).__init__(*args, **kwargs)
self.execution_parameters = copy.deepcopy(self.kwargs)
@property
def ctx_cls(self):
if getattr(self.func, 'workflow_system_wide', False):
return workflow_context.CloudifySystemWideWorkflowContext
return workflow_context.CloudifyWorkflowContext
def handle(self):
self.kwargs['ctx'] = self.ctx
with state.current_workflow_ctx.push(self.ctx, self.kwargs):
self._validate_workflow_func()
if self.ctx.local or self.ctx.dry_run:
return self._handle_local_workflow()
return self._handle_remote_workflow()
def _validate_workflow_func(self):
try:
if not self.func:
self.get_func()
if self.ctx.resume and not getattr(self._func, 'resumable', False):
raise exceptions.NonRecoverableError(
'Cannot resume - workflow not resumable: {0}'
.format(self._func))
except Exception as e:
self._workflow_failed(e, traceback.format_exc())
raise
@property
def update_execution_status(self):
return self.cloudify_context.get('update_execution_status', True)
def _handle_remote_workflow(self):
tenant = self.ctx._context['tenant'].get('original_name',
self.ctx.tenant_name)
rest = get_rest_client(tenant=tenant)
execution = rest.executions.get(self.ctx.execution_id,
_include=['status'])
if execution.status == Execution.STARTED:
self.ctx.resume = True
try:
try:
self._workflow_started()
except InvalidExecutionUpdateStatus:
self._workflow_cancelled()
return api.EXECUTION_CANCELLED_RESULT
result_queue = queue.Queue()
t = threading.Thread(target=self._remote_workflow_child_thread,
args=(result_queue,),
name='Workflow-Child')
t.daemon = True
t.start()
# while the child thread is executing the workflow, the parent
# thread is polling for 'cancel' requests while also waiting for
# messages from the child thread
result = None
while True:
# check if child thread sent a message
try:
data = result_queue.get(timeout=5)
if 'result' in data:
# child thread has terminated
result = data['result']
break
else:
# error occurred in child thread
raise data['error']
except queue.Empty:
pass
# A very hacky way to solve an edge case when trying to poll
# for the execution status while the DB is downgraded during
# a snapshot restore
if self.cloudify_context['workflow_id'] == 'restore_snapshot':
continue
# check for 'cancel' requests
execution = rest.executions.get(self.ctx.execution_id,
_include=['status'])
if execution.status in [
Execution.CANCELLING,
Execution.FORCE_CANCELLING,
Execution.KILL_CANCELLING]:
# send a 'cancel' message to the child thread. It is up to
# the workflow implementation to check for this message
# and act accordingly (by stopping and raising an
# api.ExecutionCancelled error, or by returning the
# deprecated api.EXECUTION_CANCELLED_RESULT as result).
# parent thread then goes back to polling for messages from
# child thread or possibly 'force-cancelling' requests
api.set_cancel_request()
if execution.status == Execution.KILL_CANCELLING:
# if a custom workflow function must attempt some cleanup,
# it might attempt to catch SIGTERM, and confirm using this
# flag that it is being kill-cancelled
api.set_kill_request()
if execution.status in [
Execution.FORCE_CANCELLING,
Execution.KILL_CANCELLING]:
# force-cancel additionally stops this loop immediately
result = api.EXECUTION_CANCELLED_RESULT
break
if result == api.EXECUTION_CANCELLED_RESULT:
self._workflow_cancelled()
else:
self._workflow_succeeded()
return result
except exceptions.WorkflowFailed as e:
self._workflow_failed(e)
raise
except BaseException as e:
self._workflow_failed(e, traceback.format_exc())
raise
def _remote_workflow_child_thread(self, queue):
# the actual execution of the workflow will run in another thread.
# this method is the entry point for that thread, and takes care of
# forwarding the result or error back to the parent thread
with state.current_workflow_ctx.push(self.ctx, self.kwargs):
try:
workflow_result = self._execute_workflow_function()
queue.put({'result': workflow_result})
except api.ExecutionCancelled:
queue.put({'result': api.EXECUTION_CANCELLED_RESULT})
except BaseException as workflow_ex:
queue.put({'error': workflow_ex})
def _handle_local_workflow(self):
try:
self._workflow_started()
result = self._execute_workflow_function()
self._workflow_succeeded()
return result
except Exception as e:
error = StringIO()
traceback.print_exc(file=error)
self._workflow_failed(e, error.getvalue())
raise
def _execute_workflow_function(self):
try:
self.ctx.internal.start_local_tasks_processing()
result = self.func(*self.args, **self.kwargs)
if not self.ctx.internal.graph_mode:
for workflow_task in self.ctx.internal.task_graph.tasks:
workflow_task.async_result.get()
return result
finally:
self.ctx.internal.stop_local_tasks_processing()
def _workflow_started(self):
self._update_execution_status(Execution.STARTED)
dry_run = ' (dry run)' if self.ctx.dry_run else ''
start_resume = 'Resuming' if self.ctx.resume else 'Starting'
self.ctx.internal.send_workflow_event(
event_type='workflow_started',
message="{0} '{1}' workflow execution{2}".format(
start_resume, self.ctx.workflow_id, dry_run),
)
def _workflow_succeeded(self):
self.ctx.cleanup(finished=True)
dry_run = ' (dry run)' if self.ctx.dry_run else ''
self.ctx.internal.send_workflow_event(
event_type='workflow_succeeded',
message="'{0}' workflow execution succeeded{1}".format(
self.ctx.workflow_id, dry_run),
)
self._update_execution_status(Execution.TERMINATED)
def _workflow_failed(self, exception, error_traceback=None):
self.ctx.cleanup(finished=True)
try:
self.ctx.internal.send_workflow_event(
event_type='workflow_failed',
message="'{0}' workflow execution failed: {1}".format(
self.ctx.workflow_id, exception),
args={'error': error_traceback},
)
self._update_execution_status(Execution.FAILED, error_traceback)
except Exception:
logger = logging.getLogger(__name__)
logger.exception('Exception raised when attempting to update '
'execution state')
raise exception
def _workflow_cancelled(self):
self.ctx.cleanup(finished=False)
self.ctx.internal.send_workflow_event(
event_type='workflow_cancelled',
message="'{0}' workflow execution cancelled".format(
self.ctx.workflow_id),
)
self._update_execution_status(Execution.CANCELLED)
def _update_execution_status(self, status, error=None):
if self.ctx.local or not self.update_execution_status:
return
return update_execution_status(self.ctx.execution_id, status, error)
TASK_HANDLERS = {
'operation': OperationHandler,
'hook': OperationHandler,
'workflow': WorkflowHandler
}
def dispatch(__cloudify_context, *args, **kwargs):
dispatch_type = __cloudify_context['type']
dispatch_handler_cls = TASK_HANDLERS.get(dispatch_type)
if not dispatch_handler_cls:
raise exceptions.NonRecoverableError('No handler for task type: {0}'
.format(dispatch_type))
handler = dispatch_handler_cls(cloudify_context=__cloudify_context,
args=args,
kwargs=kwargs)
return handler.handle()
def main():
dispatch_dir = sys.argv[1]
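# The dispatch directory is expected to contain an input.json with the keys
# 'cloudify_context', 'args' and 'kwargs'; the result (or a serialized error)
# is written back to output.json as {'type': ..., 'payload': ...}.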
with open(os.path.join(dispatch_dir, 'input.json')) as f:
dispatch_inputs = json.load(f)
cloudify_context = dispatch_inputs['cloudify_context']
args = dispatch_inputs['args']
kwargs = dispatch_inputs['kwargs']
dispatch_type = cloudify_context['type']
threading.current_thread().setName('Dispatch-{0}'.format(dispatch_type))
handler_cls = TASK_HANDLERS[dispatch_type]
handler = None
try:
handler = handler_cls(cloudify_context=cloudify_context,
args=args,
kwargs=kwargs)
handler.setup_logging()
payload = handler.handle()
payload_type = 'result'
except BaseException as e:
payload_type = 'error'
payload = serialize_known_exception(e)
logger = logging.getLogger(__name__)
logger.error('Task {0}[{1}] raised:\n{2}'.format(
handler.cloudify_context['task_name'],
handler.cloudify_context.get('task_id', '<no-id>'),
payload.get('traceback')))
with open(os.path.join(dispatch_dir, 'output.json'), 'w') as f:
json.dump({
'type': payload_type,
'payload': payload
}, f)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 11:13:41 2021
@author: dpetrovykh
"""
from FIAR import FIAR, RepeatMove, OutOfBounds
from IPython.display import display
#Create a new game
game = FIAR(size=13)
game.draw_board()
#Start making moves
game.move(0,0)
game.move(1,1)
game.move(2,2)
game.move(-1,-1)
# print(game.df)
# print(f"(1,-1) is taken: {game.loc_taken((1,-1))}")
game.move(1,-1)
game.move(-1,1)
game.move(0,2)
game.move(0,1)
game.move(2,1)
game.move(-3,1)
game.move(-2,1)
game.move(-2,0)
game.undo()
game.move(-2,2)
game.move(5,3)
#Check that move verifier works
try:
assert game.loc_taken([-2,2]) #Should be True: (-2,2) was played
assert not game.loc_taken([9,9]) #Should be False: (9,9) is empty
except:
raise Exception('You fucked up')
#Check that Repeat moves are forbidden
try:
game.move(-2,1)
print('FAIL: Repeat move not allowed')
except RepeatMove:
print("PASS: Repeat move not allowed")
#Check that out-of-bounds moves are not allowed
try:
game.move(15,15)
print('FAIL: Out-of-bounds move not allowed')
except OutOfBounds:
print('PASS: Out-of-bounds move not allowed.')
#Check that non-int moves are not allowed
try:
game.move(6.3,5)
print('FAIL: non-int value checking')
except ValueError:
print('PASS: non-int value checking')
# Check that the distance measurements work properly
try:
for edge, dist in [['left',3],
['right',2],
['bottom',5],
['top',3]]:
calc_dist = game.d_to_edge(edge)
#print(f"edge: {edge}, dist: {dist}, calc_dist: {calc_dist}")
assert calc_dist==dist
print('PASS: d_to_edge()')
except:
print('FAIL: d_to_edge()')
## Continue game to completion
game.move(-1,0)
game.move(4,0)
game.move(-1,-2)
game.move(3,0)
game.move(-1,-3)
|
# encoding=utf8
from niapy.algorithms.basic import BareBonesFireworksAlgorithm, FireworksAlgorithm, EnhancedFireworksAlgorithm, \
DynamicFireworksAlgorithm, DynamicFireworksAlgorithmGauss
from niapy.tests.test_algorithm import AlgorithmTestCase, MyBenchmark
class BBFWATestCase(AlgorithmTestCase):
def setUp(self):
AlgorithmTestCase.setUp(self)
self.algo = BareBonesFireworksAlgorithm
def test_custom(self):
bbfwa_custom = self.algo(num_sparks=10, amplification_coefficient=2, reduction_coefficient=0.5, seed=self.seed)
bbfwa_customc = self.algo(num_sparks=10, amplification_coefficient=2, reduction_coefficient=0.5, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, bbfwa_custom, bbfwa_customc, MyBenchmark())
def test_griewank(self):
bbfwa_griewank = self.algo(num_sparks=10, amplification_coefficient=5, reduction_coefficient=0.5, seed=self.seed)
bbfwa_griewankc = self.algo(num_sparks=10, amplification_coefficient=5, reduction_coefficient=0.5, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, bbfwa_griewank, bbfwa_griewankc)
class FWATestCase(AlgorithmTestCase):
def setUp(self):
AlgorithmTestCase.setUp(self)
self.algo = FireworksAlgorithm
def test_custom(self):
fwa_custom = self.algo(seed=self.seed)
fwa_customc = self.algo(seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, fwa_custom, fwa_customc, MyBenchmark())
def test_griewank(self):
fwa_griewank = self.algo(seed=self.seed)
fwa_griewankc = self.algo(seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, fwa_griewank, fwa_griewankc)
class EFWATestCase(AlgorithmTestCase):
def setUp(self):
AlgorithmTestCase.setUp(self)
self.algo = EnhancedFireworksAlgorithm
def test_custom(self):
fwa_custom = self.algo(seed=self.seed)
fwa_customc = self.algo(seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, fwa_custom, fwa_customc, MyBenchmark(), max_evals=12345, max_iters=17)
def test_griewank(self):
fwa_griewank = self.algo(seed=self.seed)
fwa_griewankc = self.algo(seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, fwa_griewank, fwa_griewankc)
class DFWATestCase(AlgorithmTestCase):
def setUp(self):
AlgorithmTestCase.setUp(self)
self.algo = DynamicFireworksAlgorithm
def test_custom(self):
fwa_custom = self.algo(population_size=10, C_a=2, C_r=0.5, seed=self.seed)
fwa_customc = self.algo(population_size=10, C_a=2, C_r=0.5, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, fwa_custom, fwa_customc, MyBenchmark())
def test_griewank(self):
fwa_griewank = self.algo(population_size=10, C_a=5, C_r=0.5, seed=self.seed)
fwa_griewankc = self.algo(population_size=10, C_a=5, C_r=0.5, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, fwa_griewank, fwa_griewankc)
class DFWAGTestCase(AlgorithmTestCase):
def setUp(self):
AlgorithmTestCase.setUp(self)
self.algo = DynamicFireworksAlgorithmGauss
def test_custom(self):
fwa_custom = self.algo(population_size=10, C_a=2, C_r=0.5, seed=self.seed)
fwa_customc = self.algo(population_size=10, C_a=2, C_r=0.5, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, fwa_custom, fwa_customc, MyBenchmark())
def test_griewank(self):
fwa_griewank = self.algo(population_size=10, C_a=5, C_r=0.5, seed=self.seed)
fwa_griewankc = self.algo(population_size=10, C_a=5, C_r=0.5, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, fwa_griewank, fwa_griewankc)
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ComponentsschemasmicrosoftGraphTeamstaballof1(Model):
"""teamsTab.
:param name:
:type name: str
:param display_name:
:type display_name: str
:param teams_app_id:
:type teams_app_id: str
:param sort_order_index:
:type sort_order_index: str
:param message_id:
:type message_id: str
:param web_url:
:type web_url: str
:param configuration:
:type configuration: ~users.models.MicrosoftgraphteamsTabConfiguration
:param teams_app:
:type teams_app: ~users.models.MicrosoftgraphteamsApp
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'teams_app_id': {'key': 'teamsAppId', 'type': 'str'},
'sort_order_index': {'key': 'sortOrderIndex', 'type': 'str'},
'message_id': {'key': 'messageId', 'type': 'str'},
'web_url': {'key': 'webUrl', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'MicrosoftgraphteamsTabConfiguration'},
'teams_app': {'key': 'teamsApp', 'type': 'MicrosoftgraphteamsApp'},
}
def __init__(self, name=None, display_name=None, teams_app_id=None, sort_order_index=None, message_id=None, web_url=None, configuration=None, teams_app=None):
super(ComponentsschemasmicrosoftGraphTeamstaballof1, self).__init__()
self.name = name
self.display_name = display_name
self.teams_app_id = teams_app_id
self.sort_order_index = sort_order_index
self.message_id = message_id
self.web_url = web_url
self.configuration = configuration
self.teams_app = teams_app
|
#!/usr/bin/env python3
""" Feed parser """
from asyncio import get_event_loop, set_event_loop_policy
from config import (DATABASE_NAME, MONGO_SERVER, REDIS_NAMESPACE, get_profile,
log)
from datetime import datetime
from hashlib import sha1
from time import mktime
from traceback import format_exc
from bs4 import BeautifulSoup
from common import connect_redis, dequeue, enqueue, safe_id, publish
from feedparser import parse as feed_parse
from gensim import corpora, models
from langdetect import detect
from langkit import extract_keywords, tokenize
from motor.motor_asyncio import AsyncIOMotorClient
from uvloop import EventLoopPolicy
def get_entry_content(entry):
"""Select the best content from an entry"""
candidates = entry.get('content', [])
if 'summary_detail' in entry:
candidates.append(entry.summary_detail)
for candidate in candidates:
if hasattr(candidate, 'type'): # speedparser doesn't set this
if 'html' in candidate.type:
return candidate.value
if candidates:
try:
return candidates[0].value
except AttributeError: # speedparser does this differently
return candidates[0]['value']
return ''
def get_entry_date(entry):
"""Select the best timestamp for an entry"""
for header in ['modified', 'issued', 'created']:
when = entry.get(header+'_parsed', None)
if when:
return datetime.fromtimestamp(mktime(when))
return datetime.now()
def get_entry_id(entry):
"""Get a useful id from a feed entry"""
if 'id' in entry and entry.id:
if isinstance(entry.id, dict):
return entry.id.values()[0]
return entry.id
content = get_entry_content(entry)
if content:
return sha1(content.encode('utf-8')).hexdigest()
if 'link' in entry:
return entry.link
if 'title' in entry:
return sha1(entry.title.encode('utf-8')).hexdigest()
def get_plaintext(html):
"""Scrub out tags and extract plaintext"""
soup = BeautifulSoup(html, 'html.parser')
for script in soup(["script", "style"]):
script.extract()
return soup.get_text()
def lda(tokens):
# Perform Latent Dirichlet Allocation
dictionary = corpora.Dictionary(tokens)
corpus = [dictionary.doc2bow(token) for token in tokens]
lda_model = models.ldamodel.LdaModel(corpus, num_topics=3, id2word=dictionary, passes=20)
return lda_model
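# Illustrative use (assuming `token_lists` is a list of per-document token
# lists, which is what corpora.Dictionary expects):
#     topic_model = lda(token_lists)
#     topic_model.print_topics()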
async def parse(database, feed, redis):
"""Parse a feed into its constituent entries"""
result = feed_parse(feed['raw'])
if not len(result.entries):
log.info('%s: No valid entries', feed['_id'])
return
else:
log.info('%s: %d entries', feed['_id'], len(result.entries))
# TODO: turn this into a bulk insert
for entry in result.entries:
log.debug(entry.link)
when = get_entry_date(entry)
body = get_entry_content(entry)
plaintext = entry.title + " " + get_plaintext(body)
lang = detect(plaintext)
try:
keywords = extract_keywords(plaintext, lang, scores=True)[:10]
tokens = list(set(tokenize(plaintext, lang)))
except (KeyError, TypeError):
keywords = None
tokens = None
await publish(redis, 'ui', {'event':'new_entry', 'url':entry.link})
await database.entries.update_one({'_id': safe_id(entry.link)},
{'$set': {"date": when,
"title": entry.title,
"body": body,
"plaintext": plaintext,
"lang": lang,
"keywords": keywords,
"tokens": tokens,
"url": entry.link}},
upsert=True)
async def item_handler(database):
"""Break down feeds into individual items"""
redis = await connect_redis()
log.info("Beginning run.")
while True:
try:
job = await dequeue(redis, 'parser')
log.debug(job)
feed = await database.feeds.find_one({'_id': job['_id']})
if feed:
await parse(database, feed, redis)
except Exception:
log.error(format_exc())
except KeyboardInterrupt:
break
await redis.hset(REDIS_NAMESPACE + 'status', 'item_count', await database.items.count())
redis.close()
await redis.wait_closed()
def main():
"""Main loop"""
set_event_loop_policy(EventLoopPolicy())
conn = AsyncIOMotorClient(MONGO_SERVER)
database = conn[DATABASE_NAME]
loop = get_event_loop()
try:
loop.run_until_complete(item_handler(database))
finally:
loop.close()
if __name__ == '__main__':
main()
|
# Generated by Django 3.2 on 2021-05-15 20:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stats', '0011_country_flag_url_shiny'),
]
operations = [
migrations.AddField(
model_name='country',
name='flag_url_big_shiny',
field=models.URLField(null=True),
),
]
|
import cgi
import codecs
from io import BytesIO
import logging
import types
from django.http import HttpRequest, parse_cookie, QueryDict
from django.utils.functional import cached_property
from django.urls import get_resolver
from django.utils.log import log_response
from .exception import response_for_exception
logger = logging.getLogger('django.request')
class ASGIRequest(HttpRequest):
def __init__(self, scope):
self.scope = scope
self.method = scope['method']
self.query_string = self.scope.get('query_string', '')
self._content_length = 0
self._post_parse_error = False
self._read_started = False
self._stream = BytesIO()
self.path = self.scope['path']
if not self.path.endswith('/'):
self.path += '/'
script_name = self.scope.get('root_path', '')
if script_name and self.path.startswith(script_name):
self.path_info = self.path[len(script_name):]
else:
self.path_info = self.path
self.META = {
'REQUEST_METHOD': self.scope['method'],
'QUERY_STRING': self.query_string,
'SCRIPT_NAME': script_name,
'PATH_INFO': self.path_info,
}
client = self.scope.get('client', None)
if client is not None:
remote_addr, remote_port = client
self.META['REMOTE_ADDR'] = remote_addr
self.META['REMOTE_HOST'] = remote_addr
self.META['REMOTE_PORT'] = remote_port
server = self.scope.get('server', None)
if server is not None:
server_name, server_port = server
else:
server_name, server_port = 'unknown', '0'
self.META['SERVER_NAME'] = server_name
self.META['SERVER_PORT'] = server_port
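# Headers arrive as (name, value) pairs of lowercase byte strings; convert
# them to CGI-style META keys (content-length/content-type are special-cased,
# everything else becomes HTTP_*), joining duplicate headers with commas.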
for k, v in self.scope.get('headers', []):
name, value = k.decode('ascii'), v.decode('ascii')
if name == 'content-length':
corrected_name = 'CONTENT_LENGTH'
elif name == 'content-type':
corrected_name = 'CONTENT_TYPE'
else:
corrected_name = 'HTTP_%s' % name.upper().replace('-', '_')
if corrected_name in self.META:
value = self.META[corrected_name] + ',' + value
self.META[corrected_name] = value
if 'CONTENT_TYPE' in self.META:
self.content_type, self.content_params = cgi.parse_header(
self.META['CONTENT_TYPE'])
if 'charset' in self.content_params:
try:
codecs.lookup(self.content_params['charset'])
except LookupError:
pass
else:
self.encoding = self.content_params['charset']
else:
self.content_type, self.content_params = '', {}
# Pull out content length info
if self.META.get('CONTENT_LENGTH', None):
try:
self._content_length = int(self.META['CONTENT_LENGTH'])
except (ValueError, TypeError):
pass
self.resolver_match = None
@cached_property
def GET(self):
return QueryDict(self.query_string)
def _get_post(self):
if not hasattr(self, '_post'):
self._read_started = False
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
def _get_files(self):
if not hasattr(self, '_files'):
self._read_started = False
self._load_post_and_files()
return self._files
POST = property(_get_post, _set_post)
FILES = property(_get_files)
@cached_property
def COOKIES(self):
return parse_cookie(self.META.get('HTTP_COOKIE', ''))
# async def stream(self):
# if hasattr(self, "_body"):
# yield self._body
# return
# async def body(self):
# if not hasattr(self, "_body"):
# body = b""
# async for chunk in self.stream():
# body += chunk
# self._body = body
# return self._body
# @property
# def body(self):
# if not hasattr(self, '_body'):
# if self._read_started:
# raise RawPostDataException("You cannot access body after reading from request's data stream")
# # Limit the maximum request data size that will be handled in-memory.
# if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
# int(self.META.get('CONTENT_LENGTH') or 0) > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
# raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.')
# try:
# self._body = self.read()
# except IOError as e:
# raise UnreadablePostError(*e.args) from e
# self._stream = BytesIO(self._body)
# return self._body
class ASGIHandler:
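# ASGI "double callable" application (ASGI 2.0 style): the server calls the
# handler with the connection scope to obtain an instance, then awaits that
# instance with the receive/send channels.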
def __call__(self, scope):
return ASGIHandlerInstance(scope)
class ASGIHandlerInstance:
def __init__(self, scope):
if scope['type'] != 'http':
raise ValueError(
'The ASGIHandlerInstance can only handle HTTP connections, not %s' % scope['type'])
self.scope = scope
async def __call__(self, receive, send):
self.send = send
request = ASGIRequest(self.scope)
# request.body()
response = await self.get_response(request)
await self.send({
'type': 'http.response.start',
'status': response.status_code,
'headers': response.headers
})
await self.send({
'type': 'http.response.body',
'body': response.content,
'more_body': False
})
# def make_view_atomic(self, view):
# non_atomic_requests = getattr(view, '_non_atomic_requests', set())
# for db in connections.all():
# if db.settings_dict['ATOMIC_REQUESTS'] and db.alias not in non_atomic_requests:
# view = transaction.atomic(using=db.alias)(view)
# return view
async def get_response(self, request):
try:
response = await self._get_response(request)
except Exception as exc:
response = response_for_exception(request, exc)
if not getattr(response, 'is_rendered', True) and callable(getattr(response, 'render', None)):
response = response.render()
if response.status_code >= 400:
log_response(
'%s: %s', response.reason_phrase, request.path,
response=response,
request=request,
)
return response
async def _get_response(self, request):
response = None
if hasattr(request, 'urlconf'):
urlconf = request.urlconf
# set_urlconf(urlconf)
resolver = get_resolver(urlconf)
else:
resolver = get_resolver()
resolver_match = resolver.resolve(request.path_info)
callback, callback_args, callback_kwargs = resolver_match
request.resolver_match = resolver_match
if response is None:
# wrapped_callback = self.make_view_atomic(callback)
try:
response = await callback(request, *callback_args, **callback_kwargs)
except Exception as e:
return response_for_exception(request, e)
# Complain if the view returned None (a common error).
if response is None:
if isinstance(callback, types.FunctionType): # FBV
view_name = callback.__name__
else: # CBV
view_name = callback.__class__.__name__ + '.__call__'
raise ValueError(
"The view %s.%s didn't return an HttpResponse object. It "
"returned None instead." % (callback.__module__, view_name)
)
return response
|
# Generated by Django 2.1.7 on 2019-03-12 12:03
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('main', '0010_auto_20190312_1152'),
]
operations = [
migrations.AlterField(
model_name='event',
name='registration_starts',
field=models.DateField(default=datetime.datetime(2019, 3, 12, 12, 3, 40, 70563, tzinfo=utc)),
),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import boto3
import json
import time
from ecs_crd.defaultJSONEncoder import DefaultJSONEncoder
from ecs_crd.createStackStep import CreateStackStep
from ecs_crd.scaleUpServiceStep import ScaleUpServiceStep
from ecs_crd.sendNotificationBySnsStep import SendNotificationBySnsStep
class CreateGreenStackStep(CreateStackStep):
def __init__(self, infos, logger):
"""initializes a new instance of the class"""
super().__init__(
infos,
'Create Green Cloudformation Stack',
logger,
infos.green_infos)
self.timer = 5
def _on_success(self):
return ScaleUpServiceStep(self.infos, self.logger)
def _on_fail(self):
return SendNotificationBySnsStep(self.infos, self.logger)
|
"""The class for managing basic environment
The class requires the follow properties:
'id' (str): the suffix name of resource created
'ec2_params' (dict): the dictionary of the EC2 custom parameters
'lambda_params' (dict): the dictionary of the Lambda custom parameters
All properties are mandatory. See the unit tests for an example.
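An illustrative shape for the two dictionaries (the keys are the ones read
by the methods below; every value here is a placeholder):
ec2_params = {'instance_name': 'my-ec2', 'vpc_id': 'vpc-123',
'security_group_id': 'sg-123', 'region': 'eu-west-1', 'ami_id': 'ami-123',
'instance_type': 't3.micro', 'key_name': 'my-key', 'user_data': 'echo hello',
'tags': {'env': 'test'}}
lambda_params = {'name_prefix': 'my-lambda-', 'actions': ['s3:GetObject'],
'handler': 'index.handler', 'memory_size': 128,
'runtime': lambda_.Runtime.PYTHON_3_8, 'timeout': 30, 'path': 'lambda/',
'tags': {'env': 'test'}}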
# license MIT
# support https://github.com/bilardi/aws-simple-pipeline/issues
"""
from aws_cdk import (core, aws_iam as iam,
aws_ec2 as ec2,
aws_lambda as lambda_)
class Basic(core.Stack):
ec2_name = None
lambda_name = None
def __init__(self, scope: core.Construct, id: str, ec2_params: dict, lambda_params: dict, **kwargs) -> None:
"""
deploys all AWS resources for basic environment
Resources:
AWS::EC2::Instance with your details
AWS::Lambda::Function with your policies
"""
super().__init__(scope, id, **kwargs)
# ec2
self.ec2_name = '{}-{}'.format(ec2_params['instance_name'], id)
ec2_instance = self.get_instance(ec2_params)
# lambda
self.lambda_name = lambda_params['name_prefix'] + id
lambda_params['lambda_role'] = self.get_role(lambda_params)
lambda_function = self.get_lambda(lambda_params)
def get_vpc(self, ec2_params):
return ec2.Vpc.from_lookup(self, "vpc", vpc_id=ec2_params['vpc_id'])
def get_security_group(self, ec2_params):
return ec2.SecurityGroup.from_security_group_id(self, "SecurityGroup",
security_group_id=ec2_params['security_group_id'],
mutable=False
)
def add_tags(self, name, instance, tags):
core.Tags.of(instance).add('Name', name)
for tag in tags:
core.Tags.of(instance).add(tag, tags[tag])
def get_instance(self, ec2_params):
if 'vpc' not in ec2_params:
ec2_params['vpc'] = self.get_vpc(ec2_params)
if 'security_group' not in ec2_params:
ec2_params['security_group'] = self.get_security_group(ec2_params)
ec2_instance = ec2.Instance(self, self.ec2_name,
machine_image=ec2.MachineImage.generic_linux(
ami_map={ec2_params['region']:ec2_params['ami_id']}
),
vpc=ec2_params['vpc'],
security_group=ec2_params['security_group'],
instance_type=ec2.InstanceType(ec2_params['instance_type']),
key_name=ec2_params['key_name']
)
ec2_instance.user_data.add_commands(ec2_params['user_data'])
self.add_tags(self.ec2_name, ec2_instance, ec2_params['tags'])
return ec2_instance
def get_role(self, lambda_params):
lambda_policy = iam.PolicyStatement()
for action in lambda_params['actions']:
lambda_policy.add_actions(action)
lambda_policy.add_all_resources()
lambda_role = iam.Role(self, 'LambdaRole', assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
lambda_role.add_to_policy(lambda_policy)
return lambda_role
def get_lambda(self, lambda_params):
lambda_function = lambda_.Function(self, self.lambda_name,
handler=lambda_params['handler'],
role=lambda_params['lambda_role'],
memory_size=lambda_params['memory_size'],
runtime=lambda_params['runtime'],
timeout=core.Duration.seconds(lambda_params['timeout']),
code=lambda_.AssetCode(lambda_params['path'])
)
self.add_tags(self.lambda_name, lambda_function, lambda_params['tags'])
return lambda_function
|
import os, sys
import winshell
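# Walk both the per-user and the all-users Start Menu "Programs" folders,
# group the .lnk shortcuts by their relative subfolder, and print each
# shortcut's name together with the target path it points to.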
shortcuts = {}
user_programs = winshell.programs()
for dirpath, dirnames, filenames in os.walk(user_programs):
relpath = dirpath[1 + len(user_programs):]
shortcuts.setdefault(
relpath, []
).extend(
[winshell.shortcut(os.path.join(dirpath, f)) for f in filenames]
)
all_programs = winshell.programs(common=1)
for dirpath, dirnames, filenames in os.walk(all_programs):
relpath = dirpath[1 + len(all_programs):]
shortcuts.setdefault(
relpath, []
).extend(
[winshell.shortcut(os.path.join(dirpath, f)) for f in filenames]
)
for relpath, lnks in sorted(shortcuts.items()):
level = relpath.count("\\")
if level == 0:
print("")
print("%s+ %s" % (" " * level, relpath))
for lnk in lnks:
name, _ = os.path.splitext(os.path.basename(lnk.lnk_filepath))
print("%s* %s -> %s" % (" " * (level + 1), name, lnk.path))
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class PublicipInstanceResp:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'project_id': 'str',
'ip_version': 'int',
'public_ip_address': 'str',
'public_ipv6_address': 'str',
'status': 'str',
'description': 'str',
'public_border_group': 'str',
'created_at': 'datetime',
'updated_at': 'datetime',
'type': 'str',
'vnic': 'VnicInfo',
'bandwidth': 'PublicipBandwidthInfo',
'enterprise_project_id': 'str',
'billing_info': 'str',
'lock_status': 'str',
'associate_instance_type': 'str',
'associate_instance_id': 'str',
'publicip_pool_id': 'str',
'publicip_pool_name': 'str',
'alias': 'str'
}
attribute_map = {
'id': 'id',
'project_id': 'project_id',
'ip_version': 'ip_version',
'public_ip_address': 'public_ip_address',
'public_ipv6_address': 'public_ipv6_address',
'status': 'status',
'description': 'description',
'public_border_group': 'public_border_group',
'created_at': 'created_at',
'updated_at': 'updated_at',
'type': 'type',
'vnic': 'vnic',
'bandwidth': 'bandwidth',
'enterprise_project_id': 'enterprise_project_id',
'billing_info': 'billing_info',
'lock_status': 'lock_status',
'associate_instance_type': 'associate_instance_type',
'associate_instance_id': 'associate_instance_id',
'publicip_pool_id': 'publicip_pool_id',
'publicip_pool_name': 'publicip_pool_name',
'alias': 'alias'
}
def __init__(self, id=None, project_id=None, ip_version=None, public_ip_address=None, public_ipv6_address=None, status=None, description=None, public_border_group=None, created_at=None, updated_at=None, type=None, vnic=None, bandwidth=None, enterprise_project_id=None, billing_info=None, lock_status=None, associate_instance_type=None, associate_instance_id=None, publicip_pool_id=None, publicip_pool_name=None, alias=None):
"""PublicipInstanceResp - a model defined in huaweicloud sdk"""
self._id = None
self._project_id = None
self._ip_version = None
self._public_ip_address = None
self._public_ipv6_address = None
self._status = None
self._description = None
self._public_border_group = None
self._created_at = None
self._updated_at = None
self._type = None
self._vnic = None
self._bandwidth = None
self._enterprise_project_id = None
self._billing_info = None
self._lock_status = None
self._associate_instance_type = None
self._associate_instance_id = None
self._publicip_pool_id = None
self._publicip_pool_name = None
self._alias = None
self.discriminator = None
if id is not None:
self.id = id
if project_id is not None:
self.project_id = project_id
if ip_version is not None:
self.ip_version = ip_version
if public_ip_address is not None:
self.public_ip_address = public_ip_address
if public_ipv6_address is not None:
self.public_ipv6_address = public_ipv6_address
if status is not None:
self.status = status
if description is not None:
self.description = description
if public_border_group is not None:
self.public_border_group = public_border_group
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
if type is not None:
self.type = type
if vnic is not None:
self.vnic = vnic
if bandwidth is not None:
self.bandwidth = bandwidth
if enterprise_project_id is not None:
self.enterprise_project_id = enterprise_project_id
if billing_info is not None:
self.billing_info = billing_info
if lock_status is not None:
self.lock_status = lock_status
if associate_instance_type is not None:
self.associate_instance_type = associate_instance_type
if associate_instance_id is not None:
self.associate_instance_id = associate_instance_id
if publicip_pool_id is not None:
self.publicip_pool_id = publicip_pool_id
if publicip_pool_name is not None:
self.publicip_pool_name = publicip_pool_name
if alias is not None:
self.alias = alias
@property
def id(self):
"""Gets the id of this PublicipInstanceResp.
Function: unique identifier of the elastic public IP (EIP)
:return: The id of this PublicipInstanceResp.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this PublicipInstanceResp.
Function: unique identifier of the elastic public IP (EIP)
:param id: The id of this PublicipInstanceResp.
:type: str
"""
self._id = id
@property
def project_id(self):
"""Gets the project_id of this PublicipInstanceResp.
Function: project ID
:return: The project_id of this PublicipInstanceResp.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this PublicipInstanceResp.
Function: project ID
:param project_id: The project_id of this PublicipInstanceResp.
:type: str
"""
self._project_id = project_id
@property
def ip_version(self):
"""Gets the ip_version of this PublicipInstanceResp.
Function: IP version. Range: 4 means the public IP address is the public_ip_address value; 6 means it is the public_ipv6_address value
:return: The ip_version of this PublicipInstanceResp.
:rtype: int
"""
return self._ip_version
@ip_version.setter
def ip_version(self, ip_version):
"""Sets the ip_version of this PublicipInstanceResp.
Function: IP version. Range: 4 means the public IP address is the public_ip_address value; 6 means it is the public_ipv6_address value
:param ip_version: The ip_version of this PublicipInstanceResp.
:type: int
"""
self._ip_version = ip_version
@property
def public_ip_address(self):
"""Gets the public_ip_address of this PublicipInstanceResp.
Function: address of the elastic public IP, or of the IPv6 port
:return: The public_ip_address of this PublicipInstanceResp.
:rtype: str
"""
return self._public_ip_address
@public_ip_address.setter
def public_ip_address(self, public_ip_address):
"""Sets the public_ip_address of this PublicipInstanceResp.
Function: address of the elastic public IP, or of the IPv6 port
:param public_ip_address: The public_ip_address of this PublicipInstanceResp.
:type: str
"""
self._public_ip_address = public_ip_address
@property
def public_ipv6_address(self):
"""Gets the public_ipv6_address of this PublicipInstanceResp.
Function: not present for IPv4; for IPv6 this is the allocated elastic public IP address
:return: The public_ipv6_address of this PublicipInstanceResp.
:rtype: str
"""
return self._public_ipv6_address
@public_ipv6_address.setter
def public_ipv6_address(self, public_ipv6_address):
"""Sets the public_ipv6_address of this PublicipInstanceResp.
Function: not present for IPv4; for IPv6 this is the allocated elastic public IP address
:param public_ipv6_address: The public_ipv6_address of this PublicipInstanceResp.
:type: str
"""
self._public_ipv6_address = public_ipv6_address
@property
def status(self):
"""Gets the status of this PublicipInstanceResp.
Function: status of the elastic public IP. Range: FREEZED (frozen), BIND_ERROR (binding failed), BINDING, PENDING_DELETE (releasing), PENDING_CREATE (creating), NOTIFYING (creating), NOTIFY_DELETE (releasing), PENDING_UPDATE (updating), DOWN (unbound), ACTIVE (bound), ELB (bound to ELB), VPN (bound to VPN), ERROR (failed)
:return: The status of this PublicipInstanceResp.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this PublicipInstanceResp.
Function: status of the elastic public IP. Range: FREEZED (frozen), BIND_ERROR (binding failed), BINDING, PENDING_DELETE (releasing), PENDING_CREATE (creating), NOTIFYING (creating), NOTIFY_DELETE (releasing), PENDING_UPDATE (updating), DOWN (unbound), ACTIVE (bound), ELB (bound to ELB), VPN (bound to VPN), ERROR (failed)
:param status: The status of this PublicipInstanceResp.
:type: str
"""
self._status = status
@property
def description(self):
"""Gets the description of this PublicipInstanceResp.
Function: description of the elastic public IP. Constraint: a user-defined label for the resource; the system does not interpret it
:return: The description of this PublicipInstanceResp.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this PublicipInstanceResp.
Function: description of the elastic public IP. Constraint: a user-defined label for the resource; the system does not interpret it
:param description: The description of this PublicipInstanceResp.
:type: str
"""
self._description = description
@property
def public_border_group(self):
"""Gets the public_border_group of this PublicipInstanceResp.
Function: indicates whether this is a central-site or an edge-site resource. Range: center, or an edge site name. Constraint: a public IP can only be bound to resources with the same value of this field
:return: The public_border_group of this PublicipInstanceResp.
:rtype: str
"""
return self._public_border_group
@public_border_group.setter
def public_border_group(self, public_border_group):
"""Sets the public_border_group of this PublicipInstanceResp.
Function: indicates whether this is a central-site or an edge-site resource. Range: center, or an edge site name. Constraint: a public IP can only be bound to resources with the same value of this field
:param public_border_group: The public_border_group of this PublicipInstanceResp.
:type: str
"""
self._public_border_group = public_border_group
@property
def created_at(self):
"""Gets the created_at of this PublicipInstanceResp.
Function: UTC time when the resource was created. Format: yyyy-MM-ddTHH:mm:ssZ
:return: The created_at of this PublicipInstanceResp.
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this PublicipInstanceResp.
Function: UTC time when the resource was created. Format: yyyy-MM-ddTHH:mm:ssZ
:param created_at: The created_at of this PublicipInstanceResp.
:type: datetime
"""
self._created_at = created_at
@property
def updated_at(self):
"""Gets the updated_at of this PublicipInstanceResp.
Function: UTC time when the resource was last updated. Format: yyyy-MM-ddTHH:mm:ssZ
:return: The updated_at of this PublicipInstanceResp.
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this PublicipInstanceResp.
Function: UTC time when the resource was last updated. Format: yyyy-MM-ddTHH:mm:ssZ
:param updated_at: The updated_at of this PublicipInstanceResp.
:type: datetime
"""
self._updated_at = updated_at
@property
def type(self):
"""Gets the type of this PublicipInstanceResp.
Function: type of the elastic public IP
:return: The type of this PublicipInstanceResp.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this PublicipInstanceResp.
Function: type of the elastic public IP
:param type: The type of this PublicipInstanceResp.
:type: str
"""
self._type = type
@property
def vnic(self):
"""Gets the vnic of this PublicipInstanceResp.
:return: The vnic of this PublicipInstanceResp.
:rtype: VnicInfo
"""
return self._vnic
@vnic.setter
def vnic(self, vnic):
"""Sets the vnic of this PublicipInstanceResp.
:param vnic: The vnic of this PublicipInstanceResp.
:type: VnicInfo
"""
self._vnic = vnic
@property
def bandwidth(self):
"""Gets the bandwidth of this PublicipInstanceResp.
:return: The bandwidth of this PublicipInstanceResp.
:rtype: PublicipBandwidthInfo
"""
return self._bandwidth
@bandwidth.setter
def bandwidth(self, bandwidth):
"""Sets the bandwidth of this PublicipInstanceResp.
:param bandwidth: The bandwidth of this PublicipInstanceResp.
:type: PublicipBandwidthInfo
"""
self._bandwidth = bandwidth
@property
def enterprise_project_id(self):
"""Gets the enterprise_project_id of this PublicipInstanceResp.
Function: enterprise project ID. At most 36 bytes, in hyphenated UUID format, or the string "0". When an elastic public IP is created, it is bound to this enterprise project ID.
:return: The enterprise_project_id of this PublicipInstanceResp.
:rtype: str
"""
return self._enterprise_project_id
@enterprise_project_id.setter
def enterprise_project_id(self, enterprise_project_id):
"""Sets the enterprise_project_id of this PublicipInstanceResp.
Function: enterprise project ID. At most 36 bytes, in hyphenated UUID format, or the string "0". When an elastic public IP is created, it is bound to this enterprise project ID.
:param enterprise_project_id: The enterprise_project_id of this PublicipInstanceResp.
:type: str
"""
self._enterprise_project_id = enterprise_project_id
@property
def billing_info(self):
"""Gets the billing_info of this PublicipInstanceResp.
Function: order information of the public IP. Constraint: only yearly/monthly (subscription) resources carry order information; this field is empty for pay-per-use resources
:return: The billing_info of this PublicipInstanceResp.
:rtype: str
"""
return self._billing_info
@billing_info.setter
def billing_info(self, billing_info):
"""Sets the billing_info of this PublicipInstanceResp.
Function: order information of the public IP. Constraint: only yearly/monthly (subscription) resources carry order information; this field is empty for pay-per-use resources
:param billing_info: The billing_info of this PublicipInstanceResp.
:type: str
"""
self._billing_info = billing_info
@property
def lock_status(self):
"""Gets the lock_status of this PublicipInstanceResp.
Function: current freeze status of the public IP. Constraint: a metadata-style field marking freezes due to arrears or police action. Range: police, locked
:return: The lock_status of this PublicipInstanceResp.
:rtype: str
"""
return self._lock_status
@lock_status.setter
def lock_status(self, lock_status):
"""Sets the lock_status of this PublicipInstanceResp.
Function: current freeze status of the public IP. Constraint: a metadata-style field marking freezes due to arrears or police action. Range: police, locked
:param lock_status: The lock_status of this PublicipInstanceResp.
:type: str
"""
self._lock_status = lock_status
@property
def associate_instance_type(self):
"""Gets the associate_instance_type of this PublicipInstanceResp.
Function: type of the instance the public IP is bound to. Range: PORT, NATGW, ELB, ELBV1, VPN, null
:return: The associate_instance_type of this PublicipInstanceResp.
:rtype: str
"""
return self._associate_instance_type
@associate_instance_type.setter
def associate_instance_type(self, associate_instance_type):
"""Sets the associate_instance_type of this PublicipInstanceResp.
Function: type of the instance the public IP is bound to. Range: PORT, NATGW, ELB, ELBV1, VPN, null
:param associate_instance_type: The associate_instance_type of this PublicipInstanceResp.
:type: str
"""
self._associate_instance_type = associate_instance_type
@property
def associate_instance_id(self):
"""Gets the associate_instance_id of this PublicipInstanceResp.
Function: ID of the instance the public IP is bound to
:return: The associate_instance_id of this PublicipInstanceResp.
:rtype: str
"""
return self._associate_instance_id
@associate_instance_id.setter
def associate_instance_id(self, associate_instance_id):
"""Sets the associate_instance_id of this PublicipInstanceResp.
Function: ID of the instance the public IP is bound to
:param associate_instance_id: The associate_instance_id of this PublicipInstanceResp.
:type: str
"""
self._associate_instance_id = associate_instance_id
@property
def publicip_pool_id(self):
"""Gets the publicip_pool_id of this PublicipInstanceResp.
Function: ID of the network the public IP belongs to, i.e. the network ID corresponding to publicip_pool_name
:return: The publicip_pool_id of this PublicipInstanceResp.
:rtype: str
"""
return self._publicip_pool_id
@publicip_pool_id.setter
def publicip_pool_id(self, publicip_pool_id):
"""Sets the publicip_pool_id of this PublicipInstanceResp.
Function: ID of the network the public IP belongs to, i.e. the network ID corresponding to publicip_pool_name
:param publicip_pool_id: The publicip_pool_id of this PublicipInstanceResp.
:type: str
"""
self._publicip_pool_id = publicip_pool_id
@property
def publicip_pool_name(self):
"""Gets the publicip_pool_name of this PublicipInstanceResp.
Function: network type of the elastic public IP, including shared pool types such as 5_bgp/5_sbgp... and dedicated pools purchased by the user. For dedicated pools, see the publicip_pool APIs
:return: The publicip_pool_name of this PublicipInstanceResp.
:rtype: str
"""
return self._publicip_pool_name
@publicip_pool_name.setter
def publicip_pool_name(self, publicip_pool_name):
"""Sets the publicip_pool_name of this PublicipInstanceResp.
Function: network type of the elastic public IP, including shared pool types such as 5_bgp/5_sbgp... and dedicated pools purchased by the user. For dedicated pools, see the publicip_pool APIs
:param publicip_pool_name: The publicip_pool_name of this PublicipInstanceResp.
:type: str
"""
self._publicip_pool_name = publicip_pool_name
@property
def alias(self):
"""Gets the alias of this PublicipInstanceResp.
Function: name of the elastic public IP
:return: The alias of this PublicipInstanceResp.
:rtype: str
"""
return self._alias
@alias.setter
def alias(self, alias):
"""Sets the alias of this PublicipInstanceResp.
Function: name of the elastic public IP
:param alias: The alias of this PublicipInstanceResp.
:type: str
"""
self._alias = alias
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PublicipInstanceResp):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
"""
Package name: imagee
Version: 1.1
Description: Tool for optimizing images
Github repo: https://github.com/dev-muhammad/imagee
Author: Muhammad (https://github.com/dev-muhammad)
"""
import os.path
import sys
from PIL import Image
from io import BytesIO
import base64
class Imagee():
"""
Main Imagee class
Attributes:
- image (PIL.Image): current image
- size (int): current image size
- format (str): current image format
- path (str): current image path
- optimized_size (int): optimized image size in bytes
- optimization_rate (float): optimization rate
- quality (int): quality of optimized image | default = 85
- optimized (io.BytesIO): optimized image in buffer
Methods:
- read(path: str) -> None: select/read an image from path
- optimaze(quality: int) -> None: optimize the selected image (0 < quality < 100)
- save(path: str) -> None: save the optimized image to path
- getBase64() -> str: return the optimized image as a base64-encoded data URI string
"""
image = None
size = None
format = None
path = None
optimized_size = None
optimization_rate = None
quality = 85
optimized = BytesIO()
SUPPORTED_FORMATS = {'jpg':"JPEG", 'jpeg':"JPEG", 'png':"PNG"} # other formats not tested
def __init__(self) -> None:
self.image = None
self.size = None
self.format = None
self.path = None
self.optimized_size = None
self.optimization_rate = None
self.quality = 85
self.optimized = BytesIO()
def read(self, path: str) -> None:
"""Read image from path"""
self.format = self._validateImage(path)
self.size = self._checkSize(path)
self.image = Image.open(path)
self.path = path
def optimaze(self, quality=85) -> None:
"""
Optimize the selected image into the in-memory buffer
"""
if self.image is None:
sys.exit('No image has been read for optimization. Use the .read() method to select a file, then optimize it!')
self.quality = quality
self.image.save(
self.optimized,
self.format,
optimize=True,
quality=self.quality)
self.optimized_size = self.optimized.getbuffer().nbytes # get image size after optimizing
self.optimization_rate = round((self.size-self.optimized_size)/float(self.size), 2)
def save(self, path: str) -> None:
"""Save image on path"""
if self.optimized_size is None:
sys.exit('No optimized image to save. Use the .read() method to select a file, then optimize it!')
image = Image.open(BytesIO(self.optimized.getbuffer()))
image.save(path)
def getBase64(self) -> str:
"""Return image in base64 format in string"""
if self.optimized_size is None:
sys.exit('No image has been optimized. Use the .read() method to select a file, then optimize it!')
return "data:image/"+self.format+";base64," + base64.b64encode(self.optimized.getvalue()).decode("utf-8")
def _validateImage(self, path: str) -> str:
"""
Local method for validating an image
- path: image path (str)
returns the normalized image format (str), e.g. 'JPEG' or 'PNG'
"""
if os.path.exists(path):
_, file_extension = os.path.splitext(path)
file_extension = file_extension[1:]
if file_extension in self.SUPPORTED_FORMATS.keys():
return self.SUPPORTED_FORMATS[file_extension]
else:
sys.exit(f'Format {file_extension} is not supported! Supported formats: {self.SUPPORTED_FORMATS}')
else:
sys.exit(f'File does not exist at {path}')
def _checkSize(self, path: str) -> int:
"""
Local method for checking file size
- path: image path (str)
returns file size (int)
"""
return os.stat(path).st_size
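# Hypothetical usage sketch (added for illustration, not part of the original
# module); "photo.jpg" and "photo_small.jpg" are placeholder paths.
#
#   img = Imagee()
#   img.read("photo.jpg")            # select the source image
#   img.optimaze(quality=80)         # compress into the in-memory buffer
#   print(img.optimization_rate)     # e.g. 0.35 -> roughly 35% smaller
#   img.save("photo_small.jpg")      # or img.getBase64() for a data URI string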
|
import FWCore.ParameterSet.Config as cms
## test for electronId
simpleEleId70cIso = cms.EDProducer(
"EleIdCutBasedExtProducer",
src = cms.InputTag("gedGsfElectrons"),
reducedBarrelRecHitCollection = cms.InputTag("reducedEcalRecHitsEB"),
reducedEndcapRecHitCollection = cms.InputTag("reducedEcalRecHitsEE"),
verticesCollection = cms.InputTag("offlineBeamSpot"),
dataMagneticFieldSetUp = cms.bool(False),
dcsTag = cms.InputTag("scalersRawToDigi"),
algorithm = cms.string('eIDCB'),
electronIDType = cms.string('robust'),
electronQuality = cms.string('70cIso'),
electronVersion = cms.string('V04'),
## 70% point modified with restricting cuts to physical values
robust70cIsoEleIDCutsV04 = cms.PSet(
barrel = cms.vdouble(2.5e-02, 1.0e-02, 3.0e-02, 4.0e-03, -1, -1, 9999., 9999., 9999., 9999., 9999., 9999.,
9999., 9999., 9999., 9999., 9999., 4.0e-02, 0.0, -9999., 9999., 9999., 0, -1, 0.02, 0.02, ),
endcap = cms.vdouble(2.5e-02, 3.0e-02, 2.0e-02, 5.0e-03, -1, -1, 9999., 9999., 9999., 9999., 9999., 9999.,
9999., 9999., 9999., 9999., 9999., 3.0e-02, 0.0, -9999., 9999., 9999., 0, -1, 0.02, 0.02, ),
),
)
|
from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = "po_123"
class TestPayout(object):
def test_is_listable(self, request_mock):
resources = stripe.Payout.list()
request_mock.assert_requested("get", "/v1/payouts")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.Payout)
def test_is_retrievable(self, request_mock):
resource = stripe.Payout.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/payouts/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Payout)
def test_is_creatable(self, request_mock):
resource = stripe.Payout.create(amount=100, currency="usd")
request_mock.assert_requested("post", "/v1/payouts")
assert isinstance(resource, stripe.Payout)
def test_is_saveable(self, request_mock):
resource = stripe.Payout.retrieve(TEST_RESOURCE_ID)
resource.metadata["key"] = "value"
resource.save()
request_mock.assert_requested(
"post", "/v1/payouts/%s" % TEST_RESOURCE_ID
)
def test_is_modifiable(self, request_mock):
resource = stripe.Payout.modify(
TEST_RESOURCE_ID, metadata={"key": "value"}
)
request_mock.assert_requested(
"post", "/v1/payouts/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Payout)
def test_can_cancel(self, request_mock):
payout = stripe.Payout.retrieve(TEST_RESOURCE_ID)
resource = payout.cancel()
request_mock.assert_requested(
"post", "/v1/payouts/%s/cancel" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Payout)
def test_can_cancel_classmethod(self, request_mock):
resource = stripe.Payout.cancel(TEST_RESOURCE_ID)
request_mock.assert_requested(
"post", "/v1/payouts/%s/cancel" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Payout)
|
import datetime
import json
from secrets import token_hex
from typing import List, Dict, Any
from flask_login import UserMixin
from markupsafe import Markup
from passlib.handlers.sha2_crypt import sha512_crypt
from passlib.hash import sha256_crypt
from peewee import CharField, BooleanField, ForeignKeyField, IntegerField, \
DateTimeField, \
Model, IntegrityError, BlobField
from pywebpush import WebPusher
from home.core.utils import random_string
from home.settings import GOOGLE_API_KEY, USE_LDAP, db, DEBUG, PUBLIC_GROUPS
grants = []
def db_init() -> None:
db.connect()
try:
db.create_tables([FIDOToken,
User,
Subscriber,
SecurityController,
SecurityEvent,
APIClient,
OAuthClient,
])
print('Creating tables...')
if DEBUG:
u = User.create(username='root', password="")
User.create(username='guest', password="")
u.set_password('root')
u.admin = True
u.save()
SecurityController.create()
except IntegrityError:
pass
db.close()
class BaseModel(Model):
class Meta:
database = db
def gen_token() -> str:
return token_hex(16)
class User(BaseModel, UserMixin):
username = CharField(unique=True)
authenticated = BooleanField(default=False)
password = CharField()
admin = BooleanField(default=False)
_groups = CharField(default='')
ldap = BooleanField(default=False)
token = CharField(default=gen_token)
def get_id(self) -> str:
return self.token
def check_password(self, password: str) -> bool:
if self.ldap and USE_LDAP:
from home.web.utils import ldap_auth
return ldap_auth(self.username, password)
try:
return sha512_crypt.verify(password, self.password)
except ValueError:
return sha256_crypt.verify(password, self.password)
def set_password(self, password: str) -> None:
self.password = sha512_crypt.encrypt(password)
@property
def groups(self) -> List[str]:
return self._groups.split(',')
def has_permission(self, obj: Any = None, group: str = "") -> bool:
if obj:
return obj.group in PUBLIC_GROUPS or obj.group in self.groups or self.admin
elif group:
return group in PUBLIC_GROUPS or group in self.groups or self.admin
def has_fido(self) -> bool:
return len(self.fido_tokens) > 0
def __repr__(self):
return self.username
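# Hypothetical usage sketch (added for illustration): passwords are hashed with
# sha512_crypt on write, and sha256_crypt is only tried as a legacy fallback
# when verification raises ValueError.
#
#   u = User.create(username="demo", password="")
#   u.set_password("s3cret")
#   u.save()
#   assert u.check_password("s3cret")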
class FIDOToken(BaseModel):
name = CharField()
added = DateTimeField(default=datetime.datetime.now)
user = ForeignKeyField(User, related_name='fido_tokens')
data = BlobField()
def to_dict(self) -> Dict:
return {
'id': self.id,
'name': Markup.escape(self.name),
'added': self.added.isoformat()
}
class APIClient(BaseModel):
name = CharField(unique=True)
token = CharField(default=random_string)
permissions = CharField(default='')
def has_permission(self, permission: str) -> bool:
return permission in self.permissions.split(',')
def add_permission(self, permission: str) -> None:
permission = permission.replace(' ', '')
if not self.has_permission(permission):
if self.permissions and not self.permissions[-1] == ',':
self.permissions += ','
self.permissions += permission + ','
self.save()
class Subscriber(BaseModel):
endpoint = CharField(unique=True)
auth = CharField()
p256dh = CharField()
user = ForeignKeyField(User, related_name='subscribers')
def to_dict(self) -> Dict[str, str]:
return {
'endpoint': self.endpoint,
'keys': {'auth': self.auth,
'p256dh': self.p256dh
}
}
def push(self, message: str, icon: str = '/static/favicon.ico') -> None:
WebPusher(self.to_dict()).send(
json.dumps({'body': message,
'icon': icon}),
gcm_key=GOOGLE_API_KEY)
class SecurityController(BaseModel):
state = CharField(default='disabled')
def arm(self) -> None:
self.state = 'armed'
self.save()
def occupied(self) -> None:
self.state = 'occupied'
self.save()
def alert(self) -> None:
self.state = 'alert'
self.save()
def disable(self) -> None:
self.state = 'disabled'
self.save()
def is_alert(self) -> bool:
return self.state == 'alert'
def is_armed(self) -> bool:
return self.state == 'armed'
class SecurityEvent(BaseModel):
controller = ForeignKeyField(SecurityController, related_name='events')
device = CharField()
in_progress = BooleanField(default=True)
datetime = DateTimeField(default=datetime.datetime.now)
duration = IntegerField(null=True)
# new = BooleanField(default=True)
class OAuthClient(BaseModel):
name = CharField()
user = ForeignKeyField(User, related_name='oauth_clients')
client_id = CharField(primary_key=True)
client_secret = CharField(unique=True)
class Token(BaseModel):
client = ForeignKeyField(OAuthClient, related_name='tokens')
user = ForeignKeyField(User, related_name='tokens')
token_type = CharField()
access_token = CharField(unique=True)
refresh_token = CharField(unique=True)
expires = DateTimeField()
_scopes = CharField(null=True)
def delete(self):
# peewee models have no SQLAlchemy-style session; delete the row directly
self.delete_instance()
return self
@property
def scopes(self):
if self._scopes:
return self._scopes.split()
return []
class Grant:
def __init__(self, user: User, client_id: str, client: OAuthClient, code: str, redirect_uri: str, _scopes: str,
expires: datetime.date):
self.user = user
self.client_id = client_id
self.client = client
self.code = code
self.redirect_uri = redirect_uri
self._scopes = _scopes
self.expires = expires
|
from __future__ import unicode_literals
import dataent
from dataent.utils import cint, flt
def execute():
for doctype in dataent.get_all("DocType", filters={"issingle": 0}):
doctype = doctype.name
meta = dataent.get_meta(doctype)
for column in dataent.db.sql("desc `tab{doctype}`".format(doctype=doctype), as_dict=True):
fieldname = column["Field"]
column_type = column["Type"]
if not (column_type.startswith("int") or column_type.startswith("decimal")):
continue
dataent.db.sql("""update `tab{doctype}` set `{fieldname}`=0 where `{fieldname}` is null"""\
.format(doctype=doctype, fieldname=fieldname))
# alter table
if column["Null"]=='YES':
if not meta.get_field(fieldname):
continue
default = cint(column["Default"]) if column_type.startswith("int") else flt(column["Default"])
dataent.db.sql_ddl("""alter table `tab{doctype}`
change `{fieldname}` `{fieldname}` {column_type} not null default '{default}'""".format(
doctype=doctype, fieldname=fieldname, column_type=column_type, default=default))
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'ActionHttpRunAfterArgs',
'TriggerRecurrenceScheduleArgs',
]
@pulumi.input_type
class ActionHttpRunAfterArgs:
def __init__(__self__, *,
action_name: pulumi.Input[str],
action_result: pulumi.Input[str]):
"""
:param pulumi.Input[str] action_name: Specifies the name of the precedent HTTP Action.
:param pulumi.Input[str] action_result: Specifies the expected result of the precedent HTTP Action, only after which the current HTTP Action will be triggered. Possible values include `Succeeded`, `Failed`, `Skipped` and `TimedOut`.
"""
pulumi.set(__self__, "action_name", action_name)
pulumi.set(__self__, "action_result", action_result)
@property
@pulumi.getter(name="actionName")
def action_name(self) -> pulumi.Input[str]:
"""
Specifies the name of the precedent HTTP Action.
"""
return pulumi.get(self, "action_name")
@action_name.setter
def action_name(self, value: pulumi.Input[str]):
pulumi.set(self, "action_name", value)
@property
@pulumi.getter(name="actionResult")
def action_result(self) -> pulumi.Input[str]:
"""
Specifies the expected result of the precedent HTTP Action, only after which the current HTTP Action will be triggered. Possible values include `Succeeded`, `Failed`, `Skipped` and `TimedOut`.
"""
return pulumi.get(self, "action_result")
@action_result.setter
def action_result(self, value: pulumi.Input[str]):
pulumi.set(self, "action_result", value)
@pulumi.input_type
class TriggerRecurrenceScheduleArgs:
def __init__(__self__, *,
at_these_hours: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
at_these_minutes: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
on_these_days: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input[int]]] at_these_hours: Specifies a list of hours when the trigger should run. Valid values are between 0 and 23.
:param pulumi.Input[Sequence[pulumi.Input[int]]] at_these_minutes: Specifies a list of minutes when the trigger should run. Valid values are between 0 and 59.
:param pulumi.Input[Sequence[pulumi.Input[str]]] on_these_days: Specifies a list of days when the trigger should run. Valid values include `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday`, and `Sunday`.
"""
if at_these_hours is not None:
pulumi.set(__self__, "at_these_hours", at_these_hours)
if at_these_minutes is not None:
pulumi.set(__self__, "at_these_minutes", at_these_minutes)
if on_these_days is not None:
pulumi.set(__self__, "on_these_days", on_these_days)
@property
@pulumi.getter(name="atTheseHours")
def at_these_hours(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
"""
Specifies a list of hours when the trigger should run. Valid values are between 0 and 23.
"""
return pulumi.get(self, "at_these_hours")
@at_these_hours.setter
def at_these_hours(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
pulumi.set(self, "at_these_hours", value)
@property
@pulumi.getter(name="atTheseMinutes")
def at_these_minutes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
"""
Specifies a list of minutes when the trigger should run. Valid values are between 0 and 59.
"""
return pulumi.get(self, "at_these_minutes")
@at_these_minutes.setter
def at_these_minutes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
pulumi.set(self, "at_these_minutes", value)
@property
@pulumi.getter(name="onTheseDays")
def on_these_days(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Specifies a list of days when the trigger should run. Valid values include `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday`, and `Sunday`.
"""
return pulumi.get(self, "on_these_days")
@on_these_days.setter
def on_these_days(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "on_these_days", value)
|
from pathlib import Path
from typing import List, Any, Dict, Union, Tuple
import yaml
from genson import SchemaBuilder
from .apidoc import Endpoint, APIDoc
from .postmanapidoc import PostmanAPIDoc
def str_presenter(dumper, data):
if len(data.splitlines()) > 1: # check for multiline string
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
return dumper.represent_scalar('tag:yaml.org,2002:str', data)
yaml.add_representer(str, str_presenter)
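# Small illustration (added, not in the original file): with the representer
# registered above, multi-line strings are dumped in YAML block style, e.g.
#   yaml.dump({"description": "line one\nline two"})
# produces roughly:
#   description: |-
#     line one
#     line two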
class OpenAPIDoc(APIDoc):
class OpenAPIEndpoint(Endpoint):
_PREFIX_COMPONENTS_SCHEMAS = '#/components/schemas/'
_PREFIX_COMPONENTS_RESPONSES = '#/components/responses/'
_REF_PREFIX = '#/components/schemas/'
def __init__(self,
*,
name: str,
path: str,
method: str,
tags: Tuple[str] = None,
request_query_params: Union[Dict[str, Any], None],
request_path_params: Union[Dict[str, Any], None],
request_body: Union[Dict[str, Any], None],
response_example: Dict[str, Any]):
super().__init__(name=name,
path=path,
method=method,
tags=tags,
request_query_params=request_query_params,
request_path_params=request_path_params,
request_body=request_body,
response_example=response_example)
@staticmethod
def _split_schema_in_subschemas(json_schema: Dict[str, Any],
prefix: str, schema_name, schemas):
properties = {}
if json_schema['type'] == 'object':
props = json_schema['properties']
for prop_name in props:
if props[prop_name]['type'] == 'object':
sub_component_name = f'{prefix}{prop_name[0].capitalize() + prop_name[1:]}'
properties[prop_name] = {
'$ref': f'{OpenAPIDoc.OpenAPIEndpoint._REF_PREFIX}{sub_component_name}'
}
OpenAPIDoc.OpenAPIEndpoint._split_schema_in_subschemas(props[prop_name], prefix, sub_component_name,
schemas)
elif props[prop_name]['type'] == 'array' and 'items' not in props[prop_name]:
print(
f'WARNING: Cannot determine the items type of {prop_name} in {prefix}; defaulting to string type')
properties[prop_name] = props[prop_name]
properties[prop_name]['items'] = {'type': 'string'}
elif props[prop_name]['type'] == 'array' and (
props[prop_name]['items']['type'] == 'array' or props[prop_name]['items']['type'] == 'object'):
sub_component_name = f'{prefix}{prop_name[0].capitalize() + prop_name[1:]}'
properties[prop_name] = {
'type': 'array',
'items': {
'$ref': f'{OpenAPIDoc.OpenAPIEndpoint._REF_PREFIX}{sub_component_name}'
}
}
OpenAPIDoc.OpenAPIEndpoint._split_schema_in_subschemas(props[prop_name]['items'], prefix,
sub_component_name, schemas)
elif props[prop_name]['type'] == 'null':
properties[prop_name] = {'type': 'object'}
else:
properties[prop_name] = props[prop_name]
if schema_name in schemas:
print(f'WARNING: {schema_name} is already present in schemas; please fix it manually')
schemas[schema_name] = dict()
# if 'required' in json_schema:
# schemas[schema_name]['required'] = json_schema['required']
schemas[schema_name]['type'] = json_schema['type']
schemas[schema_name]['properties'] = properties
schemas[schema_name]['additionalProperties'] = False
else:
OpenAPIDoc.OpenAPIEndpoint._split_schema_in_subschemas(json_schema['items'], prefix, f'{prefix}Item', schemas)
schemas[schema_name] = {
'type': 'array',
'items': {
'$ref': f'{OpenAPIDoc.OpenAPIEndpoint._REF_PREFIX}{prefix}Item'
}
}
@staticmethod
def generate_schemas_from_example(prefix: str,
example: Dict[str, Any]) -> Dict[str, Any]:
if example:
builder = SchemaBuilder()
builder.add_object(example)
json_schema = builder.to_schema()
del json_schema['$schema']
schemas = {}
OpenAPIDoc.OpenAPIEndpoint._split_schema_in_subschemas(json_schema, prefix,
prefix, schemas)
return schemas
return {}
@property
def request_schemas(self) -> Dict[str, Any]:
cap_name = f'{self.name[0].capitalize()}{self.name[1:]}'
cap_method = f'{self.method[0].capitalize()}{self.method[1:]}'
request_schema_ref = f'{cap_method}{cap_name}Request'
return OpenAPIDoc.OpenAPIEndpoint.generate_schemas_from_example(request_schema_ref, self.request_body)
def _get_response_200(self) -> Dict[str, Any]:
return {
'description': 'OK',
'content': {
'application/json': {
'schema': {
'$ref': f'{OpenAPIDoc.OpenAPIEndpoint._PREFIX_COMPONENTS_SCHEMAS}{self.method[0].capitalize()}{self.method[1:]}{self.name[0].capitalize()}{self.name[1:]}Response'
},
'example': self.response_example
}
}
}
def _get_parameters(self, position: str) -> List[Dict[str, Any]]:
if position == 'query':
params = self.request_query_params
elif position == 'path':
params = self.request_path_params
parameters = []
for param_name in params:
param = dict()
param['name'] = param_name
param['in'] = position
param['schema'] = {'type': 'string'}
param['example'] = params[param_name]
parameters.append(param)
return parameters
def _get_request_body(self) -> Dict[str, Any]:
return {
'content':
{
'application/json':
{
'schema':
{
'$ref': f'{OpenAPIDoc.OpenAPIEndpoint._PREFIX_COMPONENTS_SCHEMAS}{self.method[0].capitalize()}{self.method[1:]}{self.name[0].capitalize()}{self.name[1:]}Request'
},
'example': self.request_body
}
},
'required': True
}
def _insert_request_struct(self,
openapi_path: Dict[str, Any]):
if self.request_query_params or self.request_path_params:
openapi_path['parameters'] = []
if self.request_query_params:
openapi_path['parameters'].extend(self._get_parameters('query'))
if self.request_path_params:
openapi_path['parameters'].extend(self._get_parameters('path'))
if self.request_body:
request_body = self._get_request_body()
openapi_path['requestBody'] = request_body
def _insert_responses_struct(self,
openapi_path: Dict[str, Any]):
responses = dict()
responses['200'] = self._get_response_200()
openapi_path['responses'] = responses
def get_openapi_path(self) -> Dict[str, Any]:
openapi_path = dict()
openapi_path[self.path] = {}
openapi_path[self.path][self.method] = {}
if self.tags:
openapi_path[self.path][self.method]['tags'] = list(self.tags)
self._insert_request_struct(openapi_path[self.path][self.method])
self._insert_responses_struct(openapi_path[self.path][self.method])
return openapi_path
@staticmethod
def _build_path_and_endpoint_schemas(endpoint: OpenAPIEndpoint) -> Tuple[Dict[str, Any], Dict[str, Any]]:
cap_name = f'{endpoint.name[0].capitalize()}{endpoint.name[1:]}'
cap_method = f'{endpoint.method[0].capitalize()}{endpoint.method[1:]}'
schemas = {}
if endpoint.request_body:
schemas.update(endpoint.request_schemas)
response_schema_ref = f'{cap_method}{cap_name}Response'
response_schema = OpenAPIDoc.OpenAPIEndpoint.generate_schemas_from_example(response_schema_ref,
endpoint.response_example)
schemas.update(response_schema)
return endpoint.get_openapi_path(), schemas
@classmethod
def create_from_postman_doc(cls,
openapi_doc: PostmanAPIDoc):
return cls(openapi_doc.name,
[OpenAPIDoc.OpenAPIEndpoint(name=endpoint.name,
path=endpoint.path,
method=endpoint.method,
tags=endpoint.tags,
request_query_params=endpoint.request_query_params,
request_path_params=endpoint.request_path_params,
request_body=endpoint.request_body,
response_example=endpoint.response_example) for endpoint in
openapi_doc.endpoints])
def to_yaml(self, file_path: Path):
openapi_doc = {
"openapi": "3.0.2",
"info": {
"title": self.name,
"version": "0.5.0"
},
"paths": {},
"components": {
"schemas": {}
}
}
for endpoint in self.endpoints:
path, endpoint_schemas = OpenAPIDoc._build_path_and_endpoint_schemas(endpoint)
if endpoint.path in openapi_doc["paths"]:
openapi_doc["paths"][endpoint.path].update(path[endpoint.path])
else:
openapi_doc['paths'].update(path)
openapi_doc['components']['schemas'].update(endpoint_schemas)
with open(file_path, 'w') as f:
f.write(yaml.dump(openapi_doc, sort_keys=False))
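# Hypothetical usage sketch (added for illustration): assumes a PostmanAPIDoc
# instance built elsewhere from a Postman collection (its construction lives in
# postmanapidoc.py and is not shown here).
#
#   openapi = OpenAPIDoc.create_from_postman_doc(postman_doc)
#   openapi.to_yaml(Path("openapi.yaml"))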
|
from sqlalchemy import MetaData, Table, Column, insert, delete, select
from sqlalchemy.dialects.mysql import CHAR
from .database import Database
from ..utils.verify import uuid_verify
metadata = MetaData()
subscriptions = Table(
"subscriptions",
metadata,
Column("UserUUID", CHAR(32), nullable=False),
Column("MarkListUUID", CHAR(32), nullable=False)
)
db = Database()
class Subscriptions:
@staticmethod
async def add(userUUID, markListUUID):
uuid_verify(userUUID)
uuid_verify(markListUUID)
await db.execute(insert(subscriptions).
values(UserUUID=userUUID, MarkListUUID=markListUUID))
@staticmethod
async def remove(userUUID, markListUUID):
uuid_verify(userUUID)
uuid_verify(markListUUID)
await db.execute(delete(subscriptions).
where(subscriptions.c.UserUUID == userUUID).
where(subscriptions.c.MarkListUUID == markListUUID))
@staticmethod
async def get(userUUID):
uuid_verify(userUUID)
result = await db.execute(select(subscriptions).
where(subscriptions.c.UserUUID == userUUID))
return result.all()
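# Hypothetical usage sketch (added for illustration): the 32-character hex UUIDs
# below are placeholders, and db.execute is assumed to be awaitable as used above.
#
#   import asyncio
#
#   async def demo():
#       user, mark_list = "a" * 32, "b" * 32
#       await Subscriptions.add(user, mark_list)
#       print(await Subscriptions.get(user))
#       await Subscriptions.remove(user, mark_list)
#
#   asyncio.run(demo())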
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
('events', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='GroupRestriction',
fields=[
('event', models.OneToOneField(related_name='group_restriction', primary_key=True, serialize=False, to='events.Event', on_delete=models.CASCADE)),
('groups', models.ManyToManyField(help_text='Legg til de gruppene som skal ha tilgang til arrangementet', to='auth.Group', null=True, blank=True)),
],
options={
'verbose_name': 'restriksjon',
'verbose_name_plural': 'restriksjoner',
'permissions': (('view_restriction', 'View Restriction'),),
},
bases=(models.Model,),
),
]
|
import sys
import re
# from Client import *
file = open(sys.argv[1],'r')
fileName = file.name[:-4]
tipos = ['int', 'bool']
tiposEspecias = {}
def numBits(numero):
if numero > 2:
resto = numero % 2
if resto == 0:
r = numero/2
return r
else:
r = (numero/2) + 1
return r
else:
return 1
def startWithNode(line):
if line[0:4] == 'node':
return True
else:
return False
def searchForTypes(line):
if line[0:4] == 'type':
line = line.split(' ')
tipoName = line[1]
tipoTipos = line[-1].split('|')
tipos.append(tipoName)
tiposEspecias[tipoName] = numBits(len(tipoTipos))
lines = file.readlines()
list(map(searchForTypes, lines))
lines = filter(startWithNode, lines)
automatonsList = []
def lineToAutomaton(line):
line = line[5:-1]
line = line.split("returns")
name = line[0].split('(')[0]
print (name)
entradas = line[0].split('(')[1]
entradas = entradas[0:-2]
entradas = entradas.split(":")[0]
entradas = entradas.split(",")
print (entradas)
def tirarEspacos(x):
return x.strip()
entradas = list(map(tirarEspacos,entradas))
saidas = line[1].strip()[1:-1]
print (saidas)
def subst(x):
if ('int' in x):
x = re.sub(r':.*', '', x)
return x
elif ('bool' in x):
x = re.sub(r':.*', '', x)
return x
else:
name = x.split(':')
multiplicador = tiposEspecias[name[1].strip()]
x = ''
for i in range(1, multiplicador+1):
x += name[0].strip()+'_'+str(i)+','
print(x)
return x[0:-1]
saidas = saidas.split(';')
saidas = list(map(subst, saidas))
if ('' in saidas):
saidas.remove('')
saidas = ','.join(saidas)
saidas = saidas.split(",")
saidas = [i.replace(" ", "") for i in saidas]
print (saidas)
print("----------")
automaton = {'name':name,'entradas': entradas,'saidas':saidas}
automatonsList.append(automaton)
list(map(lineToAutomaton,lines))
task = automatonsList[-1]
constFunc = fileName[0].upper() + fileName[1:] + "__" + task['name']
mem = constFunc+"_mem mem;"
out = constFunc+"_out _res;"
step = constFunc+"_step("
reset = constFunc+"_reset(&mem);"
declaracoes ="int step_c;int step_max;"
def concateDec(x):
global declaracoes
declaracoes = declaracoes+"int %s;"%(x)
list(map(concateDec,task['entradas']))
ifs = ""
def concateIf(x):
global ifs
ifs += 'if (strcmp(dict[i].key, "%s") == 0){ %s= atoi(dict[i].value);} else '%(x,x)
list(map(concateIf,task['entradas']))
def concateStep(x):
global step
step += '%s,'%(x)
list(map(concateStep,task['entradas']))
resposta='strcat(json, "{");'
def concateRes(x):
global resposta
string1 = 'strcat(json, ", \''+x
string2 = '\':");'
resposta += string1 + string2 + 'sprintf(aux, "%d", _res.'+x+');strcat(json, aux);'
resposta = resposta.replace("{,", "{")
list(map(concateRes,task['saidas']))
resposta+='strcat(json, "}");'
part1 = '''
#include <sys/types.h>
#ifndef _WIN32
#include <sys/select.h>
#include <sys/socket.h>
#else
#include <winsock2.h>
#endif
#include <string.h>
#include <microhttpd.h>
#include <stdio.h>
#include <stdlib.h>
#include "_main.h"
#define PORT 8082
#define n 57
%s
int glob_cnt=0;
struct item
{
const char *key;
const char *value;
};
struct item dict[n];
int save_items (void *cls, enum MHD_ValueKind kind,
const char *key, const char *value)
{
dict[glob_cnt].key = key;
dict[glob_cnt].value = value;
glob_cnt += 1;
return MHD_YES;
}
'''%(mem)
part2 = '''
static int
answer_to_connection (void *cls, struct MHD_Connection *connection,
const char *url, const char *method,
const char *version, const char *upload_data,
size_t *upload_data_size, void **con_cls)
{
char *json = malloc( 10000 );
struct MHD_Response *response;
int ret;
MHD_get_connection_values(connection, MHD_GET_ARGUMENT_KIND, &save_items, NULL);
//printf("Counter:%d", glob_cnt);
glob_cnt = 0; ''' + declaracoes + out
part3 = '''
int i;
for(i= 0; i < n; i++){
''' + ifs + '''
{
//printf("Not implemented %s", dict[i].key);
}
}
for(i= 0; i < n; i++)
{
//printf("%s, %s",dict[i].key, dict[i].value);
} ''' + step + '''
&_res, &mem);
char aux[1];
''' + resposta +'''
response =
MHD_create_response_from_buffer (strlen (json), (void *) json,
MHD_RESPMEM_PERSISTENT);
ret = MHD_queue_response (connection, MHD_HTTP_OK, response);
MHD_destroy_response (response);
return ret;
}
int main(int argc, char** argv) {
struct MHD_Daemon *daemon;
''' + reset + '''
int i;
for(i= 0; i < n; i++)
{
dict[i].key = "";
dict[i].value = "";
}
daemon = MHD_start_daemon (MHD_USE_SELECT_INTERNALLY, PORT, NULL, NULL,
&answer_to_connection, NULL, MHD_OPTION_END);
if (NULL == daemon)
return 1;
(void) getchar ();
MHD_stop_daemon (daemon);
return 0;
} '''
webservice = part1+part2+part3
file.close()
file2=open('webservices.c','w')
file2.write(webservice)
file2.close()
|
import re
import spacy
import statistics
import en_core_web_lg
from functools import lru_cache
#nlp = spacy.load("en_core_web_sm")
nlp = en_core_web_lg.load()
# read into text_list as a list
with open('book/book59.txt', 'r') as f:
# convert newlines ("\n") to ""
#text_list = f.read().splitlines()
text = f.read()
# remove double quotes with a regular expression
text = re.sub('"', '', text)
# cosine similarity with adjacent sentences
cos_ruizido=[]
# list holding the sentence-segmented sentences
bunsyou=[]
# split the text into sentences and append them to the list
doc = nlp(text)
for sent in doc.sents:
sent=sent.lemma_
bunsyou.append(str(sent))
##for token in doc:
# print(token.text+', '+token.lemma_) # text, lemmatized
a=0
b=0
@lru_cache(maxsize=4096)
def ld(s, t):
if not s: return len(t)
if not t: return len(s)
if s[0] == t[0]: return ld(s[1:], t[1:])
l1 = ld(s, t[1:])
l2 = ld(s[1:], t)
l3 = ld(s[1:], t[1:])
return 1 + min(l1, l2, l3)
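# Worked example (added for illustration): ld("kitten", "sitting") == 3
# (substitute k->s, substitute e->i, append g), matching the classic
# Levenshtein distance.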
bun_1=""
bun_2=""
kazu=0
a=0
wariai=[]
if len(bunsyou) <= 100:
while kazu < len(bunsyou)-1:
print(kazu)
bun_1=bunsyou[kazu]
bun_2=bunsyou[kazu+1]
kyori = ld(bun_1,bun_2)
# minimum edit distance / (character count of bun_1 + character count of bun_2)
wariai.append(kyori/(len(bunsyou[kazu])+len(bunsyou[kazu+1])))
kazu+=1
else:
while kazu < 2:
print(kazu)
bun_1=bunsyou[kazu]
bun_2=bunsyou[kazu+1]
kyori = ld(bun_1,bun_2)
# minimum edit distance / (character count of bun_1 + character count of bun_2)
wariai.append(kyori/(len(bunsyou[kazu])+len(bunsyou[kazu+1])))
kazu+=1
# compute the mean of the list
hasseiritu = statistics.mean(wariai)
print(hasseiritu)
#print(ld('vintner', 'writers'))
#0.820 0.907
#0.894 0.874
|
# -*- coding: utf-8 -*-
'''
Dummy Payment Gateway Transaction
Untested code is broken code. Testing web services is painful, and testing
payment gateways is even more painful. This module adds a dummy
credit card processor for unit and integration tests to use.
This payment provider does not appear in production use. To enable the
dummy provider in your tests, add 'use_dummy': True to the transaction
context.
.. code-block:: python
with Transaction().set_context(use_dummy=True):
PaymentGateway.create([{
'name': 'A dummy gateway',
'journal': cash_journal.id,
'provider': 'dummy',
'method': 'credit_card',
}])
:copyright: (c) 2013-2014 by Openlabs Technologies & Consulting (P) Ltd.
:license: BSD, see LICENSE for more details
'''
from trytond.pool import PoolMeta
from trytond.transaction import Transaction
__all__ = [
'PaymentGatewayDummy', 'AddPaymentProfileViewDummy',
'AddPaymentProfileDummy', 'DummyTransaction',
]
__metaclass__ = PoolMeta
class PaymentGatewayDummy:
"A Dummy Credit Card Processor for writing tests"
__name__ = 'payment_gateway.gateway'
@classmethod
def get_providers(cls, values=None):
"""
Downstream modules can add to the list
"""
rv = super(PaymentGatewayDummy, cls).get_providers()
self_record = ('dummy', 'Dummy')
if Transaction().context.get('use_dummy') and self_record not in rv:
rv.append(self_record)
return rv
def get_methods(self):
if self.provider == 'dummy':
return [
('credit_card', 'Dummy Credit Card Processor'),
]
return super(PaymentGatewayDummy, self).get_methods()
class DummyTransaction:
"""
Implement the authorize and capture methods
"""
__name__ = 'payment_gateway.transaction'
def authorize_dummy(self, card_info=None):
"""
Authorize with a dummy card
"""
succeed = Transaction().context.get('dummy_succeed', True)
if succeed:
self.state = 'authorized'
else:
self.state = 'failed'
self.save()
def settle_dummy(self):
"""
Settle a dummy transaction
"""
succeed = Transaction().context.get('dummy_succeed', True)
if succeed:
self.state = 'completed'
self.save()
self.safe_post()
else:
self.state = 'failed'
self.save()
def capture_dummy(self):
"""
Capture a dummy transaction
"""
succeed = Transaction().context.get('dummy_succeed', True)
if succeed:
self.state = 'completed'
self.save()
self.safe_post()
else:
self.state = 'failed'
self.save()
def cancel_dummy(self):
"""
Cancel a dummy transaction
"""
if self.state != 'authorized':
self.raise_user_error('cancel_only_authorized')
succeed = Transaction().context.get('dummy_succeed', True)
if succeed:
self.state = 'cancel'
self.save()
class AddPaymentProfileViewDummy:
__name__ = 'party.payment_profile.add_view'
@classmethod
def get_providers(cls):
"""
Return the list of providers who support credit card profiles.
"""
res = super(AddPaymentProfileViewDummy, cls).get_providers()
if Transaction().context.get('use_dummy'):
res.append(('dummy', 'Dummy Gateway'))
return res
class AddPaymentProfileDummy:
"""
Add a payment profile
"""
__name__ = 'party.party.payment_profile.add'
def transition_add_dummy(self):
"""
Handle the case if the profile should be added for dummy
"""
succeed = Transaction().context.get('dummy_succeed', True)
if succeed:
return self.create_profile(self.card_info.csc)
|
# pylint: disable=no-self-use,invalid-name
from deep_qa.data.data_indexer import DataIndexer
from deep_qa.data.instances.instance import TextInstance
from deep_qa.data.tokenizers import tokenizers
# pylint: disable=line-too-long
from deep_qa.data.instances.text_classification.text_classification_instance import IndexedTextClassificationInstance
from deep_qa.data.instances.text_classification.text_classification_instance import TextClassificationInstance
# pylint: enable=line-too-long
from deep_qa.common.params import Params
from ...common.test_case import DeepQaTestCase
class TestTextInstance(DeepQaTestCase):
"""
The point of this test class is to test the TextEncoder used by the TextInstance, to be sure
that we get what we expect when using character encoders, or word-and-character encoders.
"""
def tearDown(self):
super(TestTextInstance, self).tearDown()
TextInstance.tokenizer = tokenizers['words'](Params({}))
def test_words_tokenizes_the_sentence_correctly(self):
t = TextClassificationInstance("This is a sentence.", None)
assert t.words() == {'words': ['this', 'is', 'a', 'sentence', '.']}
TextInstance.tokenizer = tokenizers['characters'](Params({}))
assert t.words() == {'characters': ['T', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 's',
'e', 'n', 't', 'e', 'n', 'c', 'e', '.']}
TextInstance.tokenizer = tokenizers['words and characters'](Params({}))
assert t.words() == {'words': ['this', 'is', 'a', 'sentence', '.'],
'characters': ['t', 'h', 'i', 's', 'i', 's', 'a', 's', 'e', 'n', 't',
'e', 'n', 'c', 'e', '.']}
def test_to_indexed_instance_converts_correctly(self):
data_indexer = DataIndexer()
a_word_index = data_indexer.add_word_to_index("a", namespace='words')
sentence_index = data_indexer.add_word_to_index("sentence", namespace='words')
capital_a_index = data_indexer.add_word_to_index("A", namespace='characters')
space_index = data_indexer.add_word_to_index(" ", namespace='characters')
a_index = data_indexer.add_word_to_index("a", namespace='characters')
s_index = data_indexer.add_word_to_index("s", namespace='characters')
e_index = data_indexer.add_word_to_index("e", namespace='characters')
n_index = data_indexer.add_word_to_index("n", namespace='characters')
t_index = data_indexer.add_word_to_index("t", namespace='characters')
c_index = data_indexer.add_word_to_index("c", namespace='characters')
instance = TextClassificationInstance("A sentence", None).to_indexed_instance(data_indexer)
assert instance.word_indices == [a_word_index, sentence_index]
TextInstance.tokenizer = tokenizers['characters'](Params({}))
instance = TextClassificationInstance("A sentence", None).to_indexed_instance(data_indexer)
assert instance.word_indices == [capital_a_index, space_index, s_index, e_index, n_index, t_index,
e_index, n_index, c_index, e_index]
TextInstance.tokenizer = tokenizers['words and characters'](Params({}))
instance = TextClassificationInstance("A sentence", None).to_indexed_instance(data_indexer)
assert instance.word_indices == [[a_word_index, a_index],
[sentence_index, s_index, e_index, n_index, t_index,
e_index, n_index, c_index, e_index]]
class TestIndexedInstance(DeepQaTestCase):
def test_get_padding_lengths_works_with_words_and_characters(self):
instance = IndexedTextClassificationInstance([[1, 2], [3, 1, 2]], True)
assert instance.get_padding_lengths() == {'num_sentence_words': 2, 'num_word_characters': 3}
def test_pad_word_sequence_handles_words_and_characters_less(self):
instance = IndexedTextClassificationInstance([[1, 2], [3, 1, 2]], True)
padded = instance.pad_word_sequence(instance.word_indices,
{'num_sentence_words': 3, 'num_word_characters': 4})
assert padded == [[0, 0, 0, 0], [1, 2, 0, 0], [3, 1, 2, 0]]
def test_pad_word_sequence_handles_words_and_characters_greater(self):
instance = IndexedTextClassificationInstance([[1, 2], [3, 1, 2]], True)
padded = instance.pad_word_sequence(instance.word_indices,
{'num_sentence_words': 5, 'num_word_characters': 4})
assert padded == [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 2, 0, 0], [3, 1, 2, 0]]
|
import unittest
from name_function import get_formatted_name
class NamesTestCase(unittest.TestCase):
"""Tets for 'name_function.py'"""
def test_first_last_name(self):
formatted_name = get_formatted_name('seymour', 'skinner')
self.assertEqual(formatted_name, 'Seymour Skinner')
def test_first_last_middle_name(self):
formatted_name = get_formatted_name('lisa', 'simpson', 'marie')
self.assertEqual(formatted_name, 'Lisa Marie Simpson')
if __name__ == '__main__':
unittest.main()
|
import torch
import torch.nn.functional as F
def slicedWassersteinLoss(x, y, num_projections=1000):
'''Random projections of 1D features to compute the sliced Wasserstein distance.
Sliced Wasserstein is more memory-consuming than the PDL loss.
'''
x = x.reshape(x.shape[0], -1) # B,L
y = y.reshape(y.shape[0], -1)
W = torch.randn((num_projections,x.shape[1]), device=x.device) # this may be improved by uniformly sampling from hypersphere
W = W / torch.sqrt(torch.sum(W ** 2, dim=1, keepdim=True)) # each row is norm=1
e_x = torch.matmul(x, W.t()) # B,N
e_y = torch.matmul(y, W.t())
e_x_s = torch.sort(e_x, dim=1)[0]
e_y_s = torch.sort(e_y, dim=1)[0]
loss = F.l1_loss(e_x_s, e_y_s)
return loss
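# Hypothetical usage sketch (added for illustration): compares two random
# batches of 8 feature maps; num_projections trades accuracy for memory/compute.
#
#   a = torch.randn(8, 32, 32)
#   b = torch.randn(8, 32, 32)
#   loss = slicedWassersteinLoss(a, b, num_projections=256)
#   print(loss.item())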
|
## User vs Computer
#User will be player 1 and Computer will be player 2
import random
def checkBoard(board):
for player in range(1,3):
if player==1:
symbol="X"
else:
symbol="O"
for i in range(0,3):
if (board[i][0]==symbol) and (board[i][1]==symbol) and (board[i][2]==symbol):
return player+1
for i in range(0,3):
if (board[0][i]==symbol) and (board[1][i]==symbol) and (board[2][i]==symbol):
return player+1
if (board[0][0]==symbol) and (board[1][1]==symbol) and (board[2][2]==symbol):
return player+1
if (board[0][2]==symbol) and (board[1][1]==symbol) and (board[2][0]==symbol):
return player+1
for i in range(0,3):
for j in range(0,3):
if board[i][j]=="":
return 0
return 1
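# Note (added for clarity): the return value of checkBoard indexes the `states`
# list defined in the main program below:
#   0 -> "PLAY" (moves remain), 1 -> "DRAW" (board full),
#   2 -> "P1-WIN" (three X in a row), 3 -> "P2-WIN" (three O in a row).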
def initializeBoard(board):
for i in range(0,3):
for j in range(0,3):
board[i][j]=""
def printBoard(board):
#write code to print the current board of the game
cellstr=""
for i in range(0,3):
for j in range(0,3):
if board[i][j]=="":
cellstr=" "
elif board[i][j]=="X":
cellstr="X"
else:
cellstr="O"
print("|",cellstr,end=" ")
print("|")
if i<2:
print("|---|---|---|")
print()
def whoWillStart():
#returns who will start the game
return random.randint(1, 2)
def startGame(board,players,player):
initializeBoard(board)
players[1]=input("Enter name of the player (symbol X): ")
#players[2]=input("Enter name of the Player 2 (symbol O): ")
print()
print(players[player],"won the toss. So", players[player], "will start first.")
print()
def playMove(board,players,player):
print(players[player]," will take move now.")
row=int(input("Choose the row where you want to place your mark: "))
column=int(input("Choose the column where you want to place your mark: "))
board[row-1][column-1]="X"
printBoard(board)
def computerMove(board,players,player):
print(players[player], "has taken the move. Check below: ")
#checking row for winning
for i in range(3):
if board[i].count("O")==2:
for j in range(3):
if board[i][j]=="":
board[i][j]="O"
printBoard(board)
return
for i in range(3):
count=0
for j in range(3):
if board[j][i]=="O":
count+=1
if count==2:
for j in range(3):
if board[j][i]=="":
board[j][i]="O"
printBoard(board)
return
#Check for primary diagonal
countO=0
locationE=-1
for i in range(3):
if board[i][i]=="O":
countO+=1
if board[i][i]=="":
locationE=i
if countO==2 and locationE!=-1:
board[locationE][locationE]="O"
printBoard(board)
return
#Check other diagonal
countO=0
locationE=-1
for i in range(3):
if board[i][2-i]=="O":
countO+=1
if board[i][2-i]=="":
locationE=i
if countO==2 and locationE!=-1:
board[locationE][2-locationE]="O"
printBoard(board)
return
#Counter Move
for i in range(3):
if board[i].count("X")==2:
for j in range(3):
if board[i][j]=="":
board[i][j]="O"
printBoard(board)
return
for i in range(3):
count=0
placed=0
for j in range(3):
if board[j][i]=="X":
count+=1
if count==2:
for j in range(3):
if board[j][i]=="":
board[j][i]="O"
printBoard(board)
return
#Check for primary diagonal
countO=0
locationE=-1
for i in range(3):
if board[i][i]=="X":
countO+=1
if board[i][i]=="":
locationE=i
if countO==2 and locationE!=-1:
board[locationE][locationE]="O"
printBoard(board)
return
#Check other diagonal
countO=0
locationE=-1
for i in range(3):
if board[i][2-i]=="X":
countO+=1
if board[i][2-i]=="":
locationE=i
if countO==2 and locationE!=-1:
board[locationE][2-locationE]="O"
printBoard(board)
return
#computer now places a non-critical mark
#preferred positions are the center first, then the corners
if board[1][1]=="":
board[1][1]="O"
printBoard(board)
return
if board[0][0]=="":
board[0][0]="O"
printBoard(board)
return
if board[0][2]=="":
board[0][2]="O"
printBoard(board)
return
if board[2][0]=="":
board[2][0]="O"
printBoard(board)
return
if board[2][2]=="":
board[2][2]="O"
printBoard(board)
return
for i in range(3):
for j in range(3):
if board[i][j]=="":
board[i][j]="O"
printBoard(board)
return
def togglePlayer(playerInGame):
if playerInGame==1:
return 2
else:
return 1
def announceResult(state,states,players):
if states[state]=="DRAW":
print("Game results in a draw.")
elif states[state]=="P1-WIN":
print(players[1], "won the game. Congratulations!!")
elif states[state]=="P2-WIN":
print(players[2], "won the game. Congratulations!!")
print()
return int(input("Do you want to play again? (Enter 1 for yes, 0 for no): "))
def restartGame(board,players,whoStarted):
initializeBoard(board)
whoStarted=togglePlayer(whoStarted)
print()
print("In this game", players[whoStarted], " will start the game.")
print()
return whoStarted
#Main Program
# Variables
board=[["","X",""],["X","O","X"],["","","O"]]
players=["","P1","Computer"]
states=["PLAY", "DRAW", "P1-WIN", "P2-WIN"]
playerInGame=0
state=0
whoStarted=0
# Main Program
playerInGame=whoWillStart()
whoStarted=playerInGame
startGame(board,players,whoStarted)
# Game Loop
while True:
# check whose turn is to put the bet and then take the move
if playerInGame==1:
playMove(board,players,playerInGame)
else:
computerMove(board,players,playerInGame)
#check the condition of the board
state=checkBoard(board)
if states[state]=="PLAY":
playerInGame=togglePlayer(playerInGame)
else:
playMore=announceResult(state,states,players)
if playMore==1:
playerInGame=restartGame(board,players,whoStarted)
whoStarted=playerInGame
else:
print("Thanks for playing game!")
break |
"""Base Connection Class."""
# Standard Library
from typing import Dict, Union, Sequence
# Project
from hyperglass.log import log
from hyperglass.models.api import Query
from hyperglass.parsing.nos import scrape_parsers, structured_parsers
from hyperglass.parsing.common import parsers
from hyperglass.models.config.devices import Device
# Local
from ._construct import Construct
class Connection:
"""Base transport driver class."""
def __init__(self, device: Device, query_data: Query) -> None:
"""Initialize connection to device."""
self.device = device
self.query_data = query_data
self.query_type = self.query_data.query_type
self.query_target = self.query_data.query_target
self._query = Construct(device=self.device, query_data=self.query_data)
self.query = self._query.queries()
async def parsed_response( # noqa: C901 ("too complex")
self, output: Sequence[str]
) -> Union[str, Sequence[Dict]]:
"""Send output through common parsers."""
log.debug("Pre-parsed responses:\n{}", output)
parsed = ()
response = None
structured_nos = structured_parsers.keys()
structured_query_types = structured_parsers.get(self.device.nos, {}).keys()
scrape_nos = scrape_parsers.keys()
scrape_query_types = scrape_parsers.get(self.device.nos, {}).keys()
if not self.device.structured_output:
_parsed = ()
for func in parsers:
for response in output:
_output = func(commands=self.query, output=response)
_parsed += (_output,)
if self.device.nos in scrape_nos and self.query_type in scrape_query_types:
func = scrape_parsers[self.device.nos][self.query_type]
for response in _parsed:
_output = func(response)
parsed += (_output,)
else:
parsed += _parsed
response = "\n\n".join(parsed)
elif (
self.device.structured_output
and self.device.nos in structured_nos
and self.query_type not in structured_query_types
):
for func in parsers:
for response in output:
_output = func(commands=self.query, output=response)
parsed += (_output,)
response = "\n\n".join(parsed)
elif (
self.device.structured_output
and self.device.nos in structured_nos
and self.query_type in structured_query_types
):
func = structured_parsers[self.device.nos][self.query_type]
response = func(output)
if response is None:
response = "\n\n".join(output)
log.debug("Post-parsed responses:\n{}", response)
return response
|
import dadi
import dadi.DFE as DFE
import pickle, glob
import numpy as np
from src.Models import get_dadi_model_func
def generate_cache(model, grids, popt,
gamma_bounds, gamma_pts, additional_gammas,
output, sample_sizes, mp, cuda, single_gamma):
if cuda:
dadi.cuda_enabled(True)
func = get_dadi_model_func(model, True, single_gamma)
if grids == None:
grids = [sample_sizes[0]+10, sample_sizes[0]+20, sample_sizes[0]+30]
if single_gamma:
spectra = DFE.Cache1D(popt, sample_sizes, func, pts_l=grids, additional_gammas=additional_gammas, gamma_bounds=gamma_bounds, gamma_pts=gamma_pts, mp=mp)
else:
spectra = DFE.Cache2D(popt, sample_sizes, func, pts=grids, additional_gammas=additional_gammas, gamma_bounds=gamma_bounds, gamma_pts=gamma_pts, mp=mp)
fid = open(output, 'wb')
pickle.dump(spectra, fid, protocol=2)
fid.close()
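# Hypothetical usage sketch (added for illustration): all values below are
# placeholders; `popt` would normally come from a prior demographic fit with
# dadi, and the model name must be one that get_dadi_model_func understands.
#
#   generate_cache(model="two_epoch", grids=None, popt=[2.0, 0.5],
#                  gamma_bounds=(1e-4, 2000), gamma_pts=50, additional_gammas=[],
#                  output="spectra_cache.bpkl", sample_sizes=[20], mp=False,
#                  cuda=False, single_gamma=True)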
|
# Bangla Natural Language Toolkit: Parts of Speech Tagger
#
# Copyright (C) 2019 BNLTK Project
# Author: Ashraf Hossain <[email protected]>
from keras.models import load_model
from string import punctuation
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import LabelEncoder
import platform
import getpass
import os
import sys
import logging
logging.getLogger('tensorflow').disabled = True
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
class Loader:
texts = ''
sentences = []
model = ''
model_path = None
tagged_data_path = None
def __init__(self):
self.texts = ''
self.sentences = []
self.model = None
self.model_path = None
self.tagged_data_path = None
def path_generator(self):
isFiles_exist = True
if platform.system() == 'Windows':
self.model_path = "C:\\Users\\"+getpass.getuser()+"\\bnltk_data\\pos_data\\keras_mlp_bangla.h5"
self.tagged_data_path = "C:\\Users\\"+getpass.getuser()+"\\bnltk_data\\pos_data\\bn_tagged_mod.txt"
else:
self.model_path = "/Users/"+getpass.getuser()+"/bnltk_data/pos_data/keras_mlp_bangla.h5"
self.tagged_data_path = "/Users/"+getpass.getuser()+"/bnltk_data/pos_data/bn_tagged_mod.txt"
def load_keras_model(self):
self.path_generator()
self.model = load_model(self.model_path)
self.model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
self.load_corpus()
self.data_manipulator()
def load_corpus(self):
#file = '/Users/ashrafhossain/bnltk_data/pos_data/bn_tagged_mod.txt'
self.texts = open(self.tagged_data_path, encoding="utf8").readlines()
def tuple_maker(self, line):
line = line.split(' ')
sentence = []
for x in line:
if x == '':
continue
else:
x = x.split('\\')
tup = []
for y in x:
tup.append(y);
sentence.append(tuple(tup))
return sentence
def data_manipulator(self):
for i in self.texts:
self.sentences.append(self.tuple_maker(i))
class BanglaPosTagger:
sentences = []
mod_elements = []
model = ''
dict_vectorizer = None
label_encoder = None
def __init__(self):
self.sentences = []
self.mod_elements = []
self.model = ''
self.dict_vectorizer = DictVectorizer(sparse=False)
self.label_encoder = LabelEncoder()
def load(self):
loader_ = Loader()
loader_.load_keras_model()
self.model = loader_.model
self.sentences = loader_.sentences
#print(self.sentences[0])
#print(self.mod_elements)
train_test_cutoff = int(.80 * len(self.sentences))
training_sentences = self.sentences[:train_test_cutoff]
testing_sentences = self.sentences[train_test_cutoff:]
train_val_cutoff = int(.25 * len(training_sentences))
validation_sentences = training_sentences[:train_val_cutoff]
training_sentences = training_sentences[train_val_cutoff:]
X_train, y_train = self.transform_to_dataset(training_sentences)
X_test, y_test = self.transform_to_dataset(testing_sentences)
X_val, y_val = self.transform_to_dataset(validation_sentences)
#dict_vectorizer = DictVectorizer(sparse=False)
self.dict_vectorizer.fit(X_train + X_test + X_val)
self.label_encoder.fit(y_train + y_test + y_val)
def bn_pos_tag(self, input):
self.load()
self.bn_tokenizer(input)
t_list = self.training_transform_to_dataset([self.mod_elements])
t_list = self.dict_vectorizer.transform(t_list)
#print(t_list)
predictions = self.model.predict(t_list)
list_ = []
for x in range(0, len(predictions)):
list_.append(np.argmax(predictions[x]))
#label_encoder = LabelEncoder()
labels = self.label_encoder.inverse_transform(list_)
result = []
for i in range(0, len(labels)):
tup = []
tup.append(self.mod_elements[i])
tup.append(labels[i])
result.append(tuple(tup))
return result
def bn_tokenizer(self, input_):
words = input_.split(' ')
words = [x.strip(' ') for x in words]
words = [i for i in words if i]
dict_ = {}
dict_['।'] = True
for p in punctuation:
dict_[p] = True
for n in words:
if dict_.get(n[-1]):
self.mod_elements.append(n[:-1])
self.mod_elements.append(n[-1])
else:
self.mod_elements.append(n)
self.mod_elements = [i for i in self.mod_elements if i]
def add_basic_features(self, sentence_terms, index):
#print(sentence_terms[index])
""" Compute some very basic word features.
:param sentence_terms: [w1, w2, ...]
:type sentence_terms: list
:param index: the index of the word
:type index: int
:return: dict containing features
:rtype: dict
"""
term = sentence_terms[index]
return {
'nb_terms': len(sentence_terms),
'term': term,
'is_first': index == 0,
'is_last': index == len(sentence_terms) - 1,
'prefix-1': term[0],
'prefix-2': term[:2],
'prefix-3': term[:3],
'suffix-1': term[-1],
'suffix-2': term[-2:],
'suffix-3': term[-3:],
'prev_word': '' if index == 0 else sentence_terms[index - 1],
'next_word': '' if index == len(sentence_terms) - 1 else sentence_terms[index + 1]
}
def training_transform_to_dataset(self, tagged_sentences):
"""
Split tagged sentences to X and y datasets and append some basic features.
:param tagged_sentences: a list of POS tagged sentences
:type tagged_sentences: list of list of tuples (term_i, tag_i)
:return:
"""
X = []
#print(len(tagged_sentences))
for pos_tags in tagged_sentences:
#print(pos_tags)
for index in range(len(pos_tags)):
# Add basic NLP features for each sentence term
X.append(self.add_basic_features(pos_tags, index))
return X
def untag(self, tagged_sentence):
"""
Remove the tag for each tagged term.
:param tagged_sentence: a POS tagged sentence
:type tagged_sentence: list
:return: a list of terms with the tags removed
:rtype: list of strings
"""
return [w for w, _ in tagged_sentence]
def transform_to_dataset(self, tagged_sentences):
"""
Split tagged sentences to X and y datasets and append some basic features.
:param tagged_sentences: a list of POS tagged sentences
:type tagged_sentences: list of list of tuples (term_i, tag_i)
:return:
"""
X, y = [], []
for pos_tags in tagged_sentences:
for index, (term, class_) in enumerate(pos_tags):
# Add basic NLP features for each sentence term
X.append(self.add_basic_features(self.untag(pos_tags), index))
y.append(class_)
return X, y
'''
t = BanglaPosTagger()
t.load()
print(t.bn_pos_tag(' আমার সোনার বাংলা । আমি তোমায় ভালোবাসি । '))
'''
|
from plexapi.server import PlexServer
from dotenv import load_dotenv
import os
load_dotenv()
PLEX_URL = os.getenv('PLEX_URL')
PLEX_TOKEN = os.getenv('PLEX_TOKEN')
print("connecting...")
plex = PlexServer(PLEX_URL, PLEX_TOKEN)
plexacc = plex.myPlexAccount()
print("getting users...")
users = plexacc.users()
user_total = len(users)
print(f"looping over {user_total} users...")
for u in users:
print(f"{u.username} - {u.email}")
|
#!/usr/bin/env python
### IMPORTS ###
from utils import makeCelery
from math import add_together
### GLOBALS ###
### FUNCTIONS ###
### CLASSES ###
### MAIN ###
if __name__ == '__main__':
pass
|
from unittest import TestCase
from leetcodepy.binary_tree_preorder_traversal import *
from leetcodepy.utils import trees
solution1 = Solution1()
solution2 = Solution2()
root = trees.from_values(1, None, 2, 3)
expected = [1, 2, 3]
class TestBinaryTreePreorderTraversal(TestCase):
def test1(self):
self.assertListEqual(expected, solution1.preorderTraversal(root))
def test2(self):
self.assertListEqual(expected, solution2.preorderTraversal(root))
|
from typing import Any, Dict
import scanpy as sc
from anndata import AnnData
def process(adata: AnnData, step: Dict[str, Any], output: Dict[str, Any]):
"""
Compute a neighborhood graph of observations
"""
output["neighbors"] = True
n_neighbors = step.get("nNeighbors")
metric = step.get("metric")
random_state = step.get("randomState")
result = sc.pp.neighbors(
adata, n_neighbors=n_neighbors, knn=True, method="umap", random_state=random_state, metric=metric, copy=True
)
return result
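# Hypothetical usage sketch (added for illustration): builds a small random
# AnnData and runs the step with explicit parameters; the key names follow the
# step.get(...) calls above.
#
#   import numpy as np
#   adata = AnnData(np.random.rand(50, 20))
#   out = {}
#   result = process(adata, {"nNeighbors": 10, "metric": "euclidean", "randomState": 0}, out)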
|
from .parser import Parser
from .telemetry import Telemetry
from .packet import Packet
from .element import Element, UnknownElement
from .elements import TimestampElement, DatetimeElement
import xml.etree.ElementTree as ET
from dateutil import parser as dup
import logging
# Reference: http://www.topografix.com/GPX/1/1/
class GPXParser(Parser):
tel_type = 'gpx'
def __init__(self, source,
convert_to_epoch: bool = False,
require_timestamp: bool = False):
super().__init__(source,
convert_to_epoch = convert_to_epoch,
require_timestamp = require_timestamp)
self.logger = logging.getLogger("OTK.GPXParser")
def read(self):
tree = ET.parse(self.source)
tel = Telemetry()
self._traverse_tree(tree.getroot(), tel)
if len(tel) == 0:
self.logger.warn("No telemetry was found. Returning empty Telemetry()")
return tel
def _traverse_tree(self, node, tel):
for child in node:
# Ignore namespace if it exists
tag = child.tag[child.tag.find('}')+1:]
# These are all of the tags that contain the data we care about
if tag in {"trkpt", "metadata", "rtept", "wpt"}:
packet = Packet()
self._extract_node(child, packet)
if self.require_timestamp and TimestampElement.name not in packet \
and DatetimeElement.name not in packet:
self.logger.critical("Could not find any time elements when require_timestamp was set")
if len(packet) > 0:
self.logger.info("Adding new packet.")
tel.append(packet)
else:
self.logger.warn("No telemetry was found in node. Packet is empty, skipping.")
self._traverse_tree(child, tel)
def _extract_node(self, node, packet):
for key, val in node.items():
self._add_element(packet, key, val)
if node.text and not node.text.isspace():
tag = node.tag[node.tag.find('}')+1:]
self._add_element(packet, tag, node.text.strip())
for child in node:
self._extract_node(child, packet)
def _add_element(self, packet, key, val):
if key in self.element_dict:
element_cls = self.element_dict[key]
if element_cls == DatetimeElement and self.convert_to_epoch:
val = dup.parse(val).timestamp()
packet[TimestampElement.name] = TimestampElement(val)
else:
packet[element_cls.name] = element_cls(val)
else:
self.logger.warn("Adding unknown element ({} : {})".format(key, val))
packet[key] = UnknownElement(val)
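# A minimal usage sketch (added; not part of the original module). "track.gpx" is a
# hypothetical local GPX 1.1 file; which tags survive depends on the element classes
# registered in Parser.element_dict.
#
#   parser = GPXParser("track.gpx", convert_to_epoch=True, require_timestamp=False)
#   telemetry = parser.read()
#   print("packets parsed:", len(telemetry))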
|
lista = [1, 10]
arquivo = open('teste.txt', 'r')
try:
texto = arquivo.read()
divisao = 10 / 1
numero = lista[1]
except ZeroDivisionError:
    print('Division by zero is not possible')
except ArithmeticError:
    print('An error occurred while performing an arithmetic operation.')
except IndexError:
    print('Error accessing an invalid list index.')
except BaseException as ex:
    print('Unknown error. Error: {}'.format(ex))
else:
    print('Runs when no exception occurs.')
finally:
    print('Always runs.')
    print('Closing file.')
arquivo.close()
|
from bs4 import BeautifulSoup
import requests
#
# Run this Second!
#
# From the get_urls_from_sitemap.py, we get a list of all of the Category Pages. Now we're going to scrape those
# pages to get the URL's for any Product Detail Pages.#
#
# The target's name has been redacted
domain = 'http://www.redacted.com'
#
# More laziness. Deduped and narrowed down.
#
urls = [
'/River2Sea/catpage-RIVERSEA.html',
'/River2Sea_Crankbaits/catpage-CRNKR2S.html',
'/River2Sea_Top_Water/catpage-TOPR2S.html',
'/River2Sea_Swimbaits/catpage-SWIMRIVER.html',
'/River2Sea_Spinner_Baits/catpage-SPINR2S.html',
'/River2Sea_Buzz_Baits/catpage-BZZR2S.html',
'/River2Sea_Hollow_Body_Frogs/catpage-HBFR2S.html',
'/River2Sea_Jigs/catpage-JIGR2S.html',
'/River2Sea_Terminal_Tackle/catpage-RIVERSTERM.html',
'/River2Sea_Soft_Baits/catpage-RIVERSOFT.html',
'/River2Sea_Apparel/catpage-R2SEAAPPARE.html',
'/Roboworm/catpage-ROBO.html',
'/Roboworm_Worms/catpage-SFWROBO.html',
'/Roboworm_Swimbaits/catpage-RSWIM.html',
'/Roboworm_Terminal_Tackle/catpage-TERMROBO.html'
]
#
# For each of these URL's, look for the product image link (I'm grabbing the link associated with the item image) and
# then picking out the target url from the href.
#
for xurl in urls:
url = "{}{}".format(domain, xurl)
r = requests.get(url).text
soup = BeautifulSoup(r, 'html.parser')
links = soup.findAll("a", {"class": "image_wrap"})
for link in links:
if link is None:
continue
# print("Link: {}".format(link))
# print("---------------------------------------")
urlpath = link["href"]
# print("Urlpath: {}".format(urlpath))
        if 'descpage' not in urlpath:
continue
print("{}".format(urlpath))
#
# The output list of URL's from this get provided to the scrape_test.py program
#
|
import unittest
from hstest.check_result import correct
from hstest.dynamic.dynamic_test import dynamic_test
from hstest.stage_test import StageTest
class TestRepeatingWrongAmount(StageTest):
@dynamic_test(repeat=-1)
def test(self, x):
return correct()
class Test(unittest.TestCase):
def test(self):
status, feedback = TestRepeatingWrongAmount('main').run_tests()
self.assertNotEqual(status, 0)
self.assertIn("UnexpectedError: Dynamic test \"test\" should not "
"be repeated < 0 times, found -1", feedback)
if __name__ == '__main__':
Test().test()
|
import os
import sys
from PIL import Image
import numpy as np
import matplotlib.image as img
import matplotlib.pyplot as plt
import cv2
import tqdm
def get_labels_from_json(dir_labels, dir_annotated, format="tif", no_label=False):
if not os.path.exists(dir_labels):
print("New Label Directory:", dir_labels)
os.makedirs(dir_labels)
else:
print("Existing Label Directory:", dir_labels)
print("Attention: Overwriting...")
print("Create labels in directory ", dir_labels)
filenames = [
file for file in os.listdir(dir_annotated) if file.endswith("." + format)
]
for i in tqdm.trange(len(filenames)):
filename = filenames[i]
# for filename in filenames:
full_path = os.path.join(dir_annotated, filename)
full_path_label = os.path.join(dir_labels, filename)
image = img.imread(full_path)
label = extract_label(
image, labelcolors=["red", "green", "blue"], fill=True, no_label=no_label
)
Image.fromarray(np.uint8(label * 255.0)).save(full_path_label)
import matplotlib.gridspec as gridspec
plt.figure(figsize=(16, 8))
gs1 = gridspec.GridSpec(1, 2)
gs1.update(wspace=0.05, hspace=0.05)
ax0 = plt.subplot(gs1[0])
ax0.imshow(image, cmap='gray')
ax0.set_xticks([])
ax0.set_yticks([])
ax1 = plt.subplot(gs1[1])
ax1.imshow(label, cmap='gray')
ax1.set_xticks([])
ax1.set_yticks([])
plt.show()
def extract_label(
image,
labelcolors=["red", "green", "blue"],
fill=True,
noise_suppression=True,
no_label=False,
):
"""
For an annotated image, extracts binary label containing the label contour.
:param image_path: Path to grayscale image with drawn labels.
:param labelcolors: Color of the label. So far only red is possible
:param fill: boolean. If true, fills out contours.
:param noise_suppression: If true, filters binary mask with Gaussian kernel to reduce noise.
:param no_label: If true, creates labels that are all zeros (black). Good&fast for non-defective images.
:return: np array with shape [height, width]
"""
height, width, channels = image.shape
label = np.zeros([height, width], dtype=np.uint8)
if no_label:
return label
for color in labelcolors:
if color == "red":
rgb_threshold = [220, -200, -200]
elif color == "green":
rgb_threshold = [-200, 220, -200]
elif color == "blue":
rgb_threshold = [-200, -200, 220]
else:
print("Label color should be 'red', 'green' or 'blue'.")
return
s = []
for color_th in rgb_threshold:
s.append(color_th / abs(color_th))
for py in range(height):
for px in range(width):
if (
s[0] * image[py, px, 0] > rgb_threshold[0]
and s[1] * image[py, px, 1] > rgb_threshold[1]
and s[2] * image[py, px, 2] > rgb_threshold[2]
):
label[py, px] = 1
if not fill:
if noise_suppression:
label = cv2.medianBlur(label, 3)
pass
return label
else:
label_filled = fill_holes(label.copy())
if noise_suppression:
label_filled = cv2.medianBlur(label_filled, 3)
pass
label_filled = label_filled * 1.0 / 255.0
return label_filled
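# Note (added): the per-pixel double loop in extract_label above can be replaced by an
# equivalent vectorized comparison. A sketch of the idea, assuming the same
# rgb_threshold convention (positive value = channel must exceed it, negative value =
# channel must stay below its absolute value):
#
#   mask = np.ones((height, width), dtype=bool)
#   for ch, th in enumerate(rgb_threshold):
#       sign = th / abs(th)
#       mask &= sign * image[:, :, ch] > th
#   label[mask] = 1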
def fill_holes(im_th):
"""
Fill holes in contour using the floodfill algorithm.
:param im_th: Binary thresholded image.
:return: Output image.
"""
# Copy the thresholded image.
im_floodfill = im_th.copy() * 254
# Mask used to flood filling.
# Notice the size needs to be 2 pixels larger than the image.
h, w = im_th.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
# Floodfill from point (0, 0)
cv2.floodFill(im_floodfill, mask, (0, 0), 255)
# Invert floodfilled image
im_floodfill_inv = cv2.bitwise_not(im_floodfill)
# Combine the two images to get the foreground.
im_out = im_th | im_floodfill_inv
return im_out
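# A tiny self-check sketch for fill_holes (added; not part of the original file):
# a 5x5 ring of ones whose interior hole should be filled. Values follow the
# convention used above (binary 0/1 input, uint8).
#
#   ring = np.zeros((5, 5), np.uint8)
#   ring[1:4, 1:4] = 1
#   ring[2, 2] = 0                     # the hole
#   filled = fill_holes(ring)
#   assert filled[2, 2] > 0            # hole is now foreground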
def get_labels(dir_labels, dir_annotated, format="tif", no_label=False):
"""
Creates binary pixel-wise labels from annotations.
:param dir_labels: Target directory where labels will be saved. Will be created if not existing.
:param dir_annotated: Directory where annotations are located.
ATTENTION: Annotations must ...
* ... be of the color RED ([255, 0, 0]), GREEN ([0, 255, 0]) or BLUE ([0, 0, 255])
* ... be continuous lines (no dashes!) that form a closed loop
* ... not touch the walls or be so close to the walls that there is no space from them to flow.
Let me explain this further. You can imagine the algorithm to work like water flowing from the walls,
only halting of borders of a specific color.
Everything covered by the water will be black (zeros), everything else white (ones).
If it didn't work for your annotation, try again for them fulfilling above points.
:param format: Image format, tried with "tif"
:param no_label: If true, creates labels that are all zeros (black). Good&fast for non-defective images.
:return:
"""
if not os.path.exists(dir_labels):
print("New Label Directory:", dir_labels)
os.makedirs(dir_labels)
else:
print("Existing Label Directory:", dir_labels)
print("Attention: Overwriting...")
print("Create labels in directory ", dir_labels)
filenames = [
file for file in os.listdir(dir_annotated) if file.endswith("." + format)
]
for i in tqdm.trange(len(filenames)):
filename = filenames[i]
# for filename in filenames:
full_path = os.path.join(dir_annotated, filename)
full_path_label = os.path.join(dir_labels, filename)
image = img.imread(full_path)
label = extract_label(
image, labelcolors=["red", "green", "blue"], fill=True, no_label=no_label
)
Image.fromarray(np.uint8(label * 255.0)).save(full_path_label)
import matplotlib.gridspec as gridspec
plt.figure(figsize=(16, 8))
gs1 = gridspec.GridSpec(1, 2)
gs1.update(wspace=0.05, hspace=0.05)
ax0 = plt.subplot(gs1[0])
ax0.imshow(image, cmap='gray')
ax0.set_xticks([])
ax0.set_yticks([])
ax1 = plt.subplot(gs1[1])
ax1.imshow(label, cmap='gray')
ax1.set_xticks([])
ax1.set_yticks([])
plt.show()
if __name__ == "__main__":
# Get labels for defective images (will take a while)
get_labels(
dir_labels="../../data/cubic/defective/labels/",
dir_annotated="../../data/cubic/defective/annotations",
no_label=False,
)
# Get labels for non-defective images (should be fast)
get_labels(
dir_labels="../../data/cubic/non_defective/labels/",
dir_annotated="../../data/cubic/non_defective/annotations",
no_label=True,
)
|
"""
## Reference from: Multi-Stage Progressive Image Restoration
## Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao
## https://arxiv.org/abs/2102.02808
"""
import numpy as np
import os
import argparse
from tqdm import tqdm
import torch
import utils
from SRMNet import SRMNet
from skimage import img_as_ubyte
import scipy.io as sio
parser = argparse.ArgumentParser(description='Image Denoising using SRMNet')
parser.add_argument('--input_dir', default='D:/NCHU/Dataset/Denoise/Real-world noise/SIDD/test', type=str,
help='Directory of validation images')
parser.add_argument('--result_dir', default='./test_results/SIDD/', type=str, help='Directory for results')
parser.add_argument('--weights', default='./checkpoints/SRMNet_real_denoise/models/model_bestPSNR.pth', type=str,
help='Path to weights')
parser.add_argument('--gpus', default='0', type=str, help='CUDA_VISIBLE_DEVICES')
parser.add_argument('--save_images', default=False, help='Save denoised images in result directory')
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
result_dir = os.path.join(args.result_dir, 'mat')
utils.mkdir(result_dir)
if args.save_images:
result_dir_img = os.path.join(args.result_dir, 'png')
utils.mkdir(result_dir_img)
model_restoration = SRMNet()
utils.load_checkpoint(model_restoration, args.weights)
print("===>Testing using weights: ", args.weights)
model_restoration.cuda()
# model_restoration = nn.DataParallel(model_restoration)
model_restoration.eval()
# Process data # BenchmarkNoisyBlocksSrgb.mat ValidationNoisyBlocksSrgb.mat
filepath = os.path.join(args.input_dir, 'ValidationNoisyBlocksSrgb.mat')
img = sio.loadmat(filepath)
Inoisy = np.float32(np.array(img['ValidationNoisyBlocksSrgb'])) # ValidationNoisyBlocksSrgb
Inoisy /= 255.
restored = np.zeros_like(Inoisy)
import time
start = time.time()
with torch.no_grad():
for i in tqdm(range(40)):
for k in range(32): # 32
noisy_patch = torch.from_numpy(Inoisy[i, k, :, :, :]).unsqueeze(0).permute(0, 3, 1, 2).cuda()
restored_patch = model_restoration(noisy_patch)
restored_patch = torch.clamp(restored_patch, 0, 1).cpu().detach().permute(0, 2, 3, 1).squeeze(0)
restored[i, k, :, :, :] = restored_patch
if args.save_images:
save_file = os.path.join(result_dir_img, '%04d_%02d.png' % (i + 1, k + 1))
utils.save_img(save_file, img_as_ubyte(restored_patch))
# save denoised data
print('Process time each patch:', (time.time() - start)/1280)
sio.savemat(os.path.join(result_dir, 'Idenoised.mat'), {"Idenoised": restored, })
|
import numpy as np
from .non_uniform_mutation import non_uniform_mutation
from .uniform_mutation import uniform_mutation
def mutation(pool, area, probability, method, dist="cauchy"):
"""Apply mutation over the whole pool.
Args:
pool: (list of) plans to apply mutation over.
area: (int) area of interest.
probability: (int) probability of mutation.
method: (str) mutation method.
- uniform
- non_uniform
dist: (str) type of distribution to be used (needed only in non_uniform_mutation).
- cauchy (default)
- gaussian
Returns:
None
"""
for plan in pool:
# apply for non_fixed cells only.
for cell in plan.get_cells("non_fixed"):
random_number = np.random.random()
if random_number <= probability:
if method == "uniform":
uniform_mutation(cell, area)
elif method == "non_uniform":
non_uniform_mutation(cell, area, dist)
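# A minimal usage sketch (added). `plan_a` / `plan_b` are assumptions here: any objects
# whose get_cells("non_fixed") returns cells understood by uniform_mutation /
# non_uniform_mutation would work.
#
#   mutation(pool=[plan_a, plan_b], area=64, probability=0.1,
#            method="non_uniform", dist="gaussian")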
|
from disco.test import TestCase, TestJob
special = '--special_test_string--'
def isspecial(offset__time_node_message):
return special in offset__time_node_message[1][2]
class DiscoAPIJob(TestJob):
@staticmethod
def map(e, params):
for i in range(3):
print(special)
yield e.split('_')
class DiscoAPITestCase(TestCase):
def runTest(self):
self.job = DiscoAPIJob().run(input=['raw://disco_api'])
self.assertResults(self.job, [('disco', 'api')])
        self.assertEqual(len(list(filter(isspecial, self.job.events()))), 3)
|
# coding: utf-8
""
import sys
sys.path.append("..")
""
from jobtimize.scrapers.scrapmonster import MonsterScrap, scrapMonsterID, dicoFromJson
import pytest
""
class TestMonster:
searchList = ["Data Analyst"]
countryList = ["FR"]
monsterID = scrapMonsterID(searchList, countryList)
scraped = MonsterScrap(searchList, countryList)
dicojson = {
'country': str(),
'url': str(),
'description': str(),
'header': str(),
'city': str(),
'company': str(),
'type': str(),
'category': str(),
'posted': str()
}
if len(list(monsterID)) > 0:
dicojson = dicoFromJson((list(monsterID)[0], None))
def test_scrapID(self):
assert isinstance(self.monsterID, (set, list))
def test_dicojson(self):
assert len(self.dicojson) == 9
# def test_scraped_len(self):
    #     assert len(self.monsterID) == len(self.scraped)
|
#!/usr/bin/env python
class Program:
@staticmethod
def debug(values, log=None):
values = [int(x) for x in values.split(",")]
variables = {}
off = 0
log.show(" Offset Val Op Code 'Description'")
while off < len(values):
off += Program.debug_line(log, values, off, variables=variables)
@staticmethod
def _to_string(value):
ret = ""
chars = "abcdefghijklmnopqrstuvwxyz"
while True:
ret += chars[int(value % 26)]
value = int(value / 26)
if value <= 0:
break
return ret[::-1]
@staticmethod
def debug_line(log, values, off, variables=None):
temp = Program([], 0)
if values[off] % 100 not in temp.ops:
log.show("{:7d} {:6d}".format(off, values[off]))
return 1
else:
_func, name, params, desc = temp.ops[values[off] % 100]
info = []
for i in range(params):
mode = (values[off] // [100, 1000, 10000][i]) % 10
if mode == 1:
info.append(str(values[off + 1 + i]))
elif mode == 0:
if variables is not None and values[off + 1 + i] >= len(values):
if values[off + 1 + i] not in variables:
variables[values[off + 1 + i]] = Program._to_string(len(variables))
info.append("[{}]".format(variables[values[off + 1 + i]]))
else:
info.append("[{}]".format(values[off + 1 + i]))
elif mode == 2:
info.append("[{}+rel]".format(values[off + 1 + i]))
desc = desc.format(*info)
log.show("{:7d} {:6d} {:14s} {:30s} {}".format(
off,
values[off],
name,
"'" + desc + "'",
",".join([str(values[x + off]) for x in range(params + 1)]),
))
return params + 1
def __init__(self, ticker, log, debug=False):
from collections import deque, defaultdict
self.debug_frames = False
self.frames = []
self.show_debug = debug
self.log = log
self.ticker = defaultdict(int, [(i, ticker[i]) for i in range(len(ticker))])
self.off = 0
self.ops = {
1: (self.op_add, "add", 3, "{} + {} -> {}"),
2: (self.op_mult, "mult", 3, "{} * {} -> {}"),
3: (self.op_input, "input", 1, "input -> {}"),
4: (self.op_output, "output", 1, "{} -> output"),
5: (self.op_jump_if_true, "jump_if", 2, "if {} != 0 then goto {}"),
6: (self.op_jump_if_false, "jump_not", 2, "if {} == 0 then goto {}"),
7: (self.op_less_than, "less_than", 3, "if {0} < {1} then 1 -> {2}, else 0 -> {2}"),
8: (self.op_equals, "equals", 3, "if {0} == {1} then 1 -> {2}, else 0 -> {2}"),
9: (self.op_relative, "set_relative", 1, "relative += {}"),
99: (self.op_terminate, "terminate", 0, "exit"),
}
self.input = deque()
self.output = deque()
self.last_output = None
self.source_output = None
self.flag_running = True
        self.input_dry = False  # becomes True when input is requested but the queue is empty
self.relative = 0
def save_frames(self):
self.debug_frames = True
def hook_up_output(self, source_output):
self.source_output = source_output
def add_to_input(self, value):
self.input.appendleft(value)
def tick_till_end(self):
while self.tick():
pass
def tick(self):
if self.flag_running:
self.input_dry = False
if self.show_debug:
Program.debug_line(self.log, self.ticker, self.off)
if self.debug_frames:
self.frames.append([self.off, self.ticker.copy()])
self.ops[self.ticker[self.off] % 100][0]()
if self.debug_frames:
if self.input_dry:
self.frames.pop(-1)
return self.flag_running == True and self.input_dry == False
def is_on_input(self):
return self.ticker[self.off] % 100 == 3
def is_on_output(self):
return self.ticker[self.off] % 100 == 4
def op_relative(self):
self.relative += self.get_value(1)
self.off += 2
def op_jump_if_true(self):
if self.get_value(1) != 0:
self.off = self.get_value(2)
else:
self.off += 3
def op_jump_if_false(self):
if self.get_value(1) == 0:
self.off = self.get_value(2)
else:
self.off += 3
def op_less_than(self):
if self.get_value(1) < self.get_value(2):
self.set_value(3, 1)
else:
self.set_value(3, 0)
self.off += 4
def op_equals(self):
if self.get_value(1) == self.get_value(2):
self.set_value(3, 1)
else:
self.set_value(3, 0)
self.off += 4
def op_input(self):
if self.source_output is not None:
while len(self.source_output.output) > 0:
self.input.appendleft(self.source_output.output.pop())
if len(self.input) > 0:
temp = self.input.pop()
if self.debug_frames:
self.frames.append(["input", temp])
self.set_value(1, temp)
self.off += 2
else:
self.input_dry = True
def op_output(self):
self.last_output = self.get_value(1)
if self.debug_frames:
self.frames.append(["output", self.last_output])
self.output.appendleft(self.last_output)
self.off += 2
def op_terminate(self):
self.off += 1
self.flag_running = False
def op_add(self):
self.set_value(3, self.get_value(1) + self.get_value(2))
self.off += 4
def op_mult(self):
self.set_value(3, self.get_value(1) * self.get_value(2))
self.off += 4
def get_value(self, index):
mode = (self.ticker[self.off] // [0, 100, 1000, 10000][index]) % 10
if mode == 2: # Relative
return self.ticker[self.ticker[self.off + index] + self.relative]
elif mode == 1: # Immediate
return self.ticker[self.off + index]
elif mode == 0: # Position
return self.ticker[self.ticker[self.off + index]]
else:
raise Exception("Invalid get mode: " + str(mode))
def set_value(self, index, value):
mode = (self.ticker[self.off] // [0, 100, 1000, 10000][index]) % 10
if mode == 2: # Relative
self.ticker[self.ticker[self.off + index]+self.relative] = value
elif mode == 0: # Position
self.ticker[self.ticker[self.off + index]] = value
else:
raise Exception("Invalid set mode: " + str(mode))
|
def dimensionalIterator(dimensions, maxItems=-1):
"""
Given a list of n positive integers, return a generator that yields
n-tuples of coordinates to 'fill' the dimensions. This is like an
odometer in a car, but the dimensions do not each have to be 10.
For example: dimensionalIterator((2, 3)) will yield in order
(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2). See the tests in
test_dimension.py for many more examples.
A dimension may also be given as '*', to provide a dimension that is
never exhausted. For example, dimensionalIterator(('*', 2)) yields the
infinite series (0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1), ....
maxItems can be used to limit the number of tuples yielded.
"""
nDimensions = len(dimensions)
if nDimensions == 0 or maxItems == 0:
return
if any(map(lambda x: x != '*' and x <= 0, dimensions)):
raise ValueError('Dimensions not all positive! %r' % (dimensions,))
odometer = [0, ] * nDimensions
while maxItems != 0:
yield tuple(odometer)
maxItems -= 1
wheel = nDimensions - 1
        while (wheel >= 0 and
               dimensions[wheel] != '*' and
               odometer[wheel] == dimensions[wheel] - 1):
odometer[wheel] = 0
wheel -= 1
if wheel < 0:
return
odometer[wheel] += 1
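# A short usage check (added): reproduces the examples from the docstring above.
if __name__ == '__main__':
    print(list(dimensionalIterator((2, 3))))
    # -> [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]
    print(list(dimensionalIterator(('*', 2), maxItems=4)))
    # -> [(0, 0), (0, 1), (1, 0), (1, 1)]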
|
"""Support for Transports Metropolitans de Barcelona."""
|
# -*- coding: utf-8 -*-
from urlparse import urljoin
from flask import Blueprint, render_template, request, flash, abort, redirect, \
g, url_for, jsonify
from werkzeug.contrib.atom import AtomFeed
from blog.utils import requires_login, requires_admin, \
format_creole, request_wants_json
from blog.database import Category, Post, Comment, db_session
from datetime import datetime
mod = Blueprint('posts', __name__, url_prefix='/posts')
@mod.route('/')
def index():
return render_template('posts/index.html',
categories=Category.query.order_by(Category.name).all(),
recent=Post.query.order_by(Post.update_time.desc()).limit(5).all())
@mod.route('/new/', methods=['GET', 'POST'])
@requires_login
def new():
category_id = None
preview = None
if 'category' in request.args:
rv = Category.query.filter_by(slug=request.args['category']).first()
if rv is not None:
category_id = rv.id
if request.method == 'POST':
category_id = request.form.get('category', type=int)
if 'preview' in request.form:
preview = format_creole(request.form['body'])
else:
title = request.form['title']
body = request.form['body']
if not body:
flash(u'Error: you have to enter a post')
else:
category = Category.query.get(category_id)
if category is not None:
post = Post(g.user, title, body, category)
db_session.add(post)
db_session.commit()
flash(u'Your post was added')
return redirect(post.url)
return render_template('posts/new.html',
categories=Category.query.order_by(Category.name).all(),
active_category=category_id, preview=preview)
@mod.route('/list')
def list():
posts = Post.query.order_by(Post.update_time.desc()).limit(5).all()
return render_template('posts/list.html',posts=posts)
@mod.route('/<int:id>/', methods=['GET', 'POST'])
def show(id):
post = Post.query.get(id)
if post is None:
abort(404)
if request_wants_json():
return jsonify(post=post.to_json())
if request.method == 'POST':
title = request.form['title']
text = request.form['text']
if text:
db_session.add(Comment(post, g.user, title, text))
db_session.commit()
flash(u'Your comment was added')
return redirect(post.url)
else:
flash(u'Your comment was not added')
return render_template('posts/show.html', post=post)
@mod.route('/comments/<int:id>/', methods=['GET', 'POST'])
@requires_admin
def edit_comment(id):
comment = Comment.query.get(id)
if comment is None:
abort(404)
form = dict(title=comment.title, text=comment.text)
if request.method == 'POST':
if 'delete' in request.form:
db_session.delete(comment)
db_session.commit()
flash(u'Comment was deleted.')
return redirect(comment.post.url)
elif 'cancel' in request.form:
return redirect(comment.post.url)
form['title'] = request.form['title']
form['text'] = request.form['text']
if not form['text']:
flash(u'Error: comment text is required.')
else:
comment.title = form['title']
comment.text = form['text']
db_session.commit()
flash(u'Comment was updated.')
return redirect(comment.post.url)
return render_template('posts/edit_comment.html', form=form,
comment=comment)
@mod.route('/edit/<int:id>/', methods=['GET', 'POST'])
@requires_login
def edit(id):
post = Post.query.get(id)
if post is None:
abort(404)
if g.user is None or (not g.user.is_admin and post.author != g.user):
abort(401)
preview = None
form = dict(title=post.title, body=post.body,
category=post.category.id)
if request.method == 'POST':
form['title'] = request.form['title']
form['body'] = request.form['body']
form['category'] = request.form.get('category', type=int)
if 'preview' in request.form:
preview = format_creole(request.form['body'])
elif 'delete' in request.form:
for comment in post.comments:
db_session.delete(comment)
db_session.delete(post)
db_session.commit()
flash(u'Your post was deleted')
return redirect(url_for('posts.index'))
else:
category_id = request.form.get('category', type=int)
if not form['body']:
flash(u'Error: you have to enter a post')
else:
category = Category.query.get(category_id)
if category is not None:
post.title = form['title']
post.body = form['body']
post.category = category
post.update_time=datetime.utcnow()
db_session.commit()
flash(u'Your post was modified')
return redirect(post.url)
return render_template('posts/edit.html',
post=post, preview=preview, form=form,
categories=Category.query.order_by(Category.name).all())
@mod.route('/category/<slug>/')
def category(slug):
category = Category.query.filter_by(slug=slug).first()
if category is None:
abort(404)
posts = category.posts.order_by(Post.title).all()
if request_wants_json():
return jsonify(category=category.to_json(),
posts=[s.id for s in posts])
return render_template('posts/category.html', category=category,
posts=posts)
@mod.route('/manage-categories/', methods=['GET', 'POST'])
@requires_admin
def manage_categories():
categories = Category.query.order_by(Category.name).all()
if request.method == 'POST':
for category in categories:
category.name = request.form['name.%d' % category.id]
category.slug = request.form['slug.%d' % category.id]
db_session.commit()
flash(u'Categories updated')
return redirect(url_for('.manage_categories'))
return render_template('posts/manage_categories.html',
categories=categories)
@mod.route('/new-category/', methods=['POST'])
@requires_admin
def new_category():
category = Category(name=request.form['name'])
db_session.add(category)
db_session.commit()
flash(u'Category %s created.' % category.name)
return redirect(url_for('.manage_categories'))
@mod.route('/delete-category/<int:id>/', methods=['GET', 'POST'])
@requires_admin
def delete_category(id):
category = Category.query.get(id)
if category is None:
abort(404)
if request.method == 'POST':
if 'cancel' in request.form:
flash(u'Deletion was aborted')
return redirect(url_for('.manage_categories'))
move_to_id = request.form.get('move_to', type=int)
if move_to_id:
move_to = Category.query.get(move_to_id)
if move_to is None:
flash(u'Category was removed in the meantime')
else:
for post in category.posts.all():
post.category = move_to
db_session.delete(category)
flash(u'Category %s deleted and entries moved to %s.' %
(category.name, move_to.name))
else:
category.posts.delete()
db_session.delete(category)
flash(u'Category %s deleted' % category.name)
db_session.commit()
return redirect(url_for('.manage_categories'))
return render_template('posts/delete_category.html',
category=category,
other_categories=Category.query
.filter(Category.id != category.id).all())
@mod.route('/recent.atom')
def recent_feed():
feed = AtomFeed(u'Recent Flask posts',
subtitle=u'Recent additions to the Flask post archive',
feed_url=request.url, url=request.url_root)
posts = Post.query.order_by(Post.pub_date.desc()).limit(15)
for post in posts:
feed.add(post.title, unicode(post.rendered_body),
content_type='html', author=post.author.name,
url=urljoin(request.url_root, post.url),
updated=post.pub_date)
return feed.get_response()
@mod.route('/posts/<int:id>/comments.atom')
def comments_feed(id):
post = Post.query.get(id)
if post is None:
abort(404)
feed = AtomFeed(u'Comments for post “%s”' % post.title,
feed_url=request.url, url=request.url_root)
for comment in post.comments:
feed.add(comment.title or u'Untitled Comment',
unicode(comment.rendered_text),
content_type='html', author=comment.author.name,
url=request.url, updated=comment.pub_date)
return feed.get_response()
|
#!/usr/bin/env python3
from itertools import combinations
n = int(input())
s = input().split()
k = int(input())
total = 0
match = 0
for _ in combinations(s, k):
total += 1
if 'a' in _:
match += 1
print(match/total)
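# Added note: the brute-force ratio above can be cross-checked with a closed form.
# If m of the n letters are not 'a', then P('a' chosen) = 1 - C(m, k) / C(n, k).
# A sketch (math.comb needs Python 3.8+):
#
#   from math import comb
#   m = sum(1 for ch in s if ch != 'a')
#   analytic = 1 - comb(m, k) / comb(n, k)
#   assert abs(analytic - match / total) < 1e-9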
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: purchase.py
@time: 2018-08-31 15:41
"""
from __future__ import unicode_literals
from datetime import datetime
from flask import (
request,
flash,
render_template,
url_for,
redirect,
abort,
jsonify,
Blueprint,
g)
from flask_babel import gettext as _
from flask_login import login_required, current_user
from flask_weasyprint import render_pdf, HTML, CSS
from werkzeug import exceptions
from app_backend import (
app,
excel,
)
from app_backend.api.purchase import add_purchase, get_purchase_user_list_choices, get_purchase_rows, \
get_purchase_pagination, edit_purchase, get_purchase_row_by_id, audit_purchase, cancel_audit_purchase
from app_backend.api.purchase_items import add_purchase_items, edit_purchase_items, get_purchase_items_rows, \
delete_purchase_items
from app_backend.api.rack import get_rack_choices
from app_backend.api.supplier import get_supplier_row_by_id
from app_backend.api.supplier_contact import get_supplier_contact_row_by_id
from app_backend.api.user import get_user_choices, get_user_row_by_id
from app_backend.api.warehouse import get_warehouse_choices
from app_backend.forms.purchase import PurchaseAddForm
from app_backend.forms.purchase import PurchaseSearchForm, PurchaseEditForm, PurchaseItemsEditForm
from app_backend.models.model_bearing import Purchase
from app_backend.permissions.buyer_purchase import (
permission_purchase_section_export,
permission_purchase_section_del,
permission_purchase_section_audit, permission_purchase_section_search, permission_purchase_section_add,
permission_purchase_section_edit, permission_purchase_section_get, permission_purchase_section_print)
from app_backend.signals.purchase import signal_purchase_status_delete
from app_common.maps.default import DEFAULT_SEARCH_CHOICES_INT_OPTION
from app_common.maps.operations import OPERATION_EXPORT, OPERATION_DELETE
from app_common.maps.status_audit import STATUS_AUDIT_NO, STATUS_AUDIT_OK
from app_common.maps.status_delete import (
STATUS_DEL_OK,
STATUS_DEL_NO)
# Define the blueprint
from app_common.tools.date_time import time_utc_to_local
bp_purchase = Blueprint('purchase', __name__, url_prefix='/purchase')
# Load configuration
DOCUMENT_INFO = app.config.get('DOCUMENT_INFO', {})
PER_PAGE_BACKEND = app.config.get('PER_PAGE_BACKEND', 20)
AJAX_SUCCESS_MSG = app.config.get('AJAX_SUCCESS_MSG', {'result': True})
AJAX_FAILURE_MSG = app.config.get('AJAX_FAILURE_MSG', {'result': False})
@bp_purchase.route('/lists.html', methods=['GET', 'POST'])
@login_required
@permission_purchase_section_search.require(http_exception=403)
def lists():
template_name = 'purchase/lists.html'
    # Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('purchase lists')
    # Search conditions
form = PurchaseSearchForm(request.form)
form.uid.choices = get_purchase_user_list_choices()
# app.logger.info('')
search_condition = [
Purchase.status_delete == STATUS_DEL_NO,
]
if request.method == 'POST':
        # Form validation failed
        if not form.validate_on_submit():
            flash(_('Search Failure'), 'danger')
            # Handle csrf_token errors separately
            if hasattr(form, 'csrf_token') and getattr(form, 'csrf_token').errors:
                for error in form.csrf_token.errors:
                    flash(error, 'danger')
else:
if form.uid.data != DEFAULT_SEARCH_CHOICES_INT_OPTION:
search_condition.append(Purchase.uid == form.uid.data)
if form.supplier_cid.data and form.supplier_company_name.data:
search_condition.append(Purchase.supplier_cid == form.supplier_cid.data)
if form.start_create_time.data:
search_condition.append(Purchase.create_time >= form.start_create_time.data)
if form.end_create_time.data:
search_condition.append(Purchase.create_time <= form.end_create_time.data)
            # Handle export
            if form.op.data == OPERATION_EXPORT:
                # Check export permission
if not permission_purchase_section_export.can():
abort(403)
column_names = Purchase.__table__.columns.keys()
query_sets = get_purchase_rows(*search_condition)
return excel.make_response_from_query_sets(
query_sets=query_sets,
column_names=column_names,
file_type='csv',
file_name='%s.csv' % _('purchase lists')
)
            # Batch delete
            if form.op.data == OPERATION_DELETE:
                # Check delete permission
if not permission_purchase_section_del.can():
abort(403)
purchase_ids = request.form.getlist('purchase_id')
                # Check delete permission for each record
permitted = True
for purchase_id in purchase_ids:
                    # TODO: verify delete permission for this resource
if False:
ext_msg = _('Permission Denied')
flash(_('Del Failure, %(ext_msg)s', ext_msg=ext_msg), 'danger')
permitted = False
break
if permitted:
result_total = True
for purchase_id in purchase_ids:
current_time = datetime.utcnow()
purchase_data = {
'status_delete': STATUS_DEL_OK,
'delete_time': current_time,
'update_time': current_time,
}
result = edit_purchase(purchase_id, purchase_data)
if result:
                            # Send the delete signal
signal_data = {
'purchase_id': purchase_id,
'status_delete': STATUS_DEL_OK,
'current_time': current_time,
}
signal_purchase_status_delete.send(app, **signal_data)
result_total = result_total and result
if result_total:
flash(_('Del Success'), 'success')
else:
flash(_('Del Failure'), 'danger')
    # Pagination data
    pagination = get_purchase_pagination(form.page.data, PER_PAGE_BACKEND, *search_condition)
    # Render the template
return render_template(
template_name,
form=form,
pagination=pagination,
**document_info
)
@bp_purchase.route('/add.html', methods=['GET', 'POST'])
@login_required
@permission_purchase_section_add.require(http_exception=403)
def add():
"""
    Purchase stock-in
:return:
"""
# return jsonify({})
template_name = 'purchase/add.html'
    # Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('purchase add')
    # Load the create form
form = PurchaseAddForm(request.form)
form.uid.choices = get_user_choices()
form.uid.data = current_user.id
form.warehouse_id.choices = get_warehouse_choices(option_type='create')
    # Rack choices for the nested item forms
for item_form in form.purchase_items:
item_form.rack_id.choices = get_rack_choices(form.warehouse_id.data, option_type='create')
    # Enter the create page
if request.method == 'GET':
        # Render the page
return render_template(
template_name,
form=form,
**document_info
)
    # Handle the create request
    if request.method == 'POST':
        # Warehouse changed - skip validation
if form.warehouse_changed.data:
form.warehouse_changed.data = ''
return render_template(
template_name,
form=form,
**document_info
)
        # Add a blank row to the form
if form.data_line_add.data is not None:
if form.purchase_items.max_entries and len(
form.purchase_items.entries) >= form.purchase_items.max_entries:
                flash('At most %s rows can be created' % form.purchase_items.max_entries, 'danger')
else:
form.purchase_items.append_entry()
            # Rack choices for the nested item forms
for item_form in form.purchase_items:
item_form.rack_id.choices = get_rack_choices(form.warehouse_id.data, option_type='create')
return render_template(
template_name,
form=form,
**document_info
)
        # Remove one row from the form
if form.data_line_del.data is not None:
if form.purchase_items.min_entries and len(
form.purchase_items.entries) <= form.purchase_items.min_entries:
                flash('At least %s rows must be kept' % form.purchase_items.min_entries, 'danger')
else:
data_line_index = form.data_line_del.data
form.purchase_items.entries.pop(data_line_index)
return render_template(
template_name,
form=form,
**document_info
)
        # Form validation failed
if not form.validate_on_submit():
flash(_('Add Failure'), 'danger')
# flash(form.errors, 'danger')
return render_template(
template_name,
form=form,
**document_info
)
        # Form validation passed
        # Create the purchase record
current_time = datetime.utcnow()
purchase_data = {
'uid': form.uid.data,
'supplier_cid': form.supplier_cid.data,
'supplier_contact_id': form.supplier_contact_id.data,
# 'type_purchase': form.type_purchase.data,
'warehouse_id': form.warehouse_id.data,
'create_time': current_time,
'update_time': current_time,
}
purchase_id = add_purchase(purchase_data)
amount_purchase = 0
for purchase_item in form.purchase_items.entries:
current_time = datetime.utcnow()
purchase_item_data = {
'purchase_id': purchase_id,
'uid': form.uid.data,
'supplier_cid': form.supplier_cid.data,
'supplier_company_name': get_supplier_row_by_id(form.supplier_cid.data).company_name,
'production_id': purchase_item.form.production_id.data,
'production_brand': purchase_item.form.production_brand.data,
'production_model': purchase_item.form.production_model.data,
'production_sku': purchase_item.form.production_sku.data,
'warehouse_id': form.warehouse_id.data,
'rack_id': purchase_item.form.rack_id.data,
'note': purchase_item.form.note.data,
'quantity': purchase_item.form.quantity.data,
'unit_price': purchase_item.form.unit_price.data,
'create_time': current_time,
'update_time': current_time,
}
            # Insert the item
add_purchase_items(purchase_item_data)
amount_purchase += (purchase_item_data['quantity'] or 0) * (purchase_item_data['unit_price'] or 0)
        # Update the amounts
purchase_data = {
'amount_production': amount_purchase,
'amount_purchase': amount_purchase,
'update_time': current_time,
}
result = edit_purchase(purchase_id, purchase_data)
        # TODO: wrap the following in a transaction
        # save the line items
        # save the master record
        # Create succeeded
if result:
flash(_('Add Success'), 'success')
return redirect(request.args.get('next') or url_for('purchase.lists'))
        # Create failed
else:
flash(_('Add Failure'), 'danger')
return render_template(
template_name,
form=form,
**document_info
)
@bp_purchase.route('/<int:purchase_id>/edit.html', methods=['GET', 'POST'])
@login_required
@permission_purchase_section_edit.require(http_exception=403)
def edit(purchase_id):
"""
    Purchase stock-in edit
"""
    # Check edit permission
# enquiry_item_edit_permission = EnquiryItemEditPermission(enquiry_id)
# if not enquiry_item_edit_permission.can():
# abort(403)
purchase_info = get_purchase_row_by_id(purchase_id)
    # Check whether the resource exists
if not purchase_info:
abort(404)
    # Check whether the resource has been deleted
    if purchase_info.status_delete == STATUS_DEL_OK:
        abort(410)
    # Check whether the resource has been approved
if purchase_info.status_audit == STATUS_AUDIT_OK:
resource = _('Purchase')
abort(exceptions.Locked.code,
_('The %(resource)s has been approved, it cannot be modified', resource=resource))
template_name = 'purchase/edit.html'
    # Load the edit form
form = PurchaseEditForm(request.form)
form.uid.choices = get_user_choices()
form.warehouse_id.choices = get_warehouse_choices(option_type='update')
    # Rack choices for the nested item forms
for item_form in form.purchase_items:
item_form.rack_id.choices = get_rack_choices(form.warehouse_id.data, option_type='update')
    # Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('purchase edit')
    # Enter the edit page
    if request.method == 'GET':
        # Get the line items
        purchase_items = get_purchase_items_rows(purchase_id=purchase_id)
        # Populate the form
form.uid.data = purchase_info.uid
form.supplier_cid.data = purchase_info.supplier_cid
form.supplier_contact_id.data = purchase_info.supplier_contact_id
form.type_tax.data = purchase_info.type_tax
form.warehouse_id.data = purchase_info.warehouse_id
form.amount_purchase.data = purchase_info.amount_purchase
# form.buyer_order_items = buyer_order_items
while len(form.purchase_items) > 0:
form.purchase_items.pop_entry()
for purchase_item in purchase_items:
purchase_item_form = PurchaseItemsEditForm()
purchase_item_form.id = purchase_item.id
purchase_item_form.purchase_id = purchase_item.purchase_id
purchase_item_form.uid = purchase_item.uid
purchase_item_form.production_id = purchase_item.production_id
purchase_item_form.production_brand = purchase_item.production_brand
purchase_item_form.production_model = purchase_item.production_model
purchase_item_form.production_sku = purchase_item.production_sku
purchase_item_form.quantity = purchase_item.quantity
purchase_item_form.unit_price = purchase_item.unit_price
purchase_item_form.rack_id = purchase_item.rack_id
purchase_item_form.note = purchase_item.note
purchase_item_form.type_tax = purchase_item.type_tax
form.purchase_items.append_entry(purchase_item_form)
        # Rack choices for the nested item forms
for item_form in form.purchase_items:
item_form.rack_id.choices = get_rack_choices(form.warehouse_id.data, option_type='update')
        # Render the page
return render_template(
template_name,
purchase_id=purchase_id,
form=form,
**document_info
)
    # Handle the edit request
    if request.method == 'POST':
        # Warehouse changed - skip validation
if form.warehouse_changed.data:
form.warehouse_changed.data = ''
return render_template(
template_name,
form=form,
**document_info
)
        # Adding or removing data rows does not require form validation
        # Add a blank row to the form
if form.data_line_add.data is not None:
if form.purchase_items.max_entries and len(
form.purchase_items.entries) >= form.purchase_items.max_entries:
                flash('At most %s rows can be created' % form.purchase_items.max_entries, 'danger')
else:
form.purchase_items.append_entry()
            # Rack choices for the nested item forms
for item_form in form.purchase_items:
item_form.rack_id.choices = get_rack_choices(form.warehouse_id.data, option_type='update')
return render_template(
template_name,
purchase_id=purchase_id,
form=form,
**document_info
)
        # Remove one row from the form
if form.data_line_del.data is not None:
if form.purchase_items.min_entries and len(
form.purchase_items.entries) <= form.purchase_items.min_entries:
                flash('At least %s rows must be kept' % form.purchase_items.min_entries, 'danger')
else:
data_line_index = form.data_line_del.data
form.purchase_items.entries.pop(data_line_index)
return render_template(
template_name,
purchase_id=purchase_id,
form=form,
**document_info
)
        # Form validation failed
if not form.validate_on_submit():
flash(_('Edit Failure'), 'danger')
# flash(form.errors, 'danger')
# flash(form.purchase_items.errors, 'danger')
return render_template(
template_name,
purchase_id=purchase_id,
form=form,
**document_info
)
        # Form validation passed
        # Get the existing line items
purchase_items = get_purchase_items_rows(purchase_id=purchase_id)
purchase_items_ids = [item.id for item in purchase_items]
        # Row inserts, deletes, and updates
purchase_items_ids_new = []
amount_purchase = 0
for purchase_item in form.purchase_items.entries:
            # Unknown item id, skip it
if purchase_item.form.id.data and purchase_item.form.id.data not in purchase_items_ids:
continue
purchase_item_data = {
'purchase_id': purchase_id,
'uid': form.uid.data,
'supplier_cid': form.supplier_cid.data,
'supplier_company_name': get_supplier_row_by_id(form.supplier_cid.data).company_name,
'production_id': purchase_item.form.production_id.data,
'production_brand': purchase_item.form.production_brand.data,
'production_model': purchase_item.form.production_model.data,
'production_sku': purchase_item.form.production_sku.data,
'quantity': purchase_item.form.quantity.data,
'unit_price': purchase_item.form.unit_price.data,
'warehouse_id': form.warehouse_id.data,
'rack_id': purchase_item.form.rack_id.data,
'note': purchase_item.form.note.data,
'type_tax': form.type_tax.data,
}
if not purchase_item.form.id.data:
                # Insert
add_purchase_items(purchase_item_data)
amount_purchase += purchase_item_data['quantity'] * purchase_item_data['unit_price']
else:
                # Update
edit_purchase_items(purchase_item.form.id.data, purchase_item_data)
amount_purchase += purchase_item_data['quantity'] * purchase_item_data['unit_price']
purchase_items_ids_new.append(purchase_item.form.id.data)
        # Delete rows removed from the form
purchase_items_ids_del = list(set(purchase_items_ids) - set(purchase_items_ids_new))
for purchase_items_id in purchase_items_ids_del:
delete_purchase_items(purchase_items_id)
        # Update the purchase order
current_time = datetime.utcnow()
purchase_data = {
'uid': form.uid.data,
'supplier_cid': form.supplier_cid.data,
'supplier_contact_id': form.supplier_contact_id.data,
'type_tax': form.type_tax.data,
'amount_production': amount_purchase,
'amount_purchase': amount_purchase,
'warehouse_id': form.warehouse_id.data,
'update_time': current_time,
}
result = edit_purchase(purchase_id, purchase_data)
        # Edit succeeded
if result:
flash(_('Edit Success'), 'success')
return redirect(request.args.get('next') or url_for('purchase.lists'))
        # Edit failed
else:
flash(_('Edit Failure'), 'danger')
return render_template(
template_name,
purchase_id=purchase_id,
form=form,
**document_info
)
@bp_purchase.route('/<int:purchase_id>/info.html')
@login_required
@permission_purchase_section_get.require(http_exception=403)
def info(purchase_id):
"""
    Purchase details
:param purchase_id:
:return:
"""
purchase_info = get_purchase_row_by_id(purchase_id)
    # Check whether the resource exists
    if not purchase_info:
        abort(404)
    # Check whether the resource has been deleted
if purchase_info.status_delete == STATUS_DEL_OK:
abort(410)
purchase_print_date = time_utc_to_local(purchase_info.update_time).strftime('%Y-%m-%d')
purchase_code = '%s%s' % (g.ENQUIRIES_PREFIX, time_utc_to_local(purchase_info.create_time).strftime('%y%m%d%H%M%S'))
    # Get the supplier company info
    supplier_info = get_supplier_row_by_id(purchase_info.supplier_cid)
    # Get the supplier contact info
    supplier_contact_info = get_supplier_contact_row_by_id(purchase_info.supplier_contact_id)
    # Get the purchasing user info
    user_info = get_user_row_by_id(purchase_info.uid)
    purchase_items = get_purchase_items_rows(purchase_id=purchase_id)
    # Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('purchase info')
template_name = 'purchase/info.html'
return render_template(
template_name,
purchase_id=purchase_id,
purchase_info=purchase_info,
supplier_info=supplier_info,
supplier_contact_info=supplier_contact_info,
user_info=user_info,
purchase_items=purchase_items,
purchase_print_date=purchase_print_date,
purchase_code=purchase_code,
**document_info
)
@bp_purchase.route('/<int:purchase_id>/preview.html')
@login_required
@permission_purchase_section_print.require(http_exception=403)
def preview(purchase_id):
"""
    Print preview
:param purchase_id:
:return:
"""
purchase_info = get_purchase_row_by_id(purchase_id)
    # Check whether the resource exists
    if not purchase_info:
        abort(404)
    # Check whether the resource has been deleted
if purchase_info.status_delete == STATUS_DEL_OK:
abort(410)
purchase_print_date = time_utc_to_local(purchase_info.update_time).strftime('%Y-%m-%d')
purchase_code = '%s%s' % (g.ENQUIRIES_PREFIX, time_utc_to_local(purchase_info.create_time).strftime('%y%m%d%H%M%S'))
    # Get the supplier company info
    supplier_info = get_supplier_row_by_id(purchase_info.supplier_cid)
    # Get the supplier contact info
    supplier_contact_info = get_supplier_contact_row_by_id(purchase_info.supplier_contact_id)
    # Get the purchasing user info
    user_info = get_user_row_by_id(purchase_info.uid)
    purchase_items = get_purchase_items_rows(purchase_id=purchase_id)
    # Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('purchase preview')
template_name = 'purchase/preview.html'
return render_template(
template_name,
purchase_id=purchase_id,
purchase_info=purchase_info,
supplier_info=supplier_info,
supplier_contact_info=supplier_contact_info,
user_info=user_info,
purchase_items=purchase_items,
purchase_print_date=purchase_print_date,
purchase_code=purchase_code,
**document_info
)
@bp_purchase.route('/<int:purchase_id>.pdf')
@login_required
@permission_purchase_section_print.require(http_exception=403)
def pdf(purchase_id):
"""
    PDF file download
:param purchase_id:
:return:
"""
purchase_info = get_purchase_row_by_id(purchase_id)
    # Check whether the resource exists
    if not purchase_info:
        abort(404)
    # Check whether the resource has been deleted
if purchase_info.status_delete == STATUS_DEL_OK:
abort(410)
purchase_print_date = time_utc_to_local(purchase_info.update_time).strftime('%Y-%m-%d')
purchase_code = '%s%s' % (g.ENQUIRIES_PREFIX, time_utc_to_local(purchase_info.create_time).strftime('%y%m%d%H%M%S'))
    # Get the supplier company info
    supplier_info = get_supplier_row_by_id(purchase_info.supplier_cid)
    # Get the supplier contact info
    supplier_contact_info = get_supplier_contact_row_by_id(purchase_info.supplier_contact_id)
    # Get the purchasing user info
    user_info = get_user_row_by_id(purchase_info.uid)
    purchase_items = get_purchase_items_rows(purchase_id=purchase_id)
    # Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('purchase pdf')
template_name = 'purchase/pdf.html'
html = render_template(
template_name,
purchase_id=purchase_id,
purchase_info=purchase_info,
supplier_info=supplier_info,
supplier_contact_info=supplier_contact_info,
user_info=user_info,
purchase_items=purchase_items,
purchase_print_date=purchase_print_date,
purchase_code=purchase_code,
**document_info
)
# return html
return render_pdf(
html=HTML(string=html),
stylesheets=[CSS(string='@page {size:A4; margin:35px;}')],
        download_filename='purchase.pdf'.encode('utf-8')
)
@bp_purchase.route('/ajax/del', methods=['GET', 'POST'])
@login_required
def ajax_delete():
"""
    Delete a purchase (AJAX)
:return:
"""
ajax_success_msg = AJAX_SUCCESS_MSG.copy()
ajax_failure_msg = AJAX_FAILURE_MSG.copy()
    # Check delete permission
if not permission_purchase_section_del.can():
ext_msg = _('Permission Denied')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
    # Check the request method
if not (request.method == 'GET' and request.is_xhr):
ext_msg = _('Method Not Allowed')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
    # Check the request parameters
purchase_id = request.args.get('purchase_id', 0, type=int)
if not purchase_id:
ext_msg = _('ID does not exist')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
purchase_info = get_purchase_row_by_id(purchase_id)
    # Check whether the resource exists
    if not purchase_info:
        ext_msg = _('ID does not exist')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    # Check whether the resource has been deleted
    if purchase_info.status_delete == STATUS_DEL_OK:
        ext_msg = _('Already deleted')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
current_time = datetime.utcnow()
purchase_data = {
'status_delete': STATUS_DEL_OK,
'delete_time': current_time,
'update_time': current_time,
}
result = edit_purchase(purchase_id, purchase_data)
if result:
        # Send the delete signal
signal_data = {
'purchase_id': purchase_id,
'status_delete': STATUS_DEL_OK,
'current_time': current_time,
}
signal_purchase_status_delete.send(app, **signal_data)
ajax_success_msg['msg'] = _('Del Success')
return jsonify(ajax_success_msg)
else:
ajax_failure_msg['msg'] = _('Del Failure')
return jsonify(ajax_failure_msg)
@bp_purchase.route('/ajax/audit', methods=['GET', 'POST'])
@login_required
def ajax_audit():
"""
    Audit a purchase (AJAX)
:return:
"""
ajax_success_msg = AJAX_SUCCESS_MSG.copy()
ajax_failure_msg = AJAX_FAILURE_MSG.copy()
    # Check audit permission
if not permission_purchase_section_audit.can():
ext_msg = _('Permission Denied')
ajax_failure_msg['msg'] = _('Audit Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
    # Check the request method
if not (request.method == 'GET' and request.is_xhr):
ext_msg = _('Method Not Allowed')
ajax_failure_msg['msg'] = _('Audit Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
    # Check the request parameters
purchase_id = request.args.get('purchase_id', 0, type=int)
audit_status = request.args.get('audit_status', 0, type=int)
if not purchase_id:
ext_msg = _('ID does not exist')
ajax_failure_msg['msg'] = _('Audit Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
if audit_status not in [STATUS_AUDIT_NO, STATUS_AUDIT_OK]:
ext_msg = _('Status not exist')
ajax_failure_msg['msg'] = _('Audit Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
purchase_info = get_purchase_row_by_id(purchase_id)
    # Check whether the resource exists
    if not purchase_info:
        ext_msg = _('ID does not exist')
        ajax_failure_msg['msg'] = _('Audit Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    # Check whether the resource has been deleted
    if purchase_info.status_delete == STATUS_DEL_OK:
        ext_msg = _('Already deleted')
        ajax_failure_msg['msg'] = _('Audit Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    # Check whether the audit status actually changes
if purchase_info.status_audit == audit_status:
ext_msg = _('Already audited')
ajax_failure_msg['msg'] = _('Audit Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
try:
if audit_status == STATUS_AUDIT_OK:
result = audit_purchase(purchase_id)
else:
result = cancel_audit_purchase(purchase_id)
if result:
ajax_success_msg['msg'] = _('Audit Success')
return jsonify(ajax_success_msg)
else:
ajax_failure_msg['msg'] = _('Audit Failure')
return jsonify(ajax_failure_msg)
except Exception as e:
ajax_failure_msg['msg'] = e.message
return jsonify(ajax_failure_msg)
|
from Common.Predictors.AbstractPredictor import AbstractPredictor, abstractmethod
class AbstractSvrPredictor(AbstractPredictor):
@abstractmethod
def _setData(self):
pass
@abstractmethod
def _setForecast(self):
pass
@abstractmethod
def _setIndependent(self):
pass
@abstractmethod
def _setDependent(self):
pass
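# A hypothetical concrete-subclass sketch (added). The class name and the attributes it
# sets are assumptions for illustration only; the real hooks are defined by
# AbstractPredictor elsewhere in this package.
#
#   class DummySvrPredictor(AbstractSvrPredictor):
#       def _setData(self):        self._data = [[0.0], [1.0], [2.0]]
#       def _setForecast(self):    self._forecast = [[3.0]]
#       def _setIndependent(self): self._independent = self._data
#       def _setDependent(self):   self._dependent = [0.0, 1.0, 2.0]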
|
# -*- coding: utf-8 -*-
## @package inversetoon.geometry.ellipsoids
#
# Implementation of 2D ellipsoids.
# @author tody
# @date 2015/08/13
import math
import numpy as np
from numpy.linalg import eig, inv
## Ellipsoids
class Ellipsoids:
## Constructor
def __init__(self, points = []):
self._A = None
self._center = None
self._phi = None
self._dU = None
self._dV = None
self._axes = None
self._thetas = None
if len(points) > 0:
self.fit(points)
def fit(self, points):
self.fitParameters(points)
self.computeCenter()
self.computeRotation()
self.computeAxes()
self.computeThetas(points)
def fitParameters(self, points):
points = np.array(points)
x = points[:, 0]
y = points[:, 1]
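        # (Added explanatory comments) Direct least-squares conic fit: each column of
        # D holds the terms of a*x^2 + b*x*y + c*y^2 + d*x + e*y + f = 0 at one sample,
        # S = D D^T is the scatter matrix, and C encodes the ellipse constraint
        # 4ac - b^2 = 1 (Fitzgibbon-style fit). The eigenvector of inv(S) C chosen
        # below gives the conic coefficients stored in self._A.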
D = np.array([x * x, x * y, y * y, x, y, np.ones_like(x)])
S = np.dot(D, D.T)
C = np.zeros((6, 6))
C[0, 2] = C[2, 0] = 2
C[1, 1] = -1
eigen_values, eigen_vectors = eig(np.dot(inv(S), C))
maxID = np.argmax(np.abs(eigen_values))
self._A = eigen_vectors[:, maxID]
def computeCenter(self):
A = self._A
a, b, c, d, f, g = A[0], A[1] / 2, A[2], A[3] / 2, A[4] / 2, A[5]
num = b * b - a * c
x0 = (c * d - b * f) / num
y0 = (a * f - b * d) / num
self._center = np.array([x0, y0])
def computeRotation(self):
A = self._A
a, b, c, d, f, g = A[0], A[1] / 2, A[2], A[3] / 2, A[4] / 2, A[5]
phi = 0.5 * np.arctan(2 * b / (a - c))
self._phi = phi
dU = np.array([np.cos(phi), np.sin(phi)])
dV = np.array([-np.sin(phi), np.cos(phi)])
self._dU = dU
self._dV = dV
def computeAxes(self):
A = self._A
a, b, c, d, f, g = A[0], A[1] / 2, A[2], A[3] / 2, A[4] / 2, A[5]
up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)
down1=(b*b-a*c)*( (c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
down2=(b*b-a*c)*( (a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
res1 = res2 = up
res1 /= down1
res2 /= down2
res1=np.max([0.00001, res1])
res1=np.sqrt(res1)
res2=np.max([0.00001, res2])
res2=np.sqrt(res2)
self._axes = [res1, res2]
def computeTheta(self, p):
a, b = self._axes
phi = self._phi
c = self._center
dU = np.array([np.cos(phi), np.sin(phi)])
dV = np.array([-np.sin(phi), np.cos(phi)])
cp = p - c
u, v = [np.dot(cp, dU), np.dot(cp, dV)]
u /= a
v /= b
theta = np.arctan2(v, u)
return theta
def computeThetas(self, points):
self._thetas = [self.computeTheta(p) for p in points]
def pointAt(self, t):
a, b = self._axes
dU = self._dU
dV = self._dV
c = self._center
p = c + a * np.cos(t) * dU + b * np.sin(t) * dV
return p
def pointsAt(self, t):
a, b = self._axes
dU = self._dU
dV = self._dV
a_dU = a * dU
b_dV = b * dV
c = self._center
U = np.array([np.cos(t), np.cos(t)]).T
V = np.array([np.sin(t), np.sin(t)]).T
P = c + U * a_dU + V * b_dV
return P
def points(self):
return self.pointsAt(self._thetas)
def curvatureAt(self, t):
a, b = self._axes
u = np.cos(t)
v = np.sin(t)
up = a * b
down = b * b * u * u + a * a * v * v
down = math.pow(down, 1.5)
k = up / down
return k
def curvatures(self):
K = [self.curvatureAt(t) for t in self._thetas]
return K
def plotCenter(self, plt, color="g"):
plt.scatter(self._center[0], self._center[1], color=color)
def plotAxes(self, plt, color=[0.0, 0.2, 0.2]):
a, b = self._axes
dU = self._dU
dV = self._dV
a_dU = a * dU
b_dV = b * dV
c = self._center
a_axis = np.array([c - a_dU, c + a_dU])
b_axis = np.array([c - b_dV, c + b_dV])
plt.plot(a_axis[:,0], a_axis[:,1], "-", color=color)
plt.plot(b_axis[:,0], b_axis[:,1], "-", color=color)
def plotEllipsoids(self, plt, color="r"):
P = self.points()
plt.plot(P[:,0], P[:,1], "-", color=color)
def plotCurvatures(self, plt):
K = self.curvatures()
x = np.arange(len(K))
plt.plot(x, K, "-")
if __name__ == '__main__':
import matplotlib.pyplot as plt
from inversetoon.plot.window import showMaximize
a, b = 4.0, 3.0
c = 0.2 * np.random.rand(2)
t_min, t_max = [0.2 * np.pi, 1.2 * np.pi]
t = np.linspace(t_min, t_max, 100)
phi = 0.3 * np.pi
dU = np.array([np.cos(phi), np.sin(phi)])
dV = np.array([-np.sin(phi), np.cos(phi)])
U = np.array([np.cos(t), np.cos(t)]).T
V = np.array([np.sin(t), np.sin(t)]).T
points = c + a * U * dU + b * V * dV
points[:, 0] += 0.1 * np.random.rand(len(t))
points[:, 1] += 0.1 * np.random.rand(len(t))
ax = plt.subplot(121)
ax.set_aspect('1.0')
ax.scatter(points[:, 0], points[:, 1])
el = Ellipsoids(points)
el.plotCenter(ax)
el.plotAxes(ax)
el.plotEllipsoids(ax)
ax2 = plt.subplot(122)
el.plotCurvatures(ax2)
    showMaximize()
|
import asyncore
import logging
import socket
from io import BytesIO
from http.server import BaseHTTPRequestHandler
from http import HTTPStatus
import urllib.parse as urlparse
from urllib.parse import parse_qs
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
log = logging.getLogger(__name__)
class HttpServer(asyncore.dispatcher):
"""Receives connections and establishes handlers for each client.
"""
def __init__(self, address, handler):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(address)
self.handler = handler
self.address = self.socket.getsockname()
log.debug('binding to %s', self.address)
self.listen(5)
return
def handle_accept(self):
# Called when a client connects to our socket
client_info = self.accept()
log.debug('handle_accept() -> %s', client_info[1])
ProcessHandler(sock=client_info[0], address=client_info[1], handler=self.handler)
return
def handle_close(self):
log.debug('handle_close()')
self.close()
return
class HTTPRequest(BaseHTTPRequestHandler):
def __init__(self, request_text, client_address):
self.rfile = BytesIO(request_text)
self.wfile = BytesIO()
self.raw_requestline = self.rfile.readline()
self.error_code = self.error_message = None
self.client_address = client_address
self.parse_request()
mname = 'do_' + self.command
if not hasattr(self, mname):
self.send_error(
HTTPStatus.NOT_IMPLEMENTED,
"Unsupported method (%r)" % self.command)
return
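        # dispatch to the matching do_<METHOD> handler (e.g. do_GET) defined on the subclass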
method = getattr(self, mname)
parsed = urlparse.urlparse(self.path)
self.path = parsed.path
self.query_string = parse_qs(parsed.query)
try:
method()
except Exception as e:
self.send_error(500, "Internal server error")
log.error(e)
self.end_headers()
self.wfile.flush() # actually send the response if not already done.
    def send_error(self, code, message=None, explain=None):
self.send_response(code, message)
self.wfile.flush()
class ProcessHandler(asyncore.dispatcher):
"""Handles echoing messages from a single client.
"""
def __init__(self, sock, handler=None, address=None, chunk_size=9182):
self.chunk_size = chunk_size
self.handler = handler
asyncore.dispatcher.__init__(self, sock=sock)
self.client_address = address
self.data_to_write = []
return
def writable(self):
"""We want to write if we have received data."""
response = bool(self.data_to_write)
log.debug('writable() -> %s', response)
return response
def handle_write(self):
"""Write as much as possible of the most recent message we have received."""
data = self.data_to_write.pop()
sent = self.send(data[:self.chunk_size])
if sent < len(data):
remaining = data[sent:]
self.data_to_write.append(remaining)
log.debug('handle_write() -> (%d) "%s"', sent, data[:sent])
if not self.writable():
self.handle_close()
def handle_read(self):
"""Read an incoming message from the client and put it into our outgoing queue."""
data = self.recv(self.chunk_size)
log.debug('handle_read() -> (%d) "%s"', len(data), data)
if self.handler and data:
request = self.handler(data, self.client_address)
self.data_to_write.insert(0, request.wfile.getvalue())
else:
            self.data_to_write.insert(0, b'')
def handle_close(self):
log.debug('handle_close()')
self.close()
|
# coding: utf-8
"""
Automox Console API
API for use with the Automox Console # noqa: E501
OpenAPI spec version: 2021-08-10
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import automox_console_sdk
from automox_console_sdk.api.worklets_api import WorkletsApi # noqa: E501
from automox_console_sdk.rest import ApiException
class TestWorkletsApi(unittest.TestCase):
"""WorkletsApi unit test stubs"""
def setUp(self):
self.api = WorkletsApi() # noqa: E501
def tearDown(self):
pass
def test_get_community_worklet(self):
"""Test case for get_community_worklet
Retrieve Community Worklet by ID # noqa: E501
"""
pass
def test_get_community_worklets(self):
"""Test case for get_community_worklets
Retrieve Community Worklets # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
from typing import Type, List, Dict
from re import compile
from dataclasses import dataclass
from py2puml.domain.umlitem import UmlItem
from py2puml.domain.umlclass import UmlClass, UmlAttribute
from py2puml.domain.umlrelation import UmlRelation, RelType
from py2puml.parsing.parseclassconstructor import parse_class_constructor
from py2puml.utils import inspect_domain_definition
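# matches the repr of a concrete class or enum type, e.g. "<class 'package.module.ClassName'>", capturing the dotted name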
CONCRETE_TYPE_PATTERN = compile("^<(?:class|enum) '([\\.|\\w]+)'>$")
def get_type_name(type: Type, root_module_name: str):
if type.__module__.startswith(root_module_name):
return type.__name__
else:
return f'{type.__module__}.{type.__name__}'
def handle_inheritance_relation(
class_type: Type,
class_fqn: str,
root_module_name: str,
domain_relations: List[UmlRelation]
):
for base_type in getattr(class_type, '__bases__', ()):
base_type_fqn = f'{base_type.__module__}.{base_type.__name__}'
if base_type_fqn.startswith(root_module_name):
domain_relations.append(
UmlRelation(base_type_fqn, class_fqn, RelType.INHERITANCE)
)
def inspect_static_attributes(
class_type: Type,
class_type_fqn: str,
root_module_name: str,
domain_items_by_fqn: Dict[str, UmlItem],
domain_relations: List[UmlRelation]
) -> List[UmlAttribute]:
definition_attrs: List[UmlAttribute] = []
uml_class = UmlClass(
name=class_type.__name__,
fqn=class_type_fqn,
attributes=definition_attrs
)
domain_items_by_fqn[class_type_fqn] = uml_class
# inspect_domain_definition(class_type)
type_annotations = getattr(class_type, '__annotations__', None)
if type_annotations is not None:
for attr_name, attr_class in type_annotations.items():
attr_raw_type = str(attr_class)
concrete_type_match = CONCRETE_TYPE_PATTERN.search(attr_raw_type)
if concrete_type_match:
concrete_type = concrete_type_match.group(1)
if attr_class.__module__.startswith(root_module_name):
attr_type = attr_class.__name__
domain_relations.append(
UmlRelation(uml_class.fqn, f'{attr_class.__module__}.{attr_class.__name__}', RelType.COMPOSITION)
)
else:
attr_type = concrete_type
else:
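                # typing generics such as List[X] or Dict[K, V]: _name holds the generic's name and __args__ its type parameters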
composition_rel = getattr(attr_class, '_name', None)
component_classes = getattr(attr_class, '__args__', None)
if composition_rel and component_classes:
component_names = [
get_type_name(component_class, root_module_name)
for component_class in component_classes
# filters out forward refs
if getattr(component_class, '__name__', None) is not None
]
domain_relations.extend([
UmlRelation(uml_class.fqn, f'{component_class.__module__}.{component_class.__name__}', RelType.COMPOSITION)
for component_class in component_classes
if component_class.__module__.startswith(root_module_name)
])
attr_type = f"{composition_rel}[{', '.join(component_names)}]"
else:
attr_type = attr_raw_type
uml_attr = UmlAttribute(attr_name, attr_type, static=True)
definition_attrs.append(uml_attr)
return definition_attrs
def inspect_class_type(
class_type: Type,
class_type_fqn: str,
root_module_name: str,
domain_items_by_fqn: Dict[str, UmlItem],
domain_relations: List[UmlRelation]
):
attributes = inspect_static_attributes(
class_type, class_type_fqn, root_module_name,
domain_items_by_fqn, domain_relations
)
instance_attributes, compositions = parse_class_constructor(class_type, class_type_fqn, root_module_name)
attributes.extend(instance_attributes)
domain_relations.extend(compositions.values())
handle_inheritance_relation(class_type, class_type_fqn, root_module_name, domain_relations)
def inspect_dataclass_type(
class_type: Type[dataclass],
class_type_fqn: str,
root_module_name: str,
domain_items_by_fqn: Dict[str, UmlItem],
domain_relations: List[UmlRelation]
):
for attribute in inspect_static_attributes(
class_type,
class_type_fqn,
root_module_name,
domain_items_by_fqn,
domain_relations
):
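        # dataclass annotations describe instance fields, so clear the static flag set by inspect_static_attributes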
attribute.static = False
handle_inheritance_relation(class_type, class_type_fqn, root_module_name, domain_relations) |
import zipfile
if __name__ == '__main__':
    for filename in ['data/alphamatting/input_lowres.zip',
                     'data/alphamatting/trimap_lowres.zip']:
        print('Extracting {}...'.format(filename))
        with zipfile.ZipFile(filename, 'r') as zip_ref:
            zip_ref.extractall('data/alphamatting/')
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
import logging
import os
import sys
import tabulate
import thespian.actors
from esrally import actor, config, doc_link, driver, exceptions, mechanic, metrics, reporter, track, PROGRAM_NAME
from esrally.utils import console, opts
# benchmarks with external candidates are really scary and we should warn users.
BOGUS_RESULTS_WARNING = """
************************************************************************
************** WARNING: A dark dungeon lies ahead of you **************
************************************************************************
Rally does not have control over the configuration of the benchmarked
Elasticsearch cluster.
Be aware that results may be misleading due to problems with the setup.
Rally is also not able to gather lots of metrics at all (like CPU usage
of the benchmarked cluster) or may even produce misleading metrics (like
the index size).
************************************************************************
****** Use this pipeline only if you are aware of the tradeoffs. ******
*************************** Watch your step! ***************************
************************************************************************
"""
pipelines = collections.OrderedDict()
class Pipeline:
"""
    Describes a whole execution pipeline. A pipeline can consist of one or more steps. Each pipeline roughly consists of the following
    steps:
* Prepare the benchmark candidate: It can build Elasticsearch from sources, download a ZIP from somewhere etc.
* Launch the benchmark candidate: This can be done directly, with tools like Ansible or it can assume the candidate is already launched
* Run the benchmark
* Report results
"""
def __init__(self, name, description, target, stable=True):
"""
Creates a new pipeline.
:param name: A short name of the pipeline. This name will be used to reference it from the command line.
:param description: A human-readable description what the pipeline does.
:param target: A function that implements this pipeline
        :param stable: True iff the pipeline is considered production quality.
"""
self.name = name
self.description = description
self.target = target
self.stable = stable
pipelines[name] = self
def __call__(self, cfg):
self.target(cfg)
class Setup:
def __init__(self, cfg, sources=False, build=False, distribution=False, external=False, docker=False):
self.cfg = cfg
self.sources = sources
self.build = build
self.distribution = distribution
self.external = external
self.docker = docker
class Success:
pass
class BenchmarkActor(actor.RallyActor):
def __init__(self):
super().__init__()
self.cfg = None
self.race = None
self.metrics_store = None
self.race_store = None
self.cancelled = False
self.error = False
self.start_sender = None
self.mechanic = None
self.main_driver = None
self.track_revision = None
def receiveMsg_PoisonMessage(self, msg, sender):
self.logger.info("BenchmarkActor got notified of poison message [%s] (forwarding).", (str(msg)))
self.error = True
self.send(self.start_sender, msg)
def receiveUnrecognizedMessage(self, msg, sender):
self.logger.info("BenchmarkActor received unknown message [%s] (ignoring).", (str(msg)))
@actor.no_retry("race control")
def receiveMsg_Setup(self, msg, sender):
self.setup(msg, sender)
@actor.no_retry("race control")
def receiveMsg_EngineStarted(self, msg, sender):
self.logger.info("Mechanic has started engine successfully.")
self.metrics_store.meta_info = msg.system_meta_info
self.race.team_revision = msg.team_revision
self.main_driver = self.createActor(driver.DriverActor, targetActorRequirements={"coordinator": True})
self.logger.info("Telling driver to prepare for benchmarking.")
self.send(self.main_driver, driver.PrepareBenchmark(self.cfg, self.race.track, self.metrics_store.meta_info))
@actor.no_retry("race control")
def receiveMsg_PreparationComplete(self, msg, sender):
self.race.distribution_flavor = msg.distribution_flavor
self.race.distribution_version = msg.distribution_version
self.race.revision = msg.revision
if self.race.challenge.auto_generated:
console.info("Racing on track [{}] and car {} with version [{}].\n"
.format(self.race.track_name, self.race.car, self.race.distribution_version))
else:
console.info("Racing on track [{}], challenge [{}] and car {} with version [{}].\n"
.format(self.race.track_name, self.race.challenge_name, self.race.car, self.race.distribution_version))
self.run()
@actor.no_retry("race control")
def receiveMsg_TaskFinished(self, msg, sender):
self.logger.info("Task has finished.")
self.logger.info("Bulk adding request metrics to metrics store.")
self.metrics_store.bulk_add(msg.metrics)
# We choose *NOT* to reset our own metrics store's timer as this one is only used to collect complete metrics records from
# other stores (used by driver and mechanic). Hence there is no need to reset the timer in our own metrics store.
self.send(self.mechanic, mechanic.ResetRelativeTime(msg.next_task_scheduled_in))
@actor.no_retry("race control")
def receiveMsg_BenchmarkCancelled(self, msg, sender):
self.cancelled = True
# even notify the start sender if it is the originator. The reason is that we call #ask() which waits for a reply.
# We also need to ask in order to avoid races between this notification and the following ActorExitRequest.
self.send(self.start_sender, msg)
@actor.no_retry("race control")
def receiveMsg_BenchmarkFailure(self, msg, sender):
self.logger.info("Received a benchmark failure from [%s] and will forward it now.", sender)
self.error = True
self.send(self.start_sender, msg)
@actor.no_retry("race control")
def receiveMsg_BenchmarkComplete(self, msg, sender):
self.logger.info("Benchmark is complete.")
self.logger.info("Bulk adding request metrics to metrics store.")
self.metrics_store.bulk_add(msg.metrics)
self.send(self.main_driver, thespian.actors.ActorExitRequest())
self.main_driver = None
self.teardown()
@actor.no_retry("race control")
def receiveMsg_EngineStopped(self, msg, sender):
self.logger.info("Mechanic has stopped engine successfully.")
self.logger.info("Bulk adding system metrics to metrics store.")
self.metrics_store.bulk_add(msg.system_metrics)
self.metrics_store.flush()
if not self.cancelled and not self.error:
final_results = reporter.calculate_results(self.metrics_store, self.race)
self.race.add_results(final_results)
reporter.summarize(self.race, self.cfg)
self.race_store.store_race(self.race)
else:
self.logger.info("Suppressing output of summary report. Cancelled = [%r], Error = [%r].", self.cancelled, self.error)
self.metrics_store.close()
self.send(self.start_sender, Success())
def setup(self, msg, sender):
self.start_sender = sender
self.cfg = msg.cfg
# to load the track we need to know the correct cluster distribution version. Usually, this value should be set but there are rare
# cases (external pipeline and user did not specify the distribution version) where we need to derive it ourselves. For source
# builds we always assume "master"
if not msg.sources and not self.cfg.exists("mechanic", "distribution.version"):
distribution_version = mechanic.cluster_distribution_version(self.cfg)
if not distribution_version:
raise exceptions.SystemSetupError("A distribution version is required. Please specify it with --distribution-version.")
self.logger.info("Automatically derived distribution version [%s]", distribution_version)
self.cfg.add(config.Scope.benchmark, "mechanic", "distribution.version", distribution_version)
t = track.load_track(self.cfg)
self.track_revision = self.cfg.opts("track", "repository.revision", mandatory=False)
challenge_name = self.cfg.opts("track", "challenge.name")
challenge = t.find_challenge_or_default(challenge_name)
if challenge is None:
raise exceptions.SystemSetupError("Track [%s] does not provide challenge [%s]. List the available tracks with %s list tracks."
% (t.name, challenge_name, PROGRAM_NAME))
if challenge.user_info:
console.info(challenge.user_info)
self.race = metrics.create_race(self.cfg, t, challenge, self.track_revision)
self.metrics_store = metrics.metrics_store(
self.cfg,
track=self.race.track_name,
challenge=self.race.challenge_name,
read_only=False
)
self.race_store = metrics.race_store(self.cfg)
self.logger.info("Asking mechanic to start the engine.")
cluster_settings = challenge.cluster_settings
self.mechanic = self.createActor(mechanic.MechanicActor, targetActorRequirements={"coordinator": True})
self.send(self.mechanic, mechanic.StartEngine(self.cfg, self.metrics_store.open_context, cluster_settings, msg.sources, msg.build,
msg.distribution, msg.external, msg.docker))
def run(self):
self.logger.info("Telling driver to start benchmark.")
self.send(self.main_driver, driver.StartBenchmark())
def teardown(self):
self.logger.info("Asking mechanic to stop the engine.")
self.send(self.mechanic, mechanic.StopEngine())
def race(cfg, sources=False, build=False, distribution=False, external=False, docker=False):
logger = logging.getLogger(__name__)
# at this point an actor system has to run and we should only join
actor_system = actor.bootstrap_actor_system(try_join=True)
benchmark_actor = actor_system.createActor(BenchmarkActor, targetActorRequirements={"coordinator": True})
try:
result = actor_system.ask(benchmark_actor, Setup(cfg, sources, build, distribution, external, docker))
if isinstance(result, Success):
logger.info("Benchmark has finished successfully.")
# may happen if one of the load generators has detected that the user has cancelled the benchmark.
elif isinstance(result, actor.BenchmarkCancelled):
logger.info("User has cancelled the benchmark (detected by actor).")
elif isinstance(result, actor.BenchmarkFailure):
logger.error("A benchmark failure has occurred")
raise exceptions.RallyError(result.message, result.cause)
else:
raise exceptions.RallyError("Got an unexpected result during benchmarking: [%s]." % str(result))
except KeyboardInterrupt:
logger.info("User has cancelled the benchmark (detected by race control).")
# notify the coordinator so it can properly handle this state. Do it blocking so we don't have a race between this message
# and the actor exit request.
actor_system.ask(benchmark_actor, actor.BenchmarkCancelled())
finally:
logger.info("Telling benchmark actor to exit.")
actor_system.tell(benchmark_actor, thespian.actors.ActorExitRequest())
def set_default_hosts(cfg, host="127.0.0.1", port=9200):
logger = logging.getLogger(__name__)
configured_hosts = cfg.opts("client", "hosts")
if len(configured_hosts.default) != 0:
logger.info("Using configured hosts %s", configured_hosts.default)
else:
logger.info("Setting default host to [%s:%d]", host, port)
default_host_object = opts.TargetHosts("{}:{}".format(host,port))
cfg.add(config.Scope.benchmark, "client", "hosts", default_host_object)
# Poor man's curry
def from_sources_complete(cfg):
port = cfg.opts("provisioning", "node.http.port")
set_default_hosts(cfg, port=port)
return race(cfg, sources=True, build=True)
def from_sources_skip_build(cfg):
port = cfg.opts("provisioning", "node.http.port")
set_default_hosts(cfg, port=port)
return race(cfg, sources=True, build=False)
def from_distribution(cfg):
port = cfg.opts("provisioning", "node.http.port")
set_default_hosts(cfg, port=port)
return race(cfg, distribution=True)
def benchmark_only(cfg):
console.println(BOGUS_RESULTS_WARNING, flush=True)
set_default_hosts(cfg)
# We'll use a special car name for external benchmarks.
cfg.add(config.Scope.benchmark, "mechanic", "car.names", ["external"])
return race(cfg, external=True)
def docker(cfg):
set_default_hosts(cfg)
return race(cfg, docker=True)
Pipeline("from-sources-complete",
"Builds and provisions Elasticsearch, runs a benchmark and reports results.", from_sources_complete)
Pipeline("from-sources-skip-build",
"Provisions Elasticsearch (skips the build), runs a benchmark and reports results.", from_sources_skip_build)
Pipeline("from-distribution",
"Downloads an Elasticsearch distribution, provisions it, runs a benchmark and reports results.", from_distribution)
Pipeline("benchmark-only",
"Assumes an already running Elasticsearch instance, runs a benchmark and reports results", benchmark_only)
# Very experimental Docker pipeline. Should only be used with great care and is also not supported on all platforms.
Pipeline("docker",
"Runs a benchmark against the official Elasticsearch Docker container and reports results", docker, stable=False)
def available_pipelines():
return [[pipeline.name, pipeline.description] for pipeline in pipelines.values() if pipeline.stable]
def list_pipelines():
console.println("Available pipelines:\n")
console.println(tabulate.tabulate(available_pipelines(), headers=["Name", "Description"]))
def run(cfg):
logger = logging.getLogger(__name__)
name = cfg.opts("race", "pipeline")
if len(name) == 0:
# assume from-distribution pipeline if distribution.version has been specified and --pipeline cli arg not set
if cfg.exists("mechanic", "distribution.version"):
name = "from-distribution"
else:
name = "from-sources-complete"
logger.info("User specified no pipeline. Automatically derived pipeline [%s].", name)
cfg.add(config.Scope.applicationOverride, "race", "pipeline", name)
else:
logger.info("User specified pipeline [%s].", name)
if os.environ.get("RALLY_RUNNING_IN_DOCKER", "").upper() == "TRUE":
# in this case only benchmarking remote Elasticsearch clusters makes sense
if name != "benchmark-only":
raise exceptions.SystemSetupError(
"Only the [benchmark-only] pipeline is supported by the Rally Docker image.\n"
"Add --pipeline=benchmark-only in your Rally arguments and try again.\n"
"For more details read the docs for the benchmark-only pipeline in {}\n".format(
doc_link("pipelines.html#benchmark-only")))
try:
pipeline = pipelines[name]
except KeyError:
raise exceptions.SystemSetupError(
"Unknown pipeline [%s]. List the available pipelines with %s list pipelines." % (name, PROGRAM_NAME))
try:
pipeline(cfg)
except exceptions.RallyError as e:
# just pass on our own errors. It should be treated differently on top-level
raise e
except KeyboardInterrupt:
logger.info("User has cancelled the benchmark.")
except BaseException:
tb = sys.exc_info()[2]
raise exceptions.RallyError("This race ended with a fatal crash.").with_traceback(tb)
|
import sys
from optparse import OptionParser
from pprint import pprint
import yaml
import json
import wikipedia_ql
def run():
# TODO: Link to cheatsheet
parser = OptionParser('Usage: %prog [options] query')
parser.add_option("-p", "--page", dest="page",
help=r'''Wikipedia page name to query. If absent, query should have form `from "Page Name" { ... }`''')
parser.add_option("-o", "--output-format", dest="output_format", default='yaml',
help='One of "yaml" (default), "json" or "pprint" (python pretty-print).')
# TODO: cache folder
# TODO: --time
# TODO: force-cache-update
options, args = parser.parse_args()
if len(args) != 1:
parser.print_help()
        sys.exit()
query = args[0]
wikipedia = wikipedia_ql.media_wiki.Wikipedia(cache_folder='tmp/cache/')
if options.page:
result = wikipedia.query(query, page=options.page)
else:
result = wikipedia.query(query)
print()
if options.output_format == 'yaml':
print(yaml.safe_dump(result, allow_unicode=True, width=1000, default_style='>'))
elif options.output_format == 'json':
print(json.dumps(result, ensure_ascii=False))
else:
pprint(result)
|
# Generated by Django 3.0.3 on 2020-03-14 19:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('common', '0008_remove_submit_source'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('source', models.CharField(max_length=255)),
('line', models.IntegerField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('submit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='common.Submit')),
],
),
]
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isSubtree(self, s: TreeNode, t: TreeNode) -> bool:
def rec(s,t):
if s is None and t is None:
return True
if s is None or t is None:
return False
return s.val==t.val and rec(s.left,t.left) and rec(s.right,t.right)
        return s is not None and (rec(s, t) or self.isSubtree(s.left, t) or self.isSubtree(s.right, t))
|
"""Module defining UniprotData class."""
# python 2/3 compatibility
from __future__ import division, print_function, absolute_import
# global imports
from collections import Counter, namedtuple
import os.path
import re
import pandas
Cofactor = namedtuple('Cofactor', 'chebi name stoichiometry uniprot_note')
class UniprotData(object):
"""
Class parsing RBA-relevant Uniprot data.
Parameters
----------
    input_dir: str
        path to directory containing uniprot data (uniprot.csv)
"""
def __init__(self, input_dir):
"""
Build from input directory.
Parameters
----------
        input_dir: path to directory containing the uniprot file.
"""
# open uniprot data
self.data = pandas.read_csv(os.path.join(input_dir, 'uniprot.csv'),
sep='\t')
self.data.set_index('Entry', inplace=True)
# create mapping from gene ids to uniprot ids
self._gene_to_entry = {}
self._gene_annotation_score = {}
gene_reader = re.compile(r'([^\s]+)')
annotation_reader = re.compile(r'[0-5]')
for entry, genes, annotation in zip(self.data.index, self.data['Gene names'], self.data['Annotation']):
# transform raw uniprot field into standardized list
if pandas.isnull(genes):
continue
gene_ids = set(g.upper() for g in gene_reader.findall(genes))
annotation_score = annotation_reader.findall(annotation)
for gene in gene_ids:
# test if the gene is already present in the list _gene_to_entry.keys()
if gene in self._gene_to_entry.keys():
                    # gene already present: keep the entry with the better annotation score.
if int(annotation_score[0]) > self._gene_annotation_score[gene]:
# better annotation, keep the entry
self._gene_to_entry[gene] = entry
self._gene_annotation_score[gene] = int(annotation_score[0])
else:
# gene absent: insertion
self._gene_to_entry[gene] = entry
self._gene_annotation_score[gene] = int(annotation_score[0])
# create parsers
self._location_parser = LocationParser()
self._cofactor_parser = CofactorParser()
self._subunit_parser = SubunitParser()
def line(self, uniprot_id):
"""
Return data line corresponding to uniprot identifier.
Parameters
----------
uniprot_id : str
Uniprot identifier of a protein.
Returns
-------
pandas.Series
Data associated with protein.
"""
return self.data.loc[uniprot_id]
def find_location(self, uniprot_line):
"""
Parse location of protein.
Parameters
----------
uniprot_line : pandas.Series
Protein data.
Returns
-------
str
Standardized location of protein.
"""
return self._location_parser.parse(
uniprot_line['Subcellular location [CC]']
)
def find_cofactors(self, uniprot_line):
"""
Parse cofactors of protein.
Parameters
----------
uniprot_line : pandas.Series
Protein data.
Returns
-------
str
Standardized cofactors of protein.
"""
return self._cofactor_parser.parse(uniprot_line['Cofactor'])
def find_subunits(self, uniprot_line):
"""
Parse stoichiometry of protein.
Parameters
----------
uniprot_line : pandas.Series
Protein data.
Returns
-------
str
Standardized stoichiometry of protein.
"""
return self._subunit_parser.parse(
uniprot_line['Subunit structure [CC]']
)
def entry(self, gene):
"""
        Find uniprot entry from gene identifier.
        Parameters
        ----------
        gene : str
            Name of gene to retrieve.
        Returns
        -------
        str or None
            Corresponding uniprot entry, or None if the gene
            could not be found.
"""
return self._gene_to_entry.get(gene.upper(), None)
def average_protein_composition(self):
"""
Compute average protein composition.
Returns
-------
dict
Dictionary where keys are amino acids (one letter format) and
values their average number in a protein.
"""
composition = Counter()
for sequence in self.data['Sequence']:
composition.update(sequence)
nb_proteins = len(self.data.index)
for aa in composition:
composition[aa] /= nb_proteins
return dict(composition)
class LocationParser(object):
"""Class parsing 'Subcellular location' field of uniprot."""
#_location_reader = re.compile(r'SUBCELLULAR LOCATION:\s([\w\s]+\w)')
_location_reader = re.compile(r'\s+([\w\s]+\w)')
def parse(self, field):
"""
Parse 'Subcellular location' field in uniprot.
Parameters
----------
field : str
Subcellular location field from uniprot.
Returns
-------
str
Compartment read.
"""
# Remove all fields such as {ECO:XX|Pubmed:ggg}
# location_remove_ECO = re.compile(r'\{(\w|:|\||-|,|\s)+\}(.|;|\s)');
# Remove all fields such as [Isoform 1]
        location_remove_ISO = re.compile(r'\[.*\]:')
if pandas.isnull(field):
return None
try:
# split subcellular localisation
# take the second elements, 1st is ''
fieldSplit = re.split('SUBCELLULAR LOCATION:',field)
# now remove [XXXX]:
fieldWithoutIso = location_remove_ISO.sub("",fieldSplit[1])
return self._location_reader.match(fieldWithoutIso).group(1)
#return self._location_reader.match(field).group(1)
except AttributeError:
print(field)
raise
class SubunitParser(object):
"""
Class parsing 'Subunit' uniprot field.
Attributes
----------
prefix_rule : dict
Dictionary determining rule used to infer stoichiometry.
Keys are all caps prefixes preceding 'mer' in words found
in uniprot field,
values are stoichiometries associated with them. For example,
prefix_rule[MONO] = 1.
"""
prefix_rule = {'MONO': 1, 'HETERODI': 1, 'HOMODI': 2, 'HOMOTRI': 3,
'HOMOTETRA': 4, 'HOMOPENTA': 5, 'HOMOHEXA': 6,
'HEPTA': 7, 'HOMOOCTA': 8, 'HOMODECA': 10, 'HOMODODECA': 12}
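    # capture the prefix of words ending in "mer" (e.g. "Homodimer" -> "Homodi")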
_subunit_reader = re.compile(r'([a-zA-Z]+)mer[^a-z]')
def parse(self, field):
"""
Parse uniprot field.
Parameters
----------
field : str
field to parse.
Returns
-------
int
Stoichiometry parsed (None if field was ambiguous).
"""
        # if field is empty, stoichiometry cannot be determined
if pandas.isnull(field):
return None
prefixes = self._subunit_reader.findall(field)
# if there is only one word of the form [prefix]mer,
if len(prefixes) == 1:
prefix = prefixes[0].upper()
# check for prefix in prefix rule
return self.prefix_rule.get(prefix, None)
else:
return None
class CofactorParser(object):
"""Class parsing Cofactor uniprot field."""
_name_reader = re.compile(r'Name=([^;]+); Xref=ChEBI:([^;]+);')
_note_reader = re.compile(r'Note=(.*)')
_stoichiometry_reader = re.compile(r'Binds ([\w]+)')
def parse(self, field):
"""
Parse uniprot field.
Parameters
----------
field : str
Uniprot field containing cofactor information.
Returns
-------
cofactors: list
Cofactor objects containing information retrieved,
where info was unambiguous.
        needs_curation : bool
            True if information for at least one cofactor was
            ambiguous or could not be retrieved (the corresponding
            Cofactor field is then set to None).
"""
if pandas.isnull(field):
return [], []
cofactor_notes = field.split('COFACTOR:')[1:]
cofactors = []
needs_curation = False
for note in cofactor_notes:
# read name(s) and chebi identifier(s) of cofactor(s)
# if no name was found, indicate chebi and name as missing
full_name = self._name_reader.findall(note)
if not full_name:
full_name.append([None, None])
# extract subnote if possible
subnotes = self._note_reader.findall(note)
subnote = subnotes[0] if len(subnotes) == 1 else note
# infer stoichiometry:
# - nothing read: stoichiometry is implicitly 1
# - one value read: use value if can be cast to integer, else
# tag as missing information.
# - multiple values read: tag as missing information.
stoichiometry = self._stoichiometry_reader.findall(note)
if not stoichiometry:
stoichiometry = 1
elif len(stoichiometry) == 1:
try:
stoichiometry = int(stoichiometry[0])
except ValueError:
stoichiometry = None
else:
stoichiometry = None
needs_curation = (
needs_curation or
(stoichiometry is None
or len(full_name) > 1
or full_name[0][0] is None)
)
# if there are several names, assume stoichiometry
# is number found earlier for first element of the list
# and 0 for the rest
for name, chebi in full_name:
cofactors.append(Cofactor(chebi, name, stoichiometry, subnote))
stoichiometry = 0
return cofactors, needs_curation
|
#
# This is the withhacks setuptools script.
# Originally developed by Ryan Kelly, 2009.
#
# This script is placed in the public domain.
#
from distutils.core import setup
import withhacks
VERSION = withhacks.__version__
NAME = "withhacks"
DESCRIPTION = "building blocks for with-statement-related hackery"
LONG_DESC = withhacks.__doc__
AUTHOR = "Ryan Kelly"
AUTHOR_EMAIL = "[email protected]"
URL = "http://github.com/rfk/withhacks"
LICENSE = "MIT"
KEYWORDS = "context manager with statement"
setup(name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
description=DESCRIPTION,
long_description=LONG_DESC,
license=LICENSE,
keywords=KEYWORDS,
packages=["withhacks","withhacks.tests"],
)
|
from django.db.transaction import atomic
from rest_framework import serializers
from products.models import Product, Category
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = ("id", "sku", "name", "description", "color", "size", "categories", "created_at", "modified_at")
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = ("id", "name", )
class ProductDetailedSerializer(serializers.ModelSerializer):
categories = CategorySerializer(many=True)
class Meta:
model = Product
fields = ("id", "sku", "name", "description", "color", "size", "categories", "created_at", "modified_at")
@atomic()
def create(self, validated_data):
categories = validated_data.pop("categories", None)
product = super().create(validated_data)
if categories:
serializer = CategorySerializer(data=categories, many=True)
serializer.is_valid(raise_exception=True)
serializer.save()
product.categories.add(*serializer.instance)
return product
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Cloud Text to Speech Hook.
"""
from typing import Union, Dict, Optional
from google.api_core.retry import Retry
from google.cloud.texttospeech_v1 import TextToSpeechClient
from google.cloud.texttospeech_v1.types import (
AudioConfig, SynthesisInput, VoiceSelectionParams, SynthesizeSpeechResponse
)
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
class GCPTextToSpeechHook(GoogleCloudBaseHook):
"""
Hook for Google Cloud Text to Speech API.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
def __init__(self, gcp_conn_id: str = "google_cloud_default", delegate_to: str = None) -> None:
super().__init__(gcp_conn_id, delegate_to)
self._client = None # type: Optional[TextToSpeechClient]
def get_conn(self) -> TextToSpeechClient:
"""
Retrieves connection to Cloud Text to Speech.
:return: Google Cloud Text to Speech client object.
:rtype: google.cloud.texttospeech_v1.TextToSpeechClient
"""
if not self._client:
self._client = TextToSpeechClient(
credentials=self._get_credentials(),
client_info=self.client_info
)
return self._client
def synthesize_speech(
self,
input_data: Union[Dict, SynthesisInput],
voice: Union[Dict, VoiceSelectionParams],
audio_config: Union[Dict, AudioConfig],
retry: Retry = None,
timeout: float = None
) -> SynthesizeSpeechResponse:
"""
Synthesizes text input
:param input_data: text input to be synthesized. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesisInput
:type input_data: dict or google.cloud.texttospeech_v1.types.SynthesisInput
:param voice: configuration of voice to be used in synthesis. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.VoiceSelectionParams
:type voice: dict or google.cloud.texttospeech_v1.types.VoiceSelectionParams
:param audio_config: configuration of the synthesized audio. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.AudioConfig
:type audio_config: dict or google.cloud.texttospeech_v1.types.AudioConfig
:return: SynthesizeSpeechResponse See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesizeSpeechResponse
:rtype: object
:param retry: (Optional) A retry object used to retry requests. If None is specified,
requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:type timeout: float
"""
client = self.get_conn()
self.log.info("Synthesizing input: %s", input_data)
return client.synthesize_speech(
input_=input_data, voice=voice, audio_config=audio_config, retry=retry, timeout=timeout
)
|
import os
from porter_stemmer import PorterStemmer
import re
class Parser:
STOP_WORDS_FILE = '%s/../data/english.stop' % os.path.dirname(os.path.realpath(__file__))
stemmer = None
stopwords = []
def __init__(self, stopwords_io_stream = None):
self.stemmer = PorterStemmer()
if(not stopwords_io_stream):
stopwords_io_stream = open(Parser.STOP_WORDS_FILE, 'r')
self.stopwords = stopwords_io_stream.read().split()
def tokenise_and_remove_stop_words(self, document_list):
if not document_list:
return []
vocabulary_string = " ".join(document_list)
tokenised_vocabulary_list = self._tokenise(vocabulary_string)
clean_word_list = self._remove_stop_words(tokenised_vocabulary_list)
return clean_word_list
def _remove_stop_words(self, list):
""" Remove common words which have no search value """
return [word for word in list if word not in self.stopwords ]
def _tokenise(self, string):
""" break string up into tokens and stem words """
string = self._clean(string)
words = string.split(" ")
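        # stem each word; the stemmer is called with the word plus the start/end indices of the span to stem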
return [self.stemmer.stem(word, 0, len(word)-1) for word in words]
def _clean(self, string):
""" remove any nasty grammar tokens from string """
string = string.replace(".","")
string = string.replace("\s+"," ")
string = string.lower()
return string
|
from microbit import *
# Please tag us if used!
# We'd love to see what you make:
# @ScienceOxford
'''
This tells us which of the micro:bit's pins is connected to which input on the motor driver (follow the coloured wires!).
e.g. FL means that it controls the pin that turns on the left-hand motor in the forward direction.
'''
FL = pin14
BL = pin13
FR = pin12
BR = pin15
'''
If the pin is set to HIGH (1023), the motor is turned off. The lower the number, the faster the motor goes.
Currently the motors are set to turn on at half speed (511), as this makes it easier to control.
'''
on = 511
off = 1023
'''
The following functions define the combination of pins to control direction.
'''
def stop(time):
display.clear()
FL.write_analog(off)
BL.write_analog(off)
FR.write_analog(off)
BR.write_analog(off)
sleep(time)
def forward(time):
display.show(Image.ARROW_N)
FL.write_analog(on)
BL.write_analog(off)
FR.write_analog(on)
BR.write_analog(off)
sleep(time)
def backward(time):
display.show(Image.ARROW_S)
FL.write_analog(off)
BL.write_analog(on)
FR.write_analog(off)
BR.write_analog(on)
sleep(time)
def left_turn(time):
display.show(Image.ARROW_W)
FL.write_analog(off)
BL.write_analog(on)
FR.write_analog(on)
BR.write_analog(off)
sleep(time)
def right_turn(time):
display.show(Image.ARROW_E)
FL.write_analog(on)
BL.write_analog(off)
FR.write_analog(off)
BR.write_analog(on)
sleep(time)
stop(1)
# WRITE YOUR CODE BELOW HERE!
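# For example: forward(1000) followed by stop(500) drives forwards for one second, then stops.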
|
# -*- coding: utf-8 -*-
# vim: set noai syntax=python ts=4 sw=4:
#
# Copyright (c) 2018-2022 Linh Pham
# api.wwdt.me is released under the terms of the Apache License 2.0
"""Testing /v2.0/locations routes
"""
from fastapi.testclient import TestClient
import pytest
from app.main import app
from app.config import API_VERSION
client = TestClient(app)
def test_locations():
"""Test /v2.0/locations route"""
response = client.get(f"/v{API_VERSION}/locations")
locations = response.json()
assert response.status_code == 200
assert "id" in locations["locations"][0]
assert "slug" in locations["locations"][0]
assert "venue" in locations["locations"][0]
assert "city" in locations["locations"][0]
assert "state" in locations["locations"][0]
@pytest.mark.parametrize("location_id", [32])
def test_locations_id(location_id: int):
"""Test /v2.0/locations/id/{location_id} route"""
response = client.get(f"/v{API_VERSION}/locations/id/{location_id}")
location = response.json()
assert response.status_code == 200
assert "id" in location
assert location["id"] == location_id
assert "slug" in location
assert "venue" in location
assert "city" in location
assert "state" in location
@pytest.mark.parametrize("location_slug", ["arlene-schnitzer-concert-hall-portland-or"])
def test_locations_slug(location_slug: str):
"""Test /v2.0/locations/slug/{location_slug} route"""
response = client.get(f"/v{API_VERSION}/locations/slug/{location_slug}")
location = response.json()
assert response.status_code == 200
assert "id" in location
assert "slug" in location
assert location["slug"] == location_slug
assert "venue" in location
assert "city" in location
assert "state" in location
def test_locations_recordings():
"""Test /v2.0/locations/recordings route"""
response = client.get(f"/v{API_VERSION}/locations/recordings")
locations = response.json()
assert response.status_code == 200
assert "locations" in locations
assert "id" in locations["locations"][0]
assert "slug" in locations["locations"][0]
assert "venue" in locations["locations"][0]
assert "city" in locations["locations"][0]
assert "state" in locations["locations"][0]
assert "recordings" in locations["locations"][0]
@pytest.mark.parametrize("location_id", [32])
def test_locations_recordings_id(location_id: int):
"""Test /v2.0/locations/recordings/id/{location_id} route"""
response = client.get(f"/v{API_VERSION}/locations/recordings/id/{location_id}")
location = response.json()
assert response.status_code == 200
assert "id" in location
assert location["id"] == location_id
assert "slug" in location
assert "venue" in location
assert "city" in location
assert "state" in location
assert "recordings" in location
@pytest.mark.parametrize("location_slug", ["arlene-schnitzer-concert-hall-portland-or"])
def test_locations_recordings_slug(location_slug: str):
"""Test /v2.0/locations/recordings/slug/{location_slug} route"""
response = client.get(f"/v{API_VERSION}/locations/recordings/slug/{location_slug}")
location = response.json()
assert response.status_code == 200
assert "id" in location
assert "slug" in location
assert location["slug"] == location_slug
assert "venue" in location
assert "city" in location
assert "state" in location
assert "recordings" in location
|
FIELDS = {
"profile": ["username","age","gender","preferred","address"],
#"profile": ["username","age","gender","preferred","address","height","width","avatar"],
"schedule": ["schedule_list"],
"DELETE": ["_id"],
"history": ["history_events","history_partner"],
"stats": ["rate","lasttime_login","credits"],
"message": ["unprocessed_message"]
}
# define the getAll function
def getData(request,res,db):
'''
Desc:
fetch all data about the user
Args:
request: request with different data
res: result that we need to update and return
Err:
2. invalid objectId
3. fail to get data
4. no match result
'''
# error handler for invalid objectid
data = {"_id":res["mid"]}
    print(data)
# data = {"sid":{"$in":schedule_list}}
docs = db.getData(data)
# error handler for getting data
if docs["status"]:
res["err"] = docs
return res
# error handler for no match result
if docs["content"].count() == 0:
res["err"]["status"] = 1
res["err"]["msg"] = "no matches"
return res
#
# normal process
#
    if docs["content"].count() > 1:
        res["rawdata"]["entries"] = []
    for doc in docs["content"]:
        for key in FIELDS["DELETE"]:
            # stringify ObjectId fields rather than removing them
            # del doc[key]
            doc[key] = str(doc[key])
        if docs["content"].count() > 1:
            res["rawdata"]["entries"].append(doc)
        else:
            res["rawdata"] = doc
return res
# define the filterData function
def filterData(request,res):
'''
Desc:
Filter data with field parameter
Args:
request : request object
res : result needs to return
'''
if res["field"] == None:
res["content"] = res["rawdata"]
return res
else:
for i,field in enumerate(FIELDS[res["field"]]):
res["content"][field] = res["rawdata"][field]
return res
|
import luigi
class ResData(luigi.ExternalTask):
"""Resistence data"""
__version__ = '0.1'
def output(self):
return luigi.LocalTarget('./data/raw/RES_essential_nonessential_2017-08-17_v5.csv')
class FcData(luigi.ExternalTask):
"""Flow Cytometry Data"""
__version__ = '0.1'
def output(self):
return luigi.LocalTarget('./data/raw/FC_essential_nonessential_2017-08-17_v5.csv')
class RS2CombData(luigi.ExternalTask):
"All data from Rule Set 2"
__version__ = '0.1'
def output(self):
return luigi.LocalTarget('./data/RS2_data/V3_data.csv')
class DoenchTestData(luigi.ExternalTask):
__version__ = '0.1'
def output(self):
return luigi.LocalTarget('./data/raw/SpCas9_test_guides.csv')
class AchillesTestData(luigi.ExternalTask):
__version__ = '0.1'
def output(self):
return luigi.LocalTarget('./data/raw/EWS502_BONE_essential.csv')
class AzimuthPredictions(luigi.Task):
__version__ = '0.1'
def output(self):
return luigi.LocalTarget('./data/raw/')
class ScorePredictions(luigi.Task):
__version__ = '0.1'
def output(self):
return luigi.LocalTarget('./data/raw/ddAUC/Gv2_all_scores.txt')
class OofRes(luigi.Task):
__version__ = '0.1'
def output(self):
return luigi.LocalTarget('./data/raw/RES_data_mutation_rates.csv')
class OofFc(luigi.Task):
__version__ = '0.1'
def output(self):
return luigi.LocalTarget('./data/raw/FC_OOF_activity.csv')
class OofGv2(luigi.Task):
__version__ = '0.1'
def output(self):
return luigi.LocalTarget('./data/raw/Gv2_oof_scores.csv')
class RS3Train(luigi.Task):
__version__ = 'ALL'
def output(self):
return luigi.LocalTarget('./data/raw/FC_RES_TRAIN_ALL_0-100.csv')
class Gv2Test(luigi.Task):
__version__ = '0.1'
def output(self):
return luigi.LocalTarget('./data/raw/Gv2_TEST_V1.csv')
|
def maiusculas(string):
uppercase_string = ''
for letra in string:
if letra.isupper():
uppercase_string += letra
return uppercase_string
maiusculas1 = ('Programamos em python 2?')
# should return 'P'
maiusculas2 = ('Programamos em Python 3.')
# should return 'PP'
maiusculas3 = ('PrOgRaMaMoS em python!')
# should return 'PORMMS'
print(maiusculas(maiusculas1))
print(maiusculas(maiusculas2))
print(maiusculas(maiusculas3)) |
import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.JetAnalysis.jets.akPu4PFJetSequence_PbPb_mc_cff import *
#PU jets with 25 GeV threshold for subtraction
akPu4PFmatch25 = akPu4PFmatch.clone(src = cms.InputTag("akPu4PFJets25"))
akPu4PFparton25 = akPu4PFparton.clone(src = cms.InputTag("akPu4PFJets25"))
akPu4PFcorr25 = akPu4PFcorr.clone(src = cms.InputTag("akPu4PFJets25"))
akPu4PFpatJets25 = akPu4PFpatJets.clone(jetSource = cms.InputTag("akPu4PFJets25"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPu4PFcorr25")),
genJetMatch = cms.InputTag("akPu4PFmatch25"),
genPartonMatch = cms.InputTag("akPu4PFparton25"),
)
akPu4PFJetAnalyzer25 = akPu4PFJetAnalyzer.clone(jetTag = cms.InputTag("akPu4PFpatJets25"), doSubEvent = cms.untracked.bool(True) )
akPu4PFJetSequence25 = cms.Sequence(akPu4PFmatch25
*
akPu4PFparton25
*
akPu4PFcorr25
*
akPu4PFpatJets25
*
akPu4PFJetAnalyzer25
)
|
# Generated by Django 3.2.6 on 2021-12-13 23:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('member', '0017_auto_20211213_1658'),
]
operations = [
migrations.AddField(
model_name='usersns',
name='token',
field=models.CharField(max_length=255, null=True, unique=True, verbose_name='토큰'),
),
]
|
# -*- coding: utf-8 -*-
# symbology across different markets and vendors
# bloomberg
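# e.g. '700.HK' -> '700 HK EQUITY'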
bloomberg_symbology = lambda x: str(int(x.split('.')[0])) + ' ' + x.split('.')[1] + ' EQUITY'
# bca research
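# e.g. '700:HK' -> '0700.HK'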
bca_symbology = lambda x: x.split(':')[0].zfill(4) + '.' + x.split(':')[1]
# hk local
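# e.g. 700 -> '0700.HK'; tickers starting with a letter are returned unchanged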
local_hk_symbology = lambda x: str(x) if (str(x)[0]).isalpha() else str(x).zfill(4) + '.HK' |
'''
Module for units and arrays with units.
Also doctest other parts of this sub-module:
>>> import doctest
>>> doctest.testmod(config)
TestResults(failed=0, attempted=5)
>>> doctest.testmod(units)
TestResults(failed=0, attempted=14)
>>> doctest.testmod(cosmology)
TestResults(failed=0, attempted=0)
# >>> doctest.testmod(simulation)
# TestResults(failed=0, attempted=4)
# >>> doctest.testmod(snapshot)
# TestResults(failed=0, attempted=4)
# >>> doctest.testmod(winds)
# TestResults(failed=0, attempted=4)
>>> doctest.testmod(galaxy)
TestResults(failed=0, attempted=18)
'''
from . import utils
from . import config
from .astroconst import *
from . import units
from . import cosmology
from . import simulation
from . import snapshot
from . import winds
from . import galaxy
from . import simlog
from . import derivedtables
from . import progen
from . import accretion
|
import re
from PyQt4.QtCore import QDir, QPoint, QTimer, QUrl
from PyQt4.QtGui import QDesktopServices
from PyQt4.QtWebKit import QWebView, QWebPage
import markdown
class MikiView(QWebView):
def __init__(self, parent=None):
super(MikiView, self).__init__(parent)
self.parent = parent
self.settings().clearMemoryCaches()
self.notePath = parent.settings.notePath
self.settings().setUserStyleSheetUrl(
QUrl.fromLocalFile(self.parent.settings.cssfile))
self.page().setLinkDelegationPolicy(QWebPage.DelegateAllLinks)
self.page().linkClicked.connect(self.linkClicked)
self.page().linkHovered.connect(self.linkHovered)
self.page().mainFrame(
).contentsSizeChanged.connect(self.contentsSizeChanged)
self.scrollPosition = QPoint(0, 0)
def linkClicked(self, qurl):
'''three kinds of link:
external uri: http/https
page ref link:
toc anchor link: #
'''
name = qurl.toString()
http = re.compile('https?://')
if http.match(name): # external uri
QDesktopServices.openUrl(qurl)
return
self.load(qurl)
name = name.replace('file://', '')
name = name.replace(self.notePath, '').split('#')
item = self.parent.notesTree.pageToItem(name[0])
if not item or item == self.parent.notesTree.currentItem():
return
else:
self.parent.notesTree.setCurrentItem(item)
if len(name) > 1:
link = "file://" + self.notePath + "/#" + name[1]
self.load(QUrl(link))
viewFrame = self.page().mainFrame()
self.scrollPosition = viewFrame.scrollPosition()
def linkHovered(self, link, title, textContent):
'''show link in status bar
ref link shown as: /parent/child/pageName
toc link shown as: /parent/child/pageName#anchor (ToFix)
'''
# TODO: link to page by: /parent/child/pageName#anchor
if link == '': # not hovered
self.parent.statusBar.showMessage(self.parent.notesTree.currentPage())
else: # beautify link
link = link.replace('file://', '')
link = link.replace(self.notePath, '')
self.parent.statusBar.showMessage(link)
def contentsSizeChanged(self, newSize):
'''scroll notesView while editing (adding new lines)
        Without this, every `updateView` will result in a scroll to top.
'''
if self.scrollPosition == QPoint(0, 0):
return
viewFrame = self.page().mainFrame()
newY = self.scrollPosition.y(
) + newSize.height() - self.contentsSize.height()
self.scrollPosition.setY(newY)
viewFrame.setScrollPosition(self.scrollPosition)
def updateView(self):
# url_notebook = 'file://' + os.path.join(self.notePath, '/')
viewFrame = self.page().mainFrame()
# Store scrollPosition before update notesView
self.scrollPosition = viewFrame.scrollPosition()
self.contentsSize = viewFrame.contentsSize()
url_notebook = 'file://' + self.notePath + '/'
self.setHtml(self.parent.notesEdit.toHtml(), QUrl(url_notebook))
# Restore previous scrollPosition
viewFrame.setScrollPosition(self.scrollPosition)
def updateLiveView(self):
if self.parent.actions.get('split').isChecked():
QTimer.singleShot(1000, self.updateView)
|
"""
zoom.collect
"""
import io
import logging
import os
import zoom
from zoom.browse import browse
from zoom.buckets import Bucket
from zoom.context import context
from zoom.alerts import success, error, warning
from zoom.fields import ButtonField
from zoom.forms import form_for, delete_form
from zoom.helpers import link_to
from zoom.models import Model, Attachment
from zoom.store import EntityStore
from zoom.mvc import View, Controller
from zoom.utils import name_for, id_for, Record
from zoom.page import page
from zoom.tools import redirect_to, now
from zoom.logging import log_activity
from zoom.users import authorize
def shared_collection_policy(group):
"""Authourization policy for a shared collection
"""
def policy(item, user, action):
"""Policy rules for shared collection"""
def is_manager(user):
"""Return True if user is a member of the managing group
"""
return user.is_member(group)
actions = {
'create': is_manager,
'read': is_manager,
'update': is_manager,
'delete': is_manager,
}
if action not in actions:
raise Exception('action missing: {}'.format(action))
return actions.get(action)(user)
return policy
def locate(collection, key):
"""locate a record"""
def scan(store, key):
"""brute force scan"""
for rec in store:
if rec.key == key:
return rec
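    # look up by numeric id first, then by the collection's key field, then fall back to a full scan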
return (
key.isdigit() and
collection.store.get(key) or
collection.store.first(**{collection.key_name: key}) or
scan(collection.store, key)
)
def image_response(name, data):
"""provide an image response based on the file extension"""
_, ext = os.path.splitext(name.lower())
if ext == '.png':
return zoom.response.PNGResponse(data)
elif ext == '.jpg':
return zoom.response.JPGResponse(data)
elif ext == '.gif':
return zoom.response.GIFResponse(data)
class CollectionStore(object):
"""Decorate a Store
Provide additional features to a Store class
to make it work with collections.
"""
def __init__(self, store):
self.store = store
class CollectionModel(Model):
"""CollectionModel"""
@property
def link(self):
"""Return a link"""
return link_to(self.name, self.url)
@property
def url(self):
return self.collection_url + '/' + self.key
def allows(self, user, action):
"""Item level policy"""
return True
CollectionRecord = CollectionModel
# Typically these are the same thing but occasionally
# a Model is more than just a Record. So, we provide
# both names purely for readability so you can use whatever
# makes sense in your app.
class CollectionView(View):
"""View a collection"""
def __init__(self, collection):
View.__init__(self)
self.collection = collection
def index(self, q='', *args, **kwargs):
"""collection landing page"""
c = self.collection
user = c.user
if c.request.route[-1:] == ['index']:
return redirect_to('/'+'/'.join(c.request.route[:-1]), **kwargs)
actions = user.can('create', c) and [
('New', zoom.url_for(c.url, 'new'))
] or []
logger = logging.getLogger(__name__)
if q:
title = 'Selected ' + c.title
records = c.search(q)
else:
has_many_records = c.has_many_records
logger.debug('has many records: %r', has_many_records)
if has_many_records and not kwargs.get('all'):
title = 'Most Recently Updated ' + c.title
records = self._get_recent(15)
actions.append(('Show All', c.url + '?all=1'))
else:
title = c.title
records = c.store
authorized = (i for i in records if user.can('read', i))
filtered = c.filter and filter(c.filter, authorized) or authorized
items = sorted(filtered, key=c.order, reverse=c.is_reversed())
items = c.sorter and c.sorter(items) or items
num_items = len(items)
if num_items != 1:
footer_name = c.title.lower()
else:
footer_name = c.item_title.lower()
if q:
msg = '%s searched %s with %r (%d found)' % (
user.link, c.link, q, num_items
)
log_activity(msg)
footer = '{:,} {} found in search of {:,} {}'.format(
num_items,
footer_name,
len(c.store),
c.title.lower(),
)
else:
if has_many_records:
footer = '{:,} {} shown of {:,} {}'.format(
num_items,
footer_name,
len(c.store),
c.title.lower(),
)
else:
footer = '%s %s' % (len(items), footer_name)
content = browse(
[c.model(i) for i in items],
labels=c.get_labels(),
columns=c.get_columns(),
fields=c.fields,
footer=footer,
sortable=c.sortable,
)
return page(content, title=title, actions=actions, search=q)
def clear(self):
"""Clear the search"""
return redirect_to('/' + '/'.join(self.collection.request.route[:-1]))
def _get_recent(self, number):
c = self.collection
cmd = """
select row_id, max(value) as newest
from attributes
where kind = %s and attribute in ("created", "updated")
group by row_id
order by newest desc
limit %s
"""
ids = [id for id, _ in c.store.db(cmd, c.store.kind, number)]
return c.store.get(ids)
def new(self, *args, **kwargs):
"""Return a New Item form"""
c = self.collection
c.user.authorize('create', c)
if kwargs:
c.fields.validate(kwargs)
form = form_for(c.fields, ButtonField('Create', cancel=c.url))
return page(form, title='New '+c.item_title)
def show(self, key):
"""Show a record"""
def action_for(record, name):
return name, '/'.join([record.url, id_for(name)])
c = self.collection
user = c.user
record = locate(c, key)
if record:
user.authorize('read', record)
actions = []
if user.can('delete', record):
actions.append(action_for(record, 'Delete'))
if user.can('update', record):
actions.append(action_for(record, 'Edit'))
if user.can('create', record):
actions.append(action_for(record, 'New'))
c.fields.initialize(c.model(record))
if 'updated' in record and 'updated_by' in record:
memo = (
'<div class="meta" style="float:right">'
'updated %(when)s by %(who)s'
'</div>'
'<div style="clear:both"></div>'
) % dict(
when=zoom.helpers.when(record['updated']),
who=zoom.helpers.who(record['updated_by'])
)
else:
memo = ''
if c.verbose:
msg = '%s viewed %s %s' % (
user.link,
c.link,
record.link,
)
log_activity(msg)
return page(
c.fields.show() + memo,
title=c.item_title,
actions=actions
)
def image(self, key, name):
"""Respond with image field contents"""
record = locate(self.collection, key)
if record:
return zoom.response.PNGResponse(
record[name],
max_age=0,
)
def edit(self, key, **data):
"""Display an edit form for a record"""
c = self.collection
user = c.user
user.authorize('update', c)
record = locate(c, key)
if record:
user.authorize('read', record)
user.authorize('update', record)
c.fields.initialize(record)
c.fields.update(data)
form = form_for(c.fields, ButtonField('Save', cancel=record.url))
if c.verbose:
msg = '%s edited %s %s' % (
user.link,
c.link,
record.link,
)
log_activity(msg)
return page(form, title=c.item_title)
else:
return page('%s missing' % key)
def delete(self, key, confirm='yes'):
"""Show a delete form for a collection record"""
if confirm == 'yes':
record = locate(self.collection, key)
if record:
return page(delete_form(record.name))
def list_images(self, key=None, value=None):
"""return list of images for an ImagesField value for this record"""
attachments = zoom.store.store_of(Attachment)
t = [dict(
name=a.attachment_name,
size=a.attachment_size,
item_id=a.attachment_id,
url=zoom.helpers.url_for('get-image', item_id=a.attachment_id),
) for a in attachments.find(field_value=value)]
return zoom.jsonz.dumps(t)
def get_image(self, *a, **k): # pylint: disable=W0613
"""return one of the images from an ImagesField value"""
item_id = k.get('item_id', None)
path = os.path.join(zoom.system.site.data_path, 'buckets')
bucket = Bucket(path)
return image_response('house.png', bucket.get(item_id))
class CollectionController(Controller):
"""Perform operations on a Collection"""
def __init__(self, collection):
Controller.__init__(self)
self.collection = collection
def create_button(self, *args, **data):
"""Create a record"""
collection = self.collection
user = collection.user
logger = logging.getLogger(__name__)
if collection.fields.validate(data):
record = collection.model(
collection.fields,
)
record.pop('key', None)
try:
key = record.key
except AttributeError:
key = None
if key and locate(collection, record.key) is not None:
error('That {} already exists'.format(collection.item_name))
else:
try:
# convert property to data attribute
# so it gets stored as data
record.key = record.key
except AttributeError:
# can happen when key depends on database
# auto-increment value.
pass
record.update(dict(
created=now(),
updated=now(),
owner_id=user._id,
created_by=user._id,
updated_by=user._id,
))
self.before_insert(record)
collection.store.put(record)
collection.search_engine(collection).add(
record._id,
collection.fields.as_searchable(),
)
self.after_insert(record)
msg = '%s added %s %s' % (
user.link,
collection.link,
record.link
)
logger.info(msg)
log_activity(msg)
return redirect_to(collection.url)
else:
logger.debug('field validation failed: %r', collection.fields)
def save_button(self, key, *a, **data):
"""Save a record"""
collection = self.collection
user = collection.user
user.authorize('update', collection)
if collection.fields.validate(data):
record = locate(collection, key)
if record:
user.authorize('update', record)
record.update(collection.fields)
record.pop('key', None)
if record.key != key and locate(collection, record.key):
# record key should always be a str, even if the actual
# record.id is being used as the key.
error('That {} already exists'.format(collection.item_name))
else:
record.updated = now()
record.updated_by = user._id
# convert property to data attribute
# so it gets stored as data
record.key = record.key
self.before_update(record)
collection.store.put(record)
collection.search_engine(collection).update(
record._id,
collection.fields.as_searchable(),
)
self.after_update(record)
msg = '%s updated %s %s' % (
user.link,
collection.link,
record.link
)
logger = logging.getLogger(__name__)
logger.info(msg)
log_activity(msg)
if record.key != key:
log_activity(
'%s changed %s %s to %s' % (
user.link,
collection.link,
key,
record.key
)
)
return redirect_to(record.url)
def delete(self, key, confirm='yes'):
"""Delete a record"""
c = self.collection
c.user.authorize('delete', c)
if confirm == 'no':
record = locate(c, key)
if record:
c.user.authorize('delete', record)
self.before_delete(record)
c.store.delete(record)
c.search_engine(c).delete(
record._id,
)
self.after_delete(record)
msg = '%s deleted %s %s' % (
c.user.link,
c.link,
record.name
)
logger = logging.getLogger(__name__)
logger.info(msg)
log_activity(msg)
return redirect_to(c.url)
def delete_image(self, key, name):
"""Delete an image field"""
record = locate(self.collection, key)
if record:
record[name] = None
record.save()
return redirect_to(zoom.helpers.url_for(record.url, 'edit'))
def add_image(self, *_, **kwargs):
"""accept uploaded images and attach them to the record"""
dummy = Record(
filename='dummy.png',
file=io.StringIO('test'),
)
# put the uploaded image data in a bucket
path = os.path.join(zoom.system.site.data_path, 'buckets')
bucket = Bucket(path)
f = kwargs.get('file', dummy)
name = f.filename
data = f.file.read()
item_id = bucket.put(data)
# create an attachment record for this bucket
c = self.collection
field_name = kwargs.get('field_name', 'unknown')
field_value = kwargs.get('field_value', 'unknown')
attachment = Attachment(
record_kind=c.store.kind,
field_name=field_name,
field_value=field_value,
attachment_id=item_id,
attachment_size=len(data),
attachment_name=name,
)
attachments = zoom.store.store_of(Attachment)
attachments.put(attachment)
return item_id
def remove_image(self, *_, **kwargs):
"""remove a dropzone image"""
# kwargs contains the item_id and filename of the file to be removed
item_id = kwargs.get('id', None)
# detach the image from the record
if item_id:
attachments = zoom.store.store_of(Attachment)
key = attachments.first(attachment_id=item_id)
if key:
attachments.delete(key)
# delete the bucket
path = os.path.join(zoom.system.site.data_path, 'buckets')
bucket = Bucket(path)
if item_id in bucket.keys():
bucket.delete(item_id)
return 'ok'
return 'empty'
def before_update(self, record):
"""Things to do before updating a record"""
pass
def after_update(self, record):
"""Things to do after updating a record"""
pass
def before_insert(self, record):
"""Things to do before inserting a record"""
pass
def after_insert(self, record):
"""Things to do after inserting a record"""
pass
def before_delete(self, record):
"""Things to do before deleting a record"""
pass
def after_delete(self, record):
"""Things to do after deleting a record"""
pass
@authorize('administrators')
def reindex(self):
"""Reindex the collection"""
self.collection.search_engine(self.collection).reindex()
success('reindexing completed')
return page('complete!', title='Reindex')
class RawSearch(object):
"""Raw Data Search"""
def __init__(self, collection):
self.collection = collection
logger = logging.getLogger(__name__)
logger.debug(
'starting %s for %s collection',
self.__class__.__name__,
self.collection.name
)
def search(self, text):
"""Return records that match raw search text"""
def matches(item, terms):
"""match a search by field values"""
v = ';'.join([
str(value).lower()
for key, value in item.items()
if not key.startswith('_')
])
return terms and not any(t not in v for t in terms)
terms = text and [t.lower() for t in text.split()]
return [
record for record in self.collection.store
if matches(record, terms)
]
def add(self, key, values):
"""Add record values to index"""
pass
def update(self, key, values):
"""Update indexed record values"""
pass
def delete(self, key):
"""Delete indexed record values"""
pass
def reindex(self): # pragma: no cover
zoom.alerts.warning('%s does not use indexing' % self.__class__.__name__)
class BasicSearch(RawSearch):
"""Provides basic unindexed field aware search capability"""
def search(self, text):
"""Return records that match search text"""
def matches(item, terms):
"""match a search by field values"""
fields.initialize(item)
v = ';'.join(
map(str, fields.as_searchable())
).lower()
return terms and not any(t not in v for t in terms)
fields = self.collection.fields
terms = text and [t.lower() for t in text.split()]
return [
record for record in self.collection.store
if matches(record, terms)
]
def as_tokens(values, max_len=20):
"""Return values as a set of tokens
>>> sorted(list(as_tokens(['this is a test', 'other', 'tokentoolongtobecapturedasis'])))
['a', 'is', 'other', 'test', 'this', 'tokentoolongtobecapt']
"""
tokens = set([
t[:max_len] for v in values
for t in v.lower().split()
])
return tokens
class IndexedCollectionSearch(object):
"""Provides token index for fast lookups
We only provide room for tokens up to length 20 because we have to
draw the line somewhere. This may result in some records not being
found if the search would have matched on characters beyond position 20.
"""
max_token_len = 20
def __init__(self, collection):
logger = logging.getLogger(__name__)
self.collection = collection
self.db = collection.store.db
self.kind = self.collection.store.kind
logger.debug(
'starting IndexedCollectionSearch for %s collection',
self.collection.name
)
if 'tokens' not in self.db.get_tables():
self.db("""
create table if not exists tokens (
kind varchar(100),
row_id int unsigned not null,
token char({})
)
""".format(self.max_token_len))
def reindex(self):
"""Rebuild the collection index
This method indexes a few records at a time, in batches. It
can be very slow, so it should be done only by admins or as part
of a maintenance cycle in the background. Once the table is
indexed this routine should not be needed. It's mainly provided
to index an already existing table or to replace a damaged
index.
"""
collection = self.collection
fields = collection.fields
logger = logging.getLogger(__name__)
count = 0
tick = 100
total = len(collection.store)
block = []
cmd = 'insert into tokens values ({!r}, %s, %s)'.format(
self.kind
)
msg = 'indexed %s of %s records (%0.4s%%)'
self.zap()
for record in collection.store:
if not count % tick:
if block:
self.db.execute_many(cmd, block)
block.clear()
logger.debug(msg, count, total, 100.0*count/total)
count += 1
fields.initialize(collection.model(record))
values = as_tokens(fields.as_searchable(), self.max_token_len)
block.extend(zip([record._id] * len(values), values))
if block:
self.db.execute_many(cmd, block)
block.clear()
logger.debug(msg, count, total, 100.0*count/total)
def add(self, key, values):
"""Add record values to index"""
tokens = [(t,) for t in as_tokens(values, self.max_token_len)]
cmd = 'insert into tokens values ({!r}, {!r}, %s)'.format(
self.kind, key
)
self.db.execute_many(cmd, tokens)
def delete(self, key):
"""Delete indexed record values"""
self.db(
'delete from tokens where kind=%s and row_id=%s',
self.kind,
key
)
def update(self, key, values):
"""Update indexed record values"""
self.delete(key)
self.add(key, values)
def zap(self):
"""Delete values for all records"""
self.db('delete from tokens where kind=%s', self.kind)
def search(self, text):
"""Return records that match search text"""
terms = text and [t.lower()[:self.max_token_len] for t in text.split()]
cmd = 'select distinct row_id from tokens where kind=%s and token like %s'
result = []
for term in sorted(terms, key=len, reverse=True):
target = '%{}%'.format(term)
result.append(
set(i for i, in self.db(cmd, self.kind, target))
)
keys = list(set.intersection(*result))
return self.collection.store.get(keys)
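# Note: the per-term row-id sets are intersected, so every search term must match (logical AND).
# Each term is truncated to max_token_len and matched with SQL LIKE '%term%', mirroring how the
# tokens were truncated when they were indexed.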
class Collection(object):
"""A collection of Records"""
controller = CollectionController
view = CollectionView
store_type = EntityStore
store = None
url = None
allows = shared_collection_policy('managers')
verbose = True
sorter = None
@property
def fields(self):
"""a fields callable may have data intensive operations, delay execution until it is needed"""
if callable(self.__fields):
self.__fields = self.__fields()
return self.__fields
def set_fields(self, fields):
"""Set the fields to a new value
This can be used for switching fields on the fly, for example
when your collection contains entities that are similar enough
to be included in the same collection, but that require their
own fields.
"""
self.__fields = fields
def __init__(self, fields, **kwargs):
def name_from(fields):
"""make a name from the field function provided"""
def rtrim(text, suffix):
if text.endswith(suffix):
return text[:-len(suffix)]
return text
return name_for(
rtrim(rtrim(fields.__name__, '_fields'), '_form')
)
def calc_url():
"""calculate default collection URL"""
return '/' + '/'.join(context.request.route[:2])
get = kwargs.pop
self.__fields = fields
self.item_name = get('item_name', None) or name_from(fields)
self.name = get('name', self.item_name + 's')
self.title = self.name.title().replace('_',' ')
self.item_title = self.item_name.title().replace('_',' ')
self.filter = get('filter', None)
self.columns = get('columns', None)
self.labels = get('labels', None)
self.model = get('model', None)
self.store = get('store', None)
self.url = get('url', calc_url())
self.controller = get('controller', self.controller)
self.view = get('view', self.view)
self.link = link_to(self.name, self.url)
self.key_name = get('key_name', 'key')
self.user = None
self.request = None
self.route = None
self.search_engine = get('search_engine', BasicSearch)
self.many_records = 50
self.sorter = get('sorter', None)
self.sortable = get('sortable', False)
if 'policy' in kwargs:
self.allows = get('policy')
def search(self, text):
"""Seach the collection for records matching text"""
return self.search_engine(self).search(text)
@property
def has_many_records(self):
return len(self.store) >= self.many_records
def order(self, item):
"""Returns the sort key"""
return item.name.lower()
def is_reversed(self):
"""Sort ASC or DESC"""
return False
def locate(self, key):
"""locate a record"""
return locate(self, key)
def get_columns(self):
"""Return the collection columns."""
if self.columns:
return self.columns
return ['link'] + [f.name for f in self.fields.as_list()[1:]]
def get_labels(self):
"""Return the collection labels."""
if self.labels:
return self.labels
lookup = {f.name: f.label for f in self.fields.as_dict().values()}
lookup.update(dict(
link=self.fields.as_list()[0].label,
))
labels = [lookup.get(name, name.capitalize()) for name in self.get_columns()]
return labels
def handle(self, route, request):
"""handle a request"""
self.user = request.user
self.request = request
self.route = route
logger = logging.getLogger(__name__)
logger.debug('Collection handler called')
if self.store is None:
if self.model is None:
self.model = CollectionModel
self.model.collection_url = self.url
self.store = EntityStore(
request.site.db,
self.model,
self.item_name + '_collection'
)
else:
self.store = EntityStore(
request.site.db,
self.model,
)
return (
self.controller(self)(*route, **request.data) or
self.view(self)(*route, **request.data)
)
def process(self, *args, **data):
"""Process method parameters
This style of calling collections is useful when you want to
make your collection available as an attribute of a Dispatcher.
"""
route = args
request = context.request
self.user = request.user
self.request = request
self.route = route
logger = logging.getLogger(__name__)
logger.debug('Collection process called')
if self.store is None:
if self.model is None:
self.model = CollectionModel
self.model.collection_url = self.url
self.store = EntityStore(
request.site.db,
self.model,
self.item_name + '_collection'
)
else:
self.store = EntityStore(
request.site.db,
self.model,
)
return (
self.controller(self)(*route, **request.data) or
self.view(self)(*route, **request.data)
)
def __call__(self, route, request):
return self.handle(route, request)
def __str__(self):
return 'collection of ' + str(self.store.kind)
class SilentCollection(Collection):
"""A collection of Records where we do not audit "View" events"""
verbose = False
def collection_of(fields, **kwargs):
"""Returns a collection"""
return Collection(fields, **kwargs)
|
from shinytest import ShinyTestCase
class TestNpc(ShinyTestCase):
def setUp(self):
ShinyTestCase.setUp(self)
from shinymud.models.player import Player
from shinymud.models.npc import Npc
from shinymud.models.area import Area
from shinymud.models.room import Room
from shinymud.modes.build_mode import BuildMode
from shinymud.data import config
self.PERMS = config.PERMS
self.bob = Player(('bob', 'bar'))
self.bob.mode = None
self.bob.playerize({'name':'bob'})
self.world.player_add(self.bob)
self.bob.mode = BuildMode(self.bob)
self.bob.permissions = self.bob.permissions | self.PERMS['builder']
self.area = Area.create({'name': 'foo'})
self.room = self.area.new_room()
self.area2 = Area.create({'name': 'SimCity'})
self.area2_script = self.area2.new_script()
def tearDown(self):
del self.area
del self.area2
del self.bob
def test_existance(self):
"""Test if an NPC can exist within an area properly (unspawned)"""
self.npc = self.area.new_npc()
self.npc.characterize({'name': 'bobert'})
self.assertTrue(self.npc in self.area.npcs.values())
def test_build_add_remove_events(self):
npc = self.area.new_npc()
fail_message = 'Type "help events" for help with this command.'
message = npc.build_add_event('', self.bob)
self.assertEqual(message, fail_message)
#test for non-existent scripts
message = npc.build_add_event('pc_enter call script 1', self.bob)
self.assertEqual(message, "Script 1 doesn't exist.")
message = npc.build_add_event("hears 'spam' call script 0", self.bob)
self.assertEqual(message, "Script 0 doesn't exist.")
message = npc.build_add_event("hears 'spam' call script 602", self.bob)
self.assertEqual(message, "Script 602 doesn't exist.")
script = self.area.new_script()
#Test basic add
message = npc.build_add_event('pc_enter call script 1', self.bob)
self.assertEqual(message, 'Event added.' )
#Test invalid areas
message = npc.build_add_event('pc_enter call script 1 from area AreaDontExist', self.bob)
self.assertEqual(message, 'Area "AreaDontExist" doesn\'t exist.')
message = npc.build_add_event('pc_enter call script 1 from area AreaDontExist 100', self.bob)
self.assertEqual(message, 'Area "AreaDontExist" doesn\'t exist.')
#Test invalid probabilities.
message = npc.build_add_event('pc_enter call script 1 0', self.bob)
self.assertEqual(message, 'Probability value must be between 1 and 100.')
message = npc.build_add_event('pc_enter call script 1 101', self.bob)
self.assertEqual(message, 'Probability value must be between 1 and 100.')
message = npc.build_add_event('pc_enter call script 1 9999', self.bob)
self.assertEqual(message, 'Probability value must be between 1 and 100.')
#Test different forms of valid adds.
message = npc.build_add_event('pc_enter call script 1 50', self.bob)
self.assertEqual(message, 'Event added.')
message = npc.build_add_event('pc_enter call script 1 from area SimCity', self.bob)
self.assertEqual(message, 'Event added.')
message = npc.build_add_event('pc_enter call script 1 from area SimCity 75', self.bob)
self.assertEqual(message, 'Event added.')
message = npc.build_add_event('pc_enter call 1 from SimCity 50', self.bob)
self.assertEqual(message, 'Event added.')
#Test for trigger 'hears'
message = npc.build_add_event("hears 'food' call script 1", self.bob)
self.assertEqual(message, 'Event added.' )
#Test for trigger 'emoted'
message = npc.build_add_event("emoted 'slap' call script 1", self.bob)
self.assertEqual(message, 'Event added.' )
#Technically invalid, but will be left to user responsibility for now.
#(it shouldn't ever cause a crash)
message = npc.build_add_event("emoted 'emotedontexist' call script 1", self.bob)
self.assertEqual(message, 'Event added.' )
#Test for new items
self.area.new_item()
message = npc.build_add_event("given_item 'item 1' call script 1", self.bob)
self.assertEqual(message, 'Event added.' )
#Technically invalid, but will be left to user responsibility for now.
#(it shouldn't ever cause a crash)
message = npc.build_add_event("given_item 'item 5' call script 1", self.bob)
self.assertEqual(message, 'Event added.' )
#we should now have 5 successfully added events in pc_enter
self.assertEqual(len(npc.events['pc_enter']), 5)
message = npc.build_remove_event("pc_enter -1", self.bob)
self.assertEqual(message, 'Try: "remove event <event-trigger> <event-id>" or see "help npc events".' )
self.assertEqual(len(npc.events['pc_enter']), 5)
message = npc.build_remove_event("pc_enter 5", self.bob)
self.assertEqual(message, "Npc 1 doesn't have the event pc_enter #5." )
self.assertEqual(len(npc.events['pc_enter']), 5)
message = npc.build_remove_event("pc_enter 4", self.bob)
self.assertEqual(message, 'Event pc_enter, number 4 has been removed.')
self.assertEqual(len(npc.events['pc_enter']), 4)
message = npc.build_remove_event("given_item 1", self.bob)
self.assertEqual(message, 'Event given_item, number 1 has been removed.')
self.assertEqual(len(npc.events['pc_enter']), 4)
self.assertEqual(len(npc.events['given_item']), 1)
def test_build_add_remove_permissions(self):
npc = self.area.new_npc()
#set npc permissions to nothing.
npc.permissions = 0
message = npc.build_add_permission("dm", self.bob)
self.assertEqual(message, "You need to be GOD in order to edit an npc's permissions.")
#The permission level currently required to edit an npc's permissions is 'god' (as of when this test was written).
#Change as needed!
self.bob.permissions = self.bob.permissions | self.PERMS['god']
#Bad input tests
message = npc.build_add_permission("", self.bob)
self.assertEqual(message, 'Try: "add permission <permission group>". See "help permissions".')
message = npc.build_add_permission("monkey", self.bob)
self.assertTrue('Valid permissions are: admin, player, builder, dm, god\n' in message)
#good input tests
message = npc.build_add_permission("god", self.bob)
self.assertTrue('Shiny McShinerson now has god permissions.' in message)
self.assertTrue(npc.permissions is self.PERMS['god'])
message = npc.build_add_permission("dm", self.bob)
self.assertTrue('Shiny McShinerson now has dm permissions.' in message)
self.assertTrue(npc.permissions is self.PERMS['god'] | self.PERMS['dm'])
self.assertTrue(npc.permissions is not self.PERMS['god'] | self.PERMS['dm'] | self.PERMS['admin'])
message = npc.build_add_permission("admin", self.bob)
self.assertTrue('Shiny McShinerson now has admin permissions.' in message)
self.assertTrue(npc.permissions is self.PERMS['god'] | self.PERMS['dm'] | self.PERMS['admin'])
#Removing Permissions
#reset bob's permissions for the next test
self.bob.permissions = 0
message = npc.build_remove_permission("dm", self.bob)
self.assertEqual(message, "You need to be GOD in order to edit an npc's permissions.")
#The permission level currently required to edit an npc's permissions is 'god' (as of when this test was written).
#Change as needed!
self.bob.permissions = self.bob.permissions | self.PERMS['god']
#Bad input tests
message = npc.build_remove_permission("", self.bob)
self.assertEqual(message, 'Try: "remove permission <permission group>", or see "help permissions".')
message = npc.build_remove_permission("monkey", self.bob)
self.assertEqual("Shiny McShinerson doesn't have monkey permissions.", message)
#Good input tests
self.assertTrue(npc.permissions is self.PERMS['god'] | self.PERMS['dm'] | self.PERMS['admin'])
message = npc.build_remove_permission("god", self.bob)
self.assertEqual('Shiny McShinerson no longer has god permissions.', message)
self.assertTrue(npc.permissions is self.PERMS['dm'] | self.PERMS['admin'])
self.assertTrue(npc.permissions < self.PERMS['god'])
message = npc.build_remove_permission("dm", self.bob)
self.assertEqual('Shiny McShinerson no longer has dm permissions.', message)
self.assertTrue(npc.permissions is self.PERMS['admin'])
self.assertTrue(npc.permissions >= self.PERMS['dm'])
message = npc.build_remove_permission("admin", self.bob)
self.assertEqual('Shiny McShinerson no longer has admin permissions.', message)
self.assertTrue(npc.permissions is 0)
def test_build_add_remove_ai(self):
npc = self.area.new_npc()
#Test adding ai pack
message = npc.build_add_ai("", self.bob)
self.assertEqual('Try: "add ai <ai-pack-name>", or type "help ai packs".', message)
message = npc.build_add_ai("doesnotexist", self.bob)
self.assertEqual('"doesnotexist" is not a valid ai pack. See "help ai packs".', message)
message = npc.build_add_ai("merchant", self.bob)
self.assertEqual("This npc (Shiny McShinerson) is now a merchant.", message)
message = npc.build_add_ai("merchant", self.bob)
self.assertEqual('This npc (Shiny McShinerson) already has that ai pack.', message)
#Test basic add behavior for ai pack
message = str(npc)
self.assertTrue("MERCHANT ATTRIBUTES:" in message)
#Test removing ai pack
message = npc.build_remove_ai("", self.bob)
self.assertEqual('Try: "remove ai <ai-pack-name>", or type "help ai packs".', message)
message = npc.build_remove_ai("doesnotexist", self.bob)
self.assertEqual('This npc doesn\'t have the "doesnotexist" ai type.', message)
message = npc.build_remove_ai("merchant", self.bob)
self.assertEqual('Npc 1 (Shiny McShinerson) no longer has merchant ai.', message)
message = npc.build_remove_ai("merchant", self.bob)
self.assertEqual('This npc doesn\'t have the "merchant" ai type.', message)
|
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
def morphology_diff(contrast_green, clahe):
#apply opening / closing morphology at increasing kernel sizes
#1st
open1 = cv2.morphologyEx(contrast_green, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
close1 = cv2.morphologyEx(open1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
#2nd
open2 = cv2.morphologyEx(close1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
close2 = cv2.morphologyEx(open2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
#3rd
open3 = cv2.morphologyEx(close2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
close3 = cv2.morphologyEx(open3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
#difference between the morphologically closed (background) image and contrast_green
contrast_morph = cv2.subtract(close3, contrast_green)
return clahe.apply(contrast_morph)
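# The opening/closing passes with growing elliptical kernels (5x5, 11x11, 23x23) progressively
# erase thin, dark vessel structures, leaving an estimate of the background. Subtracting the
# contrast-enhanced green channel from that background (cv2.subtract above) leaves the vessels,
# and a final CLAHE pass boosts their contrast.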
def remove_noise(morph_image):
ret, thr = cv2.threshold(morph_image,15,255,cv2.THRESH_BINARY)
mask = np.ones(morph_image.shape[:2], dtype="uint8") * 255
contours, hierarchy = cv2.findContours(thr.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
im2 = cv2.findContours(thr.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
if cv2.contourArea(cnt) <= 200:
cv2.drawContours(mask, [cnt], -1, 0, -1)
im = cv2.bitwise_and(morph_image, morph_image, mask=mask)
ret,fin_thr = cv2.threshold(im,15,255,cv2.THRESH_BINARY_INV)
new_img = cv2.erode(fin_thr, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
return new_img
def remove_blob(clear_image, org_image):
fundus_eroded = cv2.bitwise_not(clear_image)
xmask = np.ones(org_image.shape[:2], dtype="uint8") * 255
xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
x1 = cv2.findContours(fundus_eroded.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for cnt in xcontours:
shape = "unidentified"
peri = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
shape = "circle"
else:
shape = "veins"
if(shape=="circle"):
cv2.drawContours(xmask, [cnt], -1, 0, -1)
finimage = cv2.bitwise_and(fundus_eroded,fundus_eroded,mask=xmask)
blood_vessels = cv2.bitwise_not(finimage)
return blood_vessels
def detect_vessel(org_image):
copy_org_image = org_image.copy()
#split the image into blue, green and red channels (OpenCV's BGR order)
blue, green, red = cv2.split(org_image)
#create a CLAHE object
clahe = cv2.createCLAHE(clipLimit=5.0, tileGridSize=(8,8))
contrast_green = clahe.apply(green)
#image after morphology: background-subtracted and CLAHE-enhanced
morph_image = morphology_diff(contrast_green, clahe)
#remove noise
clear_image = remove_noise(morph_image)
#remove blobs
fin_image = remove_blob(clear_image, org_image)
i = 0
j = 0
for gr, fin in zip(green, fin_image):
for g, f in zip(gr, fin):
if(f == 0):
green[i][j] = 255
j = j + 1
j = 0
i = i + 1
#return fin_image
return fin_image, cv2.merge(( blue, green, red))
if __name__ == "__main__":
main()
|
import matplotlib.pyplot as plt
import numpy as np
from collections import deque
import datetime
# Global variables
# speed
move_speed = 1.0
# mackey glass params
gamma = 0.1
beta = 0.2
tau = 17
en = 10.
def run(num_data_samples=5000, init_x=1.0, init_x_tau=0.0):
x_history = deque(maxlen=tau)
x_history.clear()
x_pos = init_x
x_pos_tau = init_x_tau
# record timesteps
sample_timer = 0
# sample for training
current_sample = 0
data_samples = []
while True:
if len(x_history) >= tau:
x_pos_tau = x_history[0]
x_pos += move_speed * d_x(x_pos, x_pos_tau)
# store data
if sample_timer > 300:
data_samples.append(x_pos)
current_sample += 1
if current_sample >= num_data_samples:
# print("DONE")
return data_samples
# record move history
x_history.append(x_pos)
sample_timer += 1
def onExit(data, plot2d=False):
# save the data
data_np = np.asarray(data)
np.savetxt("data_{}.txt".format(datetime.date.today()), data_np,
delimiter=",")
# plot the data
if plot2d:
x_minus_tau = data[:-tau]
x_ = data[tau:]
plt.plot(x_, x_minus_tau, linewidth=0.2)
plt.xlabel("x(t)")
plt.ylabel("x(t-tau)")
plt.show()
else:
plt.plot(range(len(data)), data)
plt.show()
def d_x(x_t, x_t_tau):
return beta * (x_t_tau/(1.+pow(x_t_tau, en))) - gamma * x_t
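# d_x is the right-hand side of the Mackey-Glass delay differential equation
#   dx/dt = beta * x(t - tau) / (1 + x(t - tau)**n) - gamma * x(t)
# and run() integrates it with an explicit Euler step of size move_speed, using x_history to
# supply the delayed value x(t - tau).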
if __name__ == "__main__":
data = run(num_data_samples=8000)
onExit(data, plot2d=True)
|
import h5py
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
current_palette = sns.color_palette('bright',10)
#%% initialization
result_path = 'ShapeNet_testing_result.hdf5'
class_name = np.genfromtxt('hdf5_data/all_object_categories.txt',dtype='U')[:,0]
#############################################################################
# ['Airplane' 'Bag' 'Cap' 'Car' 'Chair' 'Earphone' 'Guitar' 'Knife' #
# 'Lamp' 'Laptop' 'Motorbike' 'Mug' 'Pistol' 'Rocket' 'Skateboard' 'Table'] #
#############################################################################
#%% load the test set
print("loading testing data")
f = h5py.File(result_path,'r')
x_test = f['x_test']
y_test = f['y_test'][:]
s_test = f['s_test']
p_test = f['p_test']
pre_test = f['pre_test']
#%% select a test sample
idx_class = 0
idx_class_sample = 0
idx_sample_list = np.where(y_test==idx_class)[0]
idx_sample = idx_sample_list[idx_class_sample]
label_min = s_test[idx_sample_list].min()
label_max = s_test[idx_sample_list].max()
print('Class_name:',class_name[idx_class],', test sample id:',idx_sample)
#%% load the test sample
x_pt = x_test[idx_sample]
s_pt = s_test[idx_sample]-label_min
pre_pt = pre_test[idx_sample]-label_min
#%% visualize the ground-truth point segmentation
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim(-1,1)
ax.set_ylim(-1,1)
ax.set_zlim(-1,1)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
for i_seg in range(label_max - label_min +1):
idxs = np.where(s_pt == i_seg)[0]
color = current_palette.as_hex()[i_seg]
ax.scatter(x_pt[idxs,0], x_pt[idxs,1], x_pt[idxs,2],marker='.',c=color,s=2,label='Category '+str(i_seg))
ax.set_title('Ground Truth')
ax.legend()
plt.show()
#%% visualize the predicted point segmentation
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim(-1,1)
ax.set_ylim(-1,1)
ax.set_zlim(-1,1)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
for i_seg in range(label_max - label_min +1):
idxs = np.where(pre_pt == i_seg)[0]
color = current_palette.as_hex()[i_seg]
ax.scatter(x_pt[idxs,0], x_pt[idxs,1], x_pt[idxs,2],marker='.',c=color,s=2,label='Category '+str(i_seg))
ax.set_title('Network Output')
ax.legend()
plt.show() |
# SPDX-FileCopyrightText: 2018 Dave Astels for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
Display code for signal generator.
Adafruit invests time and resources providing this open source code.
Please support Adafruit and open source hardware by purchasing
products from Adafruit!
Written by Dave Astels for Adafruit Industries
Copyright (c) 2018 Adafruit Industries
Licensed under the MIT license.
All text above must be included in any redistribution.
"""
import math
import board
import busio
import adafruit_ssd1306
import shapes
class Display:
"""Manage the OLED Featherwing display"""
i2c = None
oled = None
shape = None
frequency = None
def __init__(self):
self.i2c = busio.I2C(board.SCL, board.SDA)
self.oled = adafruit_ssd1306.SSD1306_I2C(128, 32, self.i2c)
self.oled.fill(0)
self.oled.show()
def draw_sine(self):
for i in range(32):
self.oled.pixel(i, int(math.sin(i/32 * math.pi * 2) * 16) + 16, 1)
def draw_square(self):
for i in range(16):
self.oled.pixel(0, 32 - i, 1)
self.oled.pixel(i, 31, 1)
self.oled.pixel(31, i, 1)
self.oled.pixel(15, 16 + i, 1)
self.oled.pixel(15, i, 1)
self.oled.pixel(16 + i, 0, 1)
def draw_triangle(self):
for i in range(8):
self.oled.pixel(i, 16 + i * 2, 1)
self.oled.pixel(8 + i, 32 - i * 2, 1)
self.oled.pixel(16 + i, 16 - i * 2, 1)
self.oled.pixel(24 + i, i * 2, 1)
def draw_sawtooth(self):
for i in range(16):
self.oled.pixel(0, 16 + i, 1)
self.oled.pixel(31, i, 1)
for i in range(32):
self.oled.pixel(i, 31 - i, 1)
def update(self):
self.oled.fill(0)
if self.shape == shapes.SINE:
self.draw_sine()
elif self.shape == shapes.SQUARE:
self.draw_square()
elif self.shape == shapes.TRIANGLE:
self.draw_triangle()
elif self.shape == shapes.SAWTOOTH:
self.draw_sawtooth()
self.oled.text("{0}".format(self.frequency), 40, 10, 1)
self.oled.show()
def update_shape(self, shape):
if shape != self.shape:
self.shape = shape
self.update()
def update_frequency(self, frequency):
if frequency != self.frequency:
self.frequency = frequency
self.update()
|
# -*- coding:utf-8 -*-
__author__ = 'hhstore'
'''
Purpose: read and write Office documents; .docx is supported.
Dependency: the docx module.
'''
from docx import Document
def parse_docx(in_file, out_file):
doc = Document(in_file)
for item in doc.paragraphs:  # parse the content paragraph by paragraph
print(item.text)
doc.save(out_file)
if __name__ == '__main__':
infile = "test.docx"
outfile = "out.docx"
parse_docx(infile, outfile)
|
import inspect
from pathlib import Path
from functools import partial
import torch
from torch.autograd.profiler import profile
import torch.distributed as dist
from torch.distributed import ReduceOp
from colossalai.utils import get_current_device
from .prof_utils import BaseProfiler, _format_time, _format_memory, _format_bandwidth
from typing import List, Optional
def _get_code_location(depth: int):
ret = []
length = min(len(inspect.stack()), depth + 1)
for i in range(3, length):
upper_frame = inspect.stack()[i]
function_name = inspect.stack()[i - 1].function
ret.append(upper_frame.filename)
ret.append('(')
ret.append(str(upper_frame.lineno))
ret.append('): ')
ret.append(function_name)
if i != length - 1:
ret.append('\n')
return ''.join(ret)
torch_all_reduce = dist.all_reduce
torch_all_gather = dist.all_gather
torch_reduce_scatter = dist.reduce_scatter
torch_broadcast = dist.broadcast
torch_reduce = dist.reduce
class CommEvent(object):
"""Communication Event. Used for communication time and communication
volume recording.
"""
def __init__(self, count: int = 0, comm_vol: float = 0., cuda_time: int = 0):
self.self_count = count
self.self_comm_vol = comm_vol
self.self_cuda_time = cuda_time
def add(self, rhs):
self.self_count += rhs.self_count
self.self_comm_vol += rhs.self_comm_vol
self.self_cuda_time += rhs.self_cuda_time
class CommProfiler(BaseProfiler):
"""Communication profiler. Records all communication events.
"""
def __init__(self, depth: int = 0, total_count: int = 0, total_comm_vol: float = 0, total_cuda_time: int = 0):
super().__init__(profiler_name="Collective_Communication", priority=0)
self.depth = 3 + depth
self.total_count = total_count
self.total_comm_vol = total_comm_vol
self.total_cuda_time = total_cuda_time
self.ops_record = dict()
self.profiler = None
self.pending_op = None
self.pending_metadata = None
self.warn_flag = False
def reset(self):
self.total_count = 0
self.total_comm_vol = 0
self.total_cuda_time = 0
self.ops_record = dict()
self.profiler = None
self.pending_op = None
self.pending_metadata = None
self.warn_flag = False
def enable(self):
dist.all_reduce = partial(all_reduce, profiler=self)
dist.all_gather = partial(all_gather, profiler=self)
dist.reduce_scatter = partial(reduce_scatter, profiler=self)
dist.broadcast = partial(broadcast, profiler=self)
dist.reduce = partial(reduce, profiler=self)
def disable(self):
dist.all_reduce = torch_all_reduce
dist.all_gather = torch_all_gather
dist.reduce_scatter = torch_reduce_scatter
dist.broadcast = torch_broadcast
dist.reduce = torch_reduce
def to_tensorboard(self, writer):
writer.add_text(tag="Collective Communication", text_string=self.result_str("\n\n"))
def to_file(self, filename: Path):
with open(filename, "w") as f:
f.write(self.result_str())
def show(self):
print(self.result_str())
def result_str(self, sep: str = "\n"):
res = []
def append(s: str = None):
if s is not None:
res.append(s)
res.append(sep)
if self.warn_flag:
append("Warnning: there exists multiple communication operations in the same time. As a result, "
"the profiling result is not accurate.")
if self.total_cuda_time == 0:
return "No collective communication has been called yet!"
append("Collective communication profiling result:")
append("total cuda time: {}".format(_format_time(self.total_cuda_time)))
append("average bandwidth: {}".format(_format_bandwidth(self.total_comm_vol, self.total_cuda_time)))
append("total number of calls: {}".format(self.total_count))
append("All events:")
separation = '-' * 74
row_format = '{:^10}' + '{:^12}' * 2 + '{:^16}' + '{:^12}' * 2
append(separation)
append(row_format.format('Location', 'GPU time', 'Percentage', 'Comm volume', 'Bandwidth', 'Num of calls'))
append(separation)
show_list = sorted(self.ops_record.items(), key=lambda kv: -kv[1].self_cuda_time)
for location, event in show_list:
append(location)
append(
row_format.format('', _format_time(event.self_cuda_time),
'{:.1f}%'.format(event.self_cuda_time / self.total_cuda_time * 100.0),
_format_memory(event.self_comm_vol),
_format_bandwidth(event.self_comm_vol, event.self_cuda_time), event.self_count))
append()
return ''.join(res)
@property
def has_aync_op(self):
return self.pending_op is not None
def activate_profiler(self, kn: str, vol: float):
self.pending_metadata = (kn, _get_code_location(self.depth), vol)
self.profiler = profile(enabled=True, use_cuda=True, use_cpu=True, use_kineto=True)
self.profiler.__enter__()
def close_profiler(self, group=None):
assert self.profiler is not None, "There is no running dist op"
kernel_name, code_location, vol = self.pending_metadata
self.profiler.__exit__(None, None, None)
if self.profiler.enabled and dist.get_world_size(group) > 1:
assert_flag = 0
current_comm_event = None
events = self.profiler.function_events
for event in events:
if kernel_name in event.name:
assert assert_flag == 0, "Multiple dist ops have been called"
current_comm_event = CommEvent(1, vol, event.self_cuda_time_total)
assert_flag += 1
assert current_comm_event is not None, "dist op has not been found"
buffer = torch.tensor([current_comm_event.self_cuda_time], device=get_current_device())
torch_all_reduce(buffer, op=ReduceOp.MIN, group=group)
current_comm_event.self_cuda_time = buffer.item()
self.total_count += current_comm_event.self_count
self.total_comm_vol += current_comm_event.self_comm_vol
self.total_cuda_time += current_comm_event.self_cuda_time
if code_location in self.ops_record:
self.ops_record[code_location].add(current_comm_event)
else:
self.ops_record[code_location] = current_comm_event
self.profiler = None
self.pending_op = None
self.pending_metadata = None
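# Note: the ReduceOp.MIN all-reduce above keeps the smallest measured kernel time across ranks,
# so time a rank spends waiting for slower peers inside the communication kernel does not
# inflate the reported communication time.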
def wait_async_op(self):
if self.pending_op is not None:
op = self.pending_op
op.wait()
self.close_profiler()
class CommHandler(object):
"""Communication handler. A dummy handler to wait aync operations.
"""
def __init__(self, profiler: CommProfiler):
super().__init__()
self.prof = profiler
def wait(self):
self.prof.wait_async_op()
def async_check(profiler: CommProfiler):
if profiler.pending_op is not None:
profiler.warn_flag = True
profiler.wait_async_op()
def all_reduce(tensor: torch.Tensor,
op: ReduceOp = ReduceOp.SUM,
group=None,
async_op: bool = False,
profiler: CommProfiler = None) -> Optional[CommHandler]:
async_check(profiler)
comm_size = dist.get_world_size(group)
correction = 2 * (comm_size - 1) / comm_size
comm_vol = correction * tensor.element_size() * tensor.numel()
profiler.activate_profiler("ncclKernel_AllReduce_", comm_vol)
profiler.pending_op = torch_all_reduce(tensor, op, group, async_op)
if async_op:
return CommHandler(profiler)
profiler.close_profiler(group)
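# A note on the volume estimate used in all_reduce above: the 2 * (N - 1) / N factor is the
# standard ring all-reduce traffic model (a reduce-scatter followed by an all-gather), so
# comm_vol approximates the bytes each rank actually puts on the wire.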
def reduce_scatter(output: torch.Tensor,
input_list: List[torch.Tensor],
op: ReduceOp = ReduceOp.SUM,
group=None,
async_op: bool = False,
profiler: CommProfiler = None) -> Optional[CommHandler]:
async_check(profiler)
comm_size = dist.get_world_size(group)
correction = (comm_size - 1) / comm_size
comm_vol = 0
for tensor in input_list:
comm_vol += tensor.element_size() * tensor.numel()
comm_vol *= correction
profiler.activate_profiler("ncclKernel_ReduceScatter_", comm_vol)
profiler.pending_op = torch_reduce_scatter(output, input_list, op, group, async_op)
if async_op:
return CommHandler(profiler)
profiler.close_profiler(group)
def all_gather(tensor_list: List[torch.Tensor],
tensor: torch.Tensor,
group=None,
async_op: bool = False,
profiler: CommProfiler = None) -> Optional[CommHandler]:
async_check(profiler)
comm_size = dist.get_world_size(group)
correction = (comm_size - 1) / comm_size
comm_vol = 0
for ten in tensor_list:
comm_vol += ten.element_size() * ten.numel()
comm_vol *= correction
profiler.activate_profiler("ncclKernel_AllGather_", comm_vol)
profiler.pending_op = torch_all_gather(tensor_list, tensor, group, async_op)
if async_op:
return CommHandler(profiler)
profiler.close_profiler(group)
def broadcast(tensor: torch.Tensor,
src: int,
group=None,
async_op: bool = False,
profiler: CommProfiler = None) -> Optional[CommHandler]:
async_check(profiler)
comm_vol = 1.0 * tensor.element_size() * tensor.numel()
profiler.activate_profiler("ncclKernel_Broadcast_", comm_vol)
profiler.pending_op = torch_broadcast(tensor, src, group, async_op)
if async_op:
return CommHandler(profiler)
profiler.close_profiler(group)
def reduce(tensor: torch.Tensor,
dst: int,
op: ReduceOp = ReduceOp.SUM,
group=None,
async_op: bool = False,
profiler: CommProfiler = None) -> Optional[CommHandler]:
async_check(profiler)
comm_vol = 1.0 * tensor.element_size() * tensor.numel()
profiler.activate_profiler("ncclKernel_Reduce_", comm_vol)
profiler.pending_op = torch_reduce(tensor, dst, op, group, async_op)
if async_op:
return CommHandler(profiler)
profiler.close_profiler(group)
|
from setuptools import setup
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
packages = ["appex", "clipboard", "console", "reminders", "sound", "speech"]
packages_stubs = [p + "-stubs" for p in packages]
package_dir = {p + "-stubs": path.join("stubs", p) for p in packages}
package_data = {ps: ["__init__.pyi"] for ps in packages_stubs}
setup(
name="pythonista-stubs",
version="0.0.3",
description="A collection of Pythonista stub files",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/hbmartin/pythonista-stubs",
package_dir=package_dir,
packages=packages_stubs,
package_data=package_data,
install_requires=["typing_extensions", "mypy", "Pillow"],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: Apache Software License",
"Operating System :: iOS",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Libraries :: Python Modules",
"Typing :: Typed",
],
license="Apache License 2.0",
keywords="pythonista stubs ios ide",
project_urls={"Bug Reports": "https://github.com/hbmartin/pythonista-stubs/issues"},
)
|
import re
from decimal import Decimal
import datetime
import hashlib
from app.core.utils import check_datetime_format
from app.core.init import SELECT_PARTICIPANT, INSERT_PARTICIPANT, SELECT_TANSACTIONS
from .transfer import Transfer
class Participant:
@classmethod
async def get_by_id(cls, app, participant_id):
async with app['pg'].acquire() as pgcon:
async with pgcon.cursor() as c:
await c.execute('select email, id, currency from participants where id=%(id)s', {'id': participant_id})
return await c.fetchone()
@classmethod
async def create(cls, app, account):
errors = []
async with app['pg'].acquire() as pgcon:
async with pgcon.cursor() as c:
try:
await c.execute(
INSERT_PARTICIPANT,
(
account.get('email'),
hashlib.sha1(
account.get('password').encode()).hexdigest(),
account.get('currency')
))
except Exception as e:
errors.append(e.pgerror)
return errors
@classmethod
async def get(cls, app, email: str, password: str=None):
async with app['pg'].acquire() as pgcon:
async with pgcon.cursor() as c:
if password:
await c.execute(SELECT_PARTICIPANT, (
email,
hashlib.sha1(password.encode()).hexdigest(),
))
participant = await c.fetchone()
else:
await c.execute(
'select email, id, currency from participants where email=%s;',
(email, )
)
return await c.fetchone()
if participant:
return participant[0]
@classmethod
async def make_transaction(cls, app, participant_id, data):
errors = []
payee = await cls.get(app, data.get('payee'))
if not payee:
errors.append({1001: 'Получатель не найден!'})
return {'errors': errors}
amount = data.get('amount')
if not amount:
return {'errors': {'1010': 'Не указано поле amount!'}}
amount = str(amount)
m = re.match(r'\d+(\.\d{0,4})?', amount)
if not m:
return {'errors': {1002: 'Сумма платежа должна быть десятичным числом 0000000[.00[00]]!'}}
amount = Decimal(amount[m.span()[0]:m.span()[1]])
participant_id = int(participant_id)
payer = await cls.get_by_id(app, participant_id)
funds = await Transfer.get_funds(app, payer, amount)
if (funds < amount) and (participant_id != payee[1]):
errors.append({1003: 'Не достаточно средств!'})
else:
transfer_errors = await Transfer.create(app, payer, payee, amount, data.get('description'))
if transfer_errors:
errors += transfer_errors
return {'result': 'success'} if not errors else {'errors': errors}
@classmethod
async def get_transactions(cls, app, participant_id, data):
#TODO: Proper pagination is really needed here, but skipping it for now...
errors = []
date1 = data.get('date_from')
if not date1:
now = datetime.datetime.now()
date1 = datetime.datetime(now.year, now.month, 1)
else:
m = check_datetime_format(date1)
if not m:
date1 = datetime.datetime.now().date()
errors.append({2001: f'Дата начала - не дата/время! Но продолжаем с {date1}'})
date2 = data.get('date_to')
if not date2:
date2 = datetime.datetime.now()
else:
m = check_datetime_format(date2)
if not m:
date2 = datetime.datetime.now()
errors.append({2002: f'Дата конца - не дата/время! Но продолжаем с {date2}'})
participant_id = int(participant_id)
participant = await cls.get_by_id(app, participant_id)
result = []
async with app['pg'].acquire() as pgcon:
async with pgcon.cursor() as c:
await c.execute(SELECT_TANSACTIONS,
{'date1': date1,
'date2': date2,
'currency': participant[2],
'payer_id': participant[1]},
)
result = [
{'date': r[0],
'currency': r[1],
'debt': str(r[2]),
'credt': r[3],
'email': r[4],
'description': r[5]} for r in await c.fetchall()]
return {'results': result} if not errors else {'results': result, 'errors': errors}
|
from twisted.web.client import getPage
from phxd.constants import *
from phxd.packet import HLPacket
from phxd.permissions import PRIV_MODIFY_USERS
from phxd.server.utils import certifyIcon
def gotIcon(data, user, server):
if user and data and len(data) > 0 and certifyIcon(data):
user.gif = data
change = HLPacket(HTLS_HDR_ICON_CHANGE)
change.addNumber(DATA_UID, user.uid)
server.sendPacket(change)
def handle(server, user, args, ref):
parts = str(args).strip().split()
if len(parts) > 1:
if user.hasPriv(PRIV_MODIFY_USERS):
user = server.getUser(int(parts[0]))
arg = parts[1]
else:
return
else:
arg = parts[0]
try:
otherUid = int(arg)
otherUser = server.getUser(otherUid)
user.gif = otherUser.gif
change = HLPacket(HTLS_HDR_ICON_CHANGE)
change.addNumber(DATA_UID, user.uid)
server.sendPacket(change)
except ValueError:
getPage(arg).addCallback(gotIcon, user, server)
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import math
import dateutil.parser
import logging
from gsuite_exporter import auth
from gsuite_exporter.exporters.base import BaseExporter
logger = logging.getLogger(__name__)
class StackdriverExporter(BaseExporter):
"""Convert Admin SDK logs to logging entries and send them to Stackdriver
Logging API.
Args:
project_id (str): The id of the project to write log entries to.
credentials_path (str, optional): The path to the GSuite Admin credentials.
"""
SCOPES = [
'https://www.googleapis.com/auth/logging.read',
'https://www.googleapis.com/auth/logging.write'
]
LOGGING_API_VERSION = 'v2'
def __init__(self,
project_id,
credentials_path=None):
logger.debug("Initializing Stackdriver Logging API ...")
self.api = auth.build_service(
api='logging',
version=StackdriverExporter.LOGGING_API_VERSION,
credentials_path=credentials_path,
scopes=StackdriverExporter.SCOPES)
self.project_id = "projects/{}".format(project_id)
def send(self, records, log_name, dry=False):
"""Writes a list of Admin SDK records to Stackdriver Logging API.
Args:
records (list): A list of log records.
log_name (str): The log name to write (e.g: 'logins').
dry (bool): Toggle dry-run mode (default: False).
Returns:
`googleapiclient.http.HttpRequest`: The API response object.
"""
res = None
destination = self.get_destination(log_name)
if records:
entries = self.convert(records)
body = {
'entries': entries,
'logName': '{}'.format(destination),
'dryRun': dry
}
logger.debug("Writing {} entries to Stackdriver Logging API @ '{}'".format(
len(entries),
destination))
res = self.api.entries().write(body=body).execute()
return res
def convert(self, records):
"""Convert a bunch of Admin API records to Stackdriver Logging API
entries.
Args:
records (list): A list of Admin API records.
Returns:
list: A list of Stackdriver Logging API entries.
"""
return [self.__convert(i) for i in records]  # a list (not a lazy map) so len() and serialization work under Python 3
def get_destination(self, log_name):
return "{}/logs/{}".format(self.project_id, log_name)
def get_last_timestamp(self, log_name):
"""Last log timestamp from Stackdriver Logging API given our project id
and log name.
"""
destination = self.get_destination(log_name)
query = {
'orderBy': 'timestamp desc',
'pageSize': 1,
'resourceNames': [self.project_id],
'filter': 'logName={}'.format(destination)
}
log = self.api.entries().list(body=query).execute()
try:
timestamp = log['entries'][0]['timestamp']
except (KeyError, IndexError):
timestamp = None
return timestamp
def __convert(self, record):
"""Converts an Admin SDK log entry to a Stackdriver Log entry.
Args:
record (dict): The Admin SDK record as JSON.
Returns:
dict: The Stackdriver Logging entry as JSON.
"""
return {
'timestamp': {'seconds': int(time.time())},
'insertId': record['etag'],
'jsonPayload': {
'requestMetadata': {'callerIp': record.get('ipAddress')},
'authenticationInfo': {
'callerType': record['actor'].get('callerType'),
'principalEmail': record['actor'].get('email')
},
'methodName': record['events'][0]['name'],
'parameters': record['events'][0].get('parameters'),
'report_timestamp': self.__convert_timestamp(record)
},
'resource': {'type': 'global'}
}
def __convert_timestamp(self, record):
"""Converts timestamp for an Admin API record into a time dict.
Args:
record (dict): An Admin API record.
Returns:
dict: A dict with a key 'seconds' containing the record timestamp.
"""
remainder, seconds = math.modf(time.mktime(
dateutil.parser.parse(record['id']['time']).timetuple()
))
return {'seconds': int(seconds)}
|
"""
Unpack a DNS message from received data.
"""
import struct
class Message:
"""
┏━━━━━━━━━━
┃ Header ┃ 6 fields, 12 bytes total
┃━━━━━━━━━ ┃
┃ Question ┃ question section
┃━━━━━━━━━ ┃
┃ Answer ┃ answer section
┃━━━━━━━━━ ┃
┃ Authority ┃ authority section
┃━━━━━━━━━ ┃
┃ Additional ┃ additional section
┃━━━━━━━━━ ┃
"""
def __init__(self, data):
self.header = Header(data[0:12])
self.questions = []
self.answers = []
self.authorities = []
self.additionals = []
index = 12
if self.header.QDCOUNT != 0 :
index = self._handle_question(data, index)
if self.header.ANCOUNT != 0 :
index = self._handle_resource(data, index, self.answers, self.header.ANCOUNT)
if self.header.NSCOUNT != 0 :
index = self._handle_resource(data, index, self.authorities, self.header.NSCOUNT)
if self.header.ARCOUNT != 0 :
index = self._handle_resource(data, index, self.additionals, self.header.ARCOUNT)
def _handle_question(self, data, index):
for i in range(self.header.QDCOUNT):
next_index = index + data[index:].find(0) + 5
self.questions.append(Question(data[index:next_index]))
index = next_index
return index
@staticmethod
def _handle_resource(data, index, rlist, count):
next_index = index
for i in range(count):
if data[next_index] >> 6 == 3:
start = next_index + 2
else:
start = next_index + data[next_index:].find(0) + 1
Rtype, Rclass, Rttl, Rdlengh = struct.unpack('>HHIH',data[start:start+10])
rlist.append(Resource(data[next_index:start], Rtype, Rclass, Rttl, Rdlengh, data[start+10:start+10+Rdlengh]))
next_index = start + 10 + Rdlengh
return next_index
class Header:
"""
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| ID | message ID, identical in request and response, 2 bytes
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
|QR| Opcode |AA|TC|RD|RA| Z | RCODE |
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| QDCOUNT | number of questions, 2 bytes, unsigned 16-bit integer
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| ANCOUNT | number of answer resource records, 2 bytes, unsigned 16-bit integer
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| NSCOUNT | number of authority resource records, 2 bytes, unsigned 16-bit integer
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| ARCOUNT | number of additional resource records, 2 bytes, unsigned 16-bit integer
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
QR(1bit) query/response flag: 0 = query, 1 = response
Opcode(4bit) 0 = standard query, 1 = inverse query, 2 = server status request
AA(1bit) authoritative answer
TC(1bit) truncation flag: 0 = message not truncated, 1 = message too long and truncated (only the first 512 bytes returned)
RD(1bit) recursion desired: 0 = do not query recursively, 1 = query recursively (ask the name server to recurse)
RA(1bit) recursion available: 0 = the answering server does not support recursive queries, 1 = it does
Z(3bit) unused, must be set to 0
RCODE(4bit) response code: 0 = no error, 3 = name error, 2 = server failure
"""
def __init__(self,data):
self.ID, self.FLAGS, self.QDCOUNT, self.ANCOUNT, self.NSCOUNT, self.ARCOUNT = struct.unpack('>HHHHHH',data)
# extract the header flag fields by shifting and masking (Python ints are unbounded,
# so explicit masks are required rather than relying on 16-bit truncation)
self.QR = self.FLAGS >> 15 & 0x01
self.Opcode = self.FLAGS >> 11 & 0x0f
self.AA = self.FLAGS >> 10 & 0x01
self.TC = self.FLAGS >> 9 & 0x01
self.RD = self.FLAGS >> 8 & 0x01
self.RA = self.FLAGS >> 7 & 0x01
self.Z = self.FLAGS >> 4 & 0x07
self.RCODE = self.FLAGS & 0x0f
class Question:
"""
0 1 2 3 4 5 9 7 8 9 0 1 2 3 4 5
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| | 查询名字,长度不固定,不使用填充字节,如果是反向查询,则为IP,
/ QNAME / 每个标识符以首字节的计数值来说明随后标识符的字节长度,
/ / 每个名字以最后字节为0结束
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| QTYPE | 2字节,查询类型,取值可以为任何可用的类型值
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| QCLASS | 2字节,查询类:通常为1,表示IN,表明是Internet数据。
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
"""
def __init__(self, data):
self.QTYPE, self.QCLASS = struct.unpack('>HH',data[-4:])
self.QNAME = data[:-4]
if self.QTYPE == 1 or self.QTYPE == 28:
self.webname = self._get_webname(self.QNAME).lower()
elif self.QTYPE == 2:
self.ip = self.QNAME
@staticmethod
def _get_webname(data):
i = 1
webname = ''
while True:
char = data[i]
if char == 0:
break
if char < 32:
webname = webname + '.'
else:
webname = webname + chr(char)
i = i + 1
return webname
class Resource:
"""
资源记录(包括回答区域,授权区域和附加区域)
0 1 2 3 4 5 9 7 8 9 0 1 2 3 4 5
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| | 它的格式和Queries区域的查询名字字段是一样的
/ NAME / 当报文中域名重复出现的时候,该字段使用2个字节的偏移指针来表示
/ / 前两位 11,用于识别指针。后14位从DNS报文的开始处计数(从0开始),指出该报文中的相应字节数
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| TYPE | 2字节,查询类型,取值可以为任何可用的类型值,与question相同
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| CLASS | 2字节,查询类:通常为1,表示IN,表明是Internet数据。
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| Time To Live(TTL) | 4字节无符号整数表示资源记录可以缓存的时间。 以秒为单位,
| | 表示的是资源记录的生命周期,用于当地址解析程序取出资源记录后决定保存及使用缓存数据的时间
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| RDLENGTH | 2字节,无符号整数表示RDATA的长度
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| RDATA | 可变长字段,不定长字符串来表示记录,格式与TYPE和CLASS有关
/ /
/ /
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
"""
    def __init__(self, rname, rtype, rclass, rttl, rdlength, rdata):
self.NAME = rname
self.TYPE = rtype
self.CLASS = rclass
self.TTL = rttl
self.RDLENGTH = rdlength
self.RDATA = rdata
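if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module): build a
    # standard A query for example.com, send it over UDP and parse the reply with
    # the classes above. The resolver address 8.8.8.8 is an assumption.
    import socket

    header = struct.pack('>HHHHHH', 0x1234, 0x0100, 1, 0, 0, 0)  # ID, RD flag set, 1 question
    question = b'\x07example\x03com\x00' + struct.pack('>HH', 1, 1)  # QTYPE=A, QCLASS=IN
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.settimeout(2.0)
        sock.sendto(header + question, ('8.8.8.8', 53))
        reply, _ = sock.recvfrom(512)
    message = Message(reply)
    print('answers:', message.header.ANCOUNT)
    for q in message.questions:
        print('queried name:', q.webname)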
|
import pandas as pd
ds1 = pd.Series([12, 14, 16, 18, 20, 24])
ds2 = pd.Series([2, 4, 6, 8, 10, 12])
print(ds1+ds2)
print(ds1-ds2)
print(ds1*ds2)
print(ds1==ds2)
print(ds1>ds2)
print(ds1<ds2) |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ScriptParameterInfo(object):
"""
Information about script parameters.
isOverwritten specifies that the default parameter present in the script content is overwritten.
"""
def __init__(self, **kwargs):
"""
Initializes a new ScriptParameterInfo object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param script_parameter:
The value to assign to the script_parameter property of this ScriptParameterInfo.
:type script_parameter: oci.apm_synthetics.models.ScriptParameter
:param is_overwritten:
The value to assign to the is_overwritten property of this ScriptParameterInfo.
:type is_overwritten: bool
"""
self.swagger_types = {
'script_parameter': 'ScriptParameter',
'is_overwritten': 'bool'
}
self.attribute_map = {
'script_parameter': 'scriptParameter',
'is_overwritten': 'isOverwritten'
}
self._script_parameter = None
self._is_overwritten = None
@property
def script_parameter(self):
"""
**[Required]** Gets the script_parameter of this ScriptParameterInfo.
:return: The script_parameter of this ScriptParameterInfo.
:rtype: oci.apm_synthetics.models.ScriptParameter
"""
return self._script_parameter
@script_parameter.setter
def script_parameter(self, script_parameter):
"""
Sets the script_parameter of this ScriptParameterInfo.
:param script_parameter: The script_parameter of this ScriptParameterInfo.
:type: oci.apm_synthetics.models.ScriptParameter
"""
self._script_parameter = script_parameter
@property
def is_overwritten(self):
"""
**[Required]** Gets the is_overwritten of this ScriptParameterInfo.
If parameter value is default or overwritten.
:return: The is_overwritten of this ScriptParameterInfo.
:rtype: bool
"""
return self._is_overwritten
@is_overwritten.setter
def is_overwritten(self, is_overwritten):
"""
Sets the is_overwritten of this ScriptParameterInfo.
If parameter value is default or overwritten.
:param is_overwritten: The is_overwritten of this ScriptParameterInfo.
:type: bool
"""
self._is_overwritten = is_overwritten
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
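# Illustrative usage sketch (not part of the generated model): attributes can be
# supplied as keyword arguments or assigned through the setters afterwards.
#
#   info = ScriptParameterInfo(is_overwritten=False)
#   info.script_parameter = param  # an oci.apm_synthetics.models.ScriptParameter instance
#   print(info)                    # formatted_flat_dict renders the current state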
|
import unicodedata
import re
import random
import torch
from torch.autograd import Variable
# loading data files
# indexing words
# Special marker tokens (words)
PAD_token = 0
SOS_token = 1  # start of sentence
EOS_token = 2  # end of sentence
USE_CUDA = False
class Lang:
def __init__(self, name):
self.name = name
        self.trimmed = False  # whether rarely used words have been trimmed away
        self.word2index = {}  # word -> index
        self.word2count = {}
        self.index2word = {0: "PAD", 1: "SOS", 2: "EOS"}  # index -> word
        self.n_words = 3  # number of default tokens already in the dictionary (PAD, SOS, EOS)
    def index_words(self, sentence):  # add every word of a sentence to the dictionary
for word in sentence.split(' '):
self.index_word(word)
    def index_word(self, word):  # add a single word to the dictionary
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
    # Remove words that appear fewer than min_count times in the corpus
def trim(self, min_count):
if self.trimmed: return
self.trimmed = True
keep_words = []
for k, v in self.word2count.items():
if v >= min_count:
keep_words.append(k)
print('keep_words %s / %s = %.4f' % (
len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
))
# Reinitialize dictionaries
self.word2index = {}
self.word2count = {}
self.index2word = {0: "PAD", 1: "SOS", 2: "EOS"}
self.n_words = 3 # Count default tokens
for word in keep_words:
self.index_word(word)
# reading and decoding files
# Turn a Unicode string to plain ASCII, thanks to:
# http://stackoverflow.com/a/518232/2809427
def unicode_to_ascii(s):  # convert a Unicode string to plain ASCII
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalize_string(s):  # strip out characters that are not letters or kept punctuation
    s = unicode_to_ascii(s.lower().strip())
    s = re.sub(r"([,.!?])", r" \1 ", s)  # put spaces around the listed punctuation characters
    s = re.sub(r"[^a-zA-Z,.!?]+", r" ", s)  # replace anything that is neither a letter nor kept punctuation with a space
    s = re.sub(r"\s+", r" ", s).strip()  # collapse whitespace and trim leading/trailing spaces
return s
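# For example, normalize_string("Je suis   l'étudiant!") returns "je suis l etudiant !"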
def read_langs(lang1, lang2, reverse=False):
print("Reading lines...")
# Read the file and split into lines
# filename = '../data/%s-%s.txt' % (lang1, lang2)
filename = '../%s-%s.txt' % (lang1, lang2)
lines = open(filename).read().strip().split('\n')
# Split every line into pairs and normalize
pairs = [[normalize_string(s) for s in l.split('\t')] for l in lines]
# Reverse pairs, make Lang instances
if reverse:
pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
MIN_LENGTH = 3
MAX_LENGTH = 25
def filter_pairs(pairs):  # drop sentence pairs whose lengths fall outside the allowed range
filtered_pairs = []
for pair in pairs:
if len(pair[0]) >= MIN_LENGTH and len(pair[0]) <= MAX_LENGTH \
and len(pair[1]) >= MIN_LENGTH and len(pair[1]) <= MAX_LENGTH:
filtered_pairs.append(pair)
return filtered_pairs
def prepare_data(lang1_name, lang2_name, reverse=False):
input_lang, output_lang, pairs = read_langs(lang1_name, lang2_name, reverse)
print("Read %d sentence pairs" % len(pairs))
pairs = filter_pairs(pairs)
print("Filtered to %d pairs" % len(pairs))
print("Indexing words...")
for pair in pairs:
input_lang.index_words(pair[0])
output_lang.index_words(pair[1])
print('Indexed %d words in input language, %d words in output' % (input_lang.n_words, output_lang.n_words))
return input_lang, output_lang, pairs
input_lang, output_lang, pairs = prepare_data('eng', 'fra', True)
MIN_COUNT = 5  # words that appear fewer than 5 times will be dropped
input_lang.trim(MIN_COUNT)
output_lang.trim(MIN_COUNT)
keep_pairs = []
# Drop sentence pairs that contain a word trimmed out of either dictionary.
for pair in pairs:
input_sentence = pair[0]
output_sentence = pair[1]
keep_input = True
keep_output = True
for word in input_sentence.split(' '):
if word not in input_lang.word2index:
keep_input = False
break
for word in output_sentence.split(' '):
if word not in output_lang.word2index:
keep_output = False
break
# Remove if pair doesn't match input and output conditions
if keep_input and keep_output:
keep_pairs.append(pair)
print("Trimmed from %d pairs to %d, %.4f of total" %\
(len(pairs), len(keep_pairs), len(keep_pairs) / len(pairs)))
pairs = keep_pairs
# Turning training data into Tensors
# Return a list of indexes, one for each word in the sentence, plus EOS
# First look up each word's index in the given language's dictionary
def indexes_from_sentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')] + [EOS_token]
# Pad a sequence with the PAD symbol
# PAD fills the remaining positions when a sentence is shorter than max_length, so every seq ends up the same length
def pad_seq(seq, max_length):
seq += [PAD_token for i in range(max_length - len(seq))]
return seq
def random_batch(batch_size):
input_seqs = []
target_seqs = []
# Choose random pairs
for i in range(batch_size):
pair = random.choice(pairs)
input_seqs.append(indexes_from_sentence(input_lang, pair[0]))
target_seqs.append(indexes_from_sentence(output_lang, pair[1]))
# Zip into pairs, sort by length (descending), unzip
seq_pairs = sorted(zip(input_seqs, target_seqs), key=lambda p: len(p[0]), reverse=True)
input_seqs, target_seqs = zip(*seq_pairs)
# For input and target sequences, get array of lengths and pad with 0s to max length
input_lengths = [len(s) for s in input_seqs]
input_padded = [pad_seq(s, max(input_lengths)) for s in input_seqs]
target_lengths = [len(s) for s in target_seqs]
target_padded = [pad_seq(s, max(target_lengths)) for s in target_seqs]
# Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
input_var = Variable(torch.LongTensor(input_padded)).transpose(0, 1)
target_var = Variable(torch.LongTensor(target_padded)).transpose(0, 1)
if USE_CUDA:
input_var = input_var.cuda()
target_var = target_var.cuda()
return input_var, input_lengths, target_var, target_lengths
input_var, input_length, target_var, target_length = random_batch(2)
print("{}, {}".format(input_length, target_length))
print(input_var)
print(target_var)
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['TestEnvironmentBaseImageQueries.test_get_base_image_by_node 1'] = {
'data': {
'node': {
'componentId': 'quickstart-jupyterlab',
'description': 'Data Science Quickstart using Jupyterlab, numpy, and Matplotlib. A great base for any analysis.',
'developmentTools': [
'jupyterlab'
],
'dockerImageNamespace': 'gigantum',
'dockerImageRepository': 'python3-minimal',
'dockerImageServer': 'hub.docker.com',
'dockerImageTag': '826b6f24-2018-02-09',
'icon': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=',
'id': 'QmFzZUNvbXBvbmVudDpnaWdhbnR1bV9iYXNlLWltYWdlcy10ZXN0aW5nJnF1aWNrc3RhcnQtanVweXRlcmxhYiYx',
'languages': [
'python3'
],
'license': 'MIT',
'name': 'Data Science Quickstart with JupyterLab',
'osClass': 'ubuntu',
'osRelease': '16.04',
'packageManagers': [
'apt',
'pip3'
],
'readme': 'Empty for now',
'tags': [
'ubuntu',
'python3',
'jupyterlab'
],
'url': None
}
}
}
snapshots['TestEnvironmentBaseImageQueries.test_get_available_base_images 1'] = {
'data': {
'availableBases': {
'edges': [
{
'node': {
'componentId': 'quickstart-jupyterlab',
'cudaVersion': None,
'description': 'Data Science Quickstart using Jupyterlab, numpy, and Matplotlib. A great base for any analysis.',
'developmentTools': [
'jupyterlab'
],
'dockerImageNamespace': 'gigantum',
'dockerImageRepository': 'python3-minimal',
'dockerImageServer': 'hub.docker.com',
'dockerImageTag': '1effaaea-2018-05-23',
'icon': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=',
'id': 'QmFzZUNvbXBvbmVudDpnaWdhbnR1bV9iYXNlLWltYWdlcy10ZXN0aW5nJnF1aWNrc3RhcnQtanVweXRlcmxhYiYy',
'installedPackages': [
'apt|vim|2:7.4.1689-3ubuntu1.2',
'pip3|numpy|1.14.0',
'pip3|matplotlib|2.1.1',
'pip3|jupyter|1.0.0',
'pip3|jupyterlab|0.31.1',
'pip3|ipywidgets|7.1.0',
'pip3|pandas|0.22.0'
],
'languages': [
'python3'
],
'license': 'MIT',
'name': 'Data Science Quickstart with JupyterLab',
'osClass': 'ubuntu',
'osRelease': '18.04',
'packageManagers': [
'apt',
'pip3'
],
'readme': 'Empty for now',
'revision': 2,
'schema': 1,
'tags': [
'ubuntu',
'python3',
'jupyterlab'
],
'url': None
}
},
{
'node': {
'componentId': 'ut-busybox',
'cudaVersion': None,
'description': 'Super lightweight image for build testing',
'developmentTools': [
],
'dockerImageNamespace': 'library',
'dockerImageRepository': 'busybox',
'dockerImageServer': 'hub.docker.com',
'dockerImageTag': '1.28.0',
'icon': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=',
'id': 'QmFzZUNvbXBvbmVudDpnaWdhbnR1bV9iYXNlLWltYWdlcy10ZXN0aW5nJnV0LWJ1c3lib3gmMA==',
'installedPackages': [
],
'languages': [
],
'license': 'MIT',
'name': 'Unit Test Busybox',
'osClass': 'busybox',
'osRelease': '1.28',
'packageManagers': [
'apt'
],
'readme': 'Empty for now',
'revision': 0,
'schema': 1,
'tags': [
'busybox'
],
'url': None
}
},
{
'node': {
'componentId': 'ut-jupyterlab-1',
'cudaVersion': None,
'description': 'Unit Test 1',
'developmentTools': [
'jupyterlab'
],
'dockerImageNamespace': 'gigdev',
'dockerImageRepository': 'gm-quickstart',
'dockerImageServer': 'hub.docker.com',
'dockerImageTag': '9718fedc-2018-01-16',
'icon': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=',
'id': 'QmFzZUNvbXBvbmVudDpnaWdhbnR1bV9iYXNlLWltYWdlcy10ZXN0aW5nJnV0LWp1cHl0ZXJsYWItMSYw',
'installedPackages': [
'apt|supervisor|latest',
'apt|curl|latest',
'apt|gosu|latest',
'apt|build-essential|latest',
'apt|python3-dev|latest',
'apt|python3-pip|latest',
'apt|git|latest',
'apt|curl|latest',
'apt|vim|latest',
'pip3|numpy|1.14.0',
'pip3|matplotlib|2.1.1',
'pip3|jupyter|1.0.0',
'pip3|jupyterlab|0.31.1',
'pip3|ipywidgets|7.1.0',
'pip3|pandas|0.22.0'
],
'languages': [
'python3'
],
'license': 'MIT',
'name': 'Unit Test1',
'osClass': 'ubuntu',
'osRelease': '16.04',
'packageManagers': [
'apt',
'pip3'
],
'readme': 'Empty for now',
'revision': 0,
'schema': 1,
'tags': [
'ubuntu',
'python3',
'jupyterlab'
],
'url': None
}
},
{
'node': {
'componentId': 'ut-jupyterlab-2',
'cudaVersion': None,
'description': 'Unit Test 2',
'developmentTools': [
'jupyterlab'
],
'dockerImageNamespace': 'gigdev',
'dockerImageRepository': 'gm-quickstart',
'dockerImageServer': 'hub.docker.com',
'dockerImageTag': '9718fedc-2018-01-16',
'icon': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=',
'id': 'QmFzZUNvbXBvbmVudDpnaWdhbnR1bV9iYXNlLWltYWdlcy10ZXN0aW5nJnV0LWp1cHl0ZXJsYWItMiYw',
'installedPackages': [
'apt|supervisor|latest',
'apt|python3-dev|latest',
'apt|python3-pip|latest',
'apt|git|latest',
'apt|curl|latest',
'apt|vim|latest',
'pip3|numpy|1.14.0',
'pip3|matplotlib|2.1.1',
'pip3|jupyter|1.0.0',
'pip3|jupyterlab|0.31.1'
],
'languages': [
'python3'
],
'license': 'MIT',
'name': 'Unit Test 2',
'osClass': 'ubuntu',
'osRelease': '16.04',
'packageManagers': [
'apt',
'pip3'
],
'readme': 'Empty for now',
'revision': 0,
'schema': 1,
'tags': [
'ubuntu',
'python3',
'jupyterlab'
],
'url': None
}
},
{
'node': {
'componentId': 'ut-jupyterlab-3',
'cudaVersion': None,
'description': 'Unit Test 3',
'developmentTools': [
'jupyterlab'
],
'dockerImageNamespace': 'gigdev',
'dockerImageRepository': 'gm-quickstart',
'dockerImageServer': 'hub.docker.com',
'dockerImageTag': '9718fedc-2018-01-16',
'icon': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=',
'id': 'QmFzZUNvbXBvbmVudDpnaWdhbnR1bV9iYXNlLWltYWdlcy10ZXN0aW5nJnV0LWp1cHl0ZXJsYWItMyYw',
'installedPackages': [
'apt|supervisor|latest',
'apt|python3-dev|latest',
'apt|python3-pip|latest',
'apt|git|latest',
'apt|curl|latest',
'pip3|jupyter|1.0.0',
'pip3|jupyterlab|0.31.1'
],
'languages': [
'python3'
],
'license': 'MIT',
'name': 'Unit Test 3',
'osClass': 'ubuntu',
'osRelease': '16.04',
'packageManagers': [
'apt',
'pip3'
],
'readme': 'Empty for now',
'revision': 0,
'schema': 1,
'tags': [
'ubuntu',
'python3',
'jupyterlab'
],
'url': None
}
},
{
'node': {
'componentId': 'ut-rstudio-server',
'cudaVersion': None,
'description': 'R + tidyverse packages in RStudio® Server',
'developmentTools': [
'rstudio',
'jupyterlab',
'notebook'
],
'dockerImageNamespace': 'gigantum',
'dockerImageRepository': 'rstudio-server',
'dockerImageServer': 'hub.docker.com',
'dockerImageTag': '87c90e7834-2019-07-19',
'icon': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAACXBIWXMAAAsSAAALEgHS3X78AAAGmklEQVRoge1a729TVRh+aBYTh9Cq0ShR10ZJjH5YiX8AnQy3boMVlo2NDdphZJkG6L7IBz/Q/QXsA0YjIEWCDLZBAYO4BLld2Rwak+4P0HQOMJBoVqMmEr2vee+P9v6+t2NVsnCy2/bcX+d9znPe933OOVtFRFgJxbciUDwC8hCWFQOkphov/VrIhYkQIBBABJIOviJ9F5o2NxSWu80HilrZGzMBECJExEeYgDCI/HrD5d9ECiiUfheJKE+AIH0TCVtamxb/MyBT07NBIoqBKEFE9bK9xl43sSAb7wyM/+YIlCaidGxrS0WgPAPJTc9yrycJ1A7FEFIMsQZiZ6yhbnsNJxnQ9m1twrIAyc0wAKSIaCNcjV8SC3yHqa7prCyBEp0d7Y5+ZQskN3MzACIGcED/YrWhqrCg1DWglHsADHd1xlIVAcnNzHLU4bFa/z+xYKrLzdMc+2dPd4eJHROQ3DSDIIFA/opYgMHwsvFZBYhgwwK3x6FaiXgm48tsyW1ytIv07uzM2wKZmp7ll3GD/iWyUCRChjuC39Pa8lZF+WJs4mJAARaTIiNQV25PCwxFEEX6+rpKYEpApm58EyawAfAvgQXu9XS0uTFdieFuZXT0PNuUJKK4BUNFZnH3ru6CDkg2N5N3zgsWQwjSmE1Gmxo9hcilltOfjwXZZ0G0UfUfuXnKxuM9kRKQbG6Go9OhClkYjjZtMkWRV/ff4HOH1PvkUn6X2qvqeX1dd22eCHmAMgAyP368efHUqbMpAtupCwT9/f29aZ8wNR0QSaIPRCJI1PoBQTTUZT8QG6xAlMsDg+Bn6sDJFzhBRIXQwGRy164dKRJpWLKrbI9kh48IsZJzezlEijgPpWUBYbzHD9Dh0MBX6Xi8h8FkNbbWHTv2WcxHJMY8sCCzRTQUbW7UhT0TDM8gtM84gtA+Ew/uvcqjJ2Gwj4FIytWZDVE65luaG0ecQJhZcQJhvmZ9HsZnUnv29BVIFC9qUkHYpw4rBxbk8Qg4+EQ1QZiu+eve+ZLdQSj7iVhf4zE784enEOsRBCvbgoWhEUmcuncKSyhBq8ssgGjr5QS0pbXJY5b2xET6h482WXZMaGCSz290Yla2T9SlB5+iXbS+oPENUScPvBcvw8m2CG4g9FJJOuZ8kkDU+4LZ0VlAXboS8cqI3gh7lmyeD3rwo4IkNFXfFilfQyJlCGgvaxhbyZ2Qe8sFhgcQVqSEBiaDAIdVxN2DgeQfSY1LZFZ9cWUyoCD0WxhvmOwgtH1bm6OvrH/vuiRRvEQnr8nTACK7cLw18uGRowWS1fH8vn17g77W6OZFEsURq/wB0zkx48aIOxMPBII/k0eOHOW1A1XiyxKFP9pamzjtz+k0jKjTM2q9fmz8oqNUryKIIkD9Bzf8vMjGy74rZvfvH0hDu9KopP2iBQsahqT74mfPXRBGz54P2ECpBogsQJGDG+7wpC2juEGRRMlvoQPSvjWaJ5FiFiyUNVgZFM8LCmdGJ2LWrCwbiGEAG9gn3g/fCZBIBc7iDEIkihxIDpb81TRnn7hwmRff0qTMFNVGdKxA95uVaKqvt0uKaC8PXjPMR5bOBIDiEzV/pwdfv8uhNg51GgFEkslB+zm7WsYnLoUVCutsVjNKCgClGRvNgzBy+Pung7/dX3XABcRJQJIoASWs+y1AlNp79vH72L3+npT4QEgkhwZNCtx2XevcWCZAyrqWBQu2DAkLtcjeWu3GREPhk2aJwc4Pxnd8e6921A6E+sxrT/5xM/rCr81DQ+9aLqW6rjSeGZ3gjJ4idb6sZ0GTZ+Tv7MJqTN2udQKBnet/wUtr/iqx+t3dWly7vdYWhAKQpVT49gnrFUfX/ZGe7g6hp7uDVWmDtHypiWJW0c2rT2iPN575HWsf+8cJhCTfAdjOhzxv9PTu7BT6ersYUIhninLegS7vlP3IBQRgmv+8uW7Rw/yE2tclMpaar+KNHmUdiXtm5ET6NAu8CHhvRN0fAfxu0UkxnhNcnldKWLi+suZPAXgqA/AQtgShdg4nwKDRrmXfng4NTFqGX4NBDT8dazEJ0Bffvswr/9etJL9BOQ/fSW/TzVirtIfoliesy8LxLTwXybqA4Hry+fh5HStVAOIFhOMoSLqAgJJ3qsuI+0TKeSjf+nQrJ7uTVs8bOiX+3O7xkuNXhRFrI6yu2ZaUCwj1fCkcL7uzB/de5c3SoAuI/MLxNsfNznWJTBiggAMI9Xf+7qnOxUf/VPOwlUdAHrayMoAA+Bf/LSDw4OFwPgAAAABJRU5ErkJggg==',
'id': 'QmFzZUNvbXBvbmVudDpnaWdhbnR1bV9iYXNlLWltYWdlcy10ZXN0aW5nJnV0LXJzdHVkaW8tc2VydmVyJjE=',
'installedPackages': [
'apt|r-recommended|3.6.1-1bionic',
'apt|r-cran-tidyverse|1.2.1-3cran1ppa0bionic0',
'apt|r-cran-data.table|1.12.2-1cran1ppabionic0'
],
'languages': [
'R',
'python3'
],
'license': 'MIT',
'name': 'R Tidyverse in RStudio® Server',
'osClass': 'ubuntu',
'osRelease': '18.04',
'packageManagers': [
'apt',
'pip',
'conda3'
],
'readme': '''A Base containing R 3.6 and Tidyverse from the official CRAN PPA configured
for use in RStudio® Server. A conda-managed JupyterLab is also installed, and
OpenBLAS is configured.
You should generally use Apt with `r-cran-<package name>` to quickly install
"package name" from CRAN. You can also use Docker snippets for approaches
like `install.packages` or `devtools::install_github`. Don't hesitate to use
the "discuss" link (in the lower-right "?" bubble in the app) if you\'d like
some guidance!
*RStudio® and the RStudio logo are registered trademarks of RStudio, Inc.*
''',
'revision': 1,
'schema': 1,
'tags': [
'ubuntu',
'rstats',
'rstudio'
],
'url': None
}
}
]
}
}
}
snapshots['TestEnvironmentBaseImageQueries.test_get_available_base_images_pagination 1'] = {
'data': {
'availableBases': {
'edges': [
{
'cursor': 'MA==',
'node': {
'componentId': 'quickstart-jupyterlab',
'description': 'Data Science Quickstart using Jupyterlab, numpy, and Matplotlib. A great base for any analysis.',
'developmentTools': [
'jupyterlab'
],
'dockerImageNamespace': 'gigantum',
'dockerImageRepository': 'python3-minimal',
'dockerImageServer': 'hub.docker.com',
'dockerImageTag': '1effaaea-2018-05-23',
'icon': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=',
'id': 'QmFzZUNvbXBvbmVudDpnaWdhbnR1bV9iYXNlLWltYWdlcy10ZXN0aW5nJnF1aWNrc3RhcnQtanVweXRlcmxhYiYy',
'languages': [
'python3'
],
'license': 'MIT',
'name': 'Data Science Quickstart with JupyterLab',
'osClass': 'ubuntu',
'osRelease': '18.04',
'packageManagers': [
'apt',
'pip3'
],
'readme': 'Empty for now',
'tags': [
'ubuntu',
'python3',
'jupyterlab'
],
'url': None
}
}
],
'pageInfo': {
'hasNextPage': True
}
}
}
}
snapshots['TestEnvironmentBaseImageQueries.test_get_available_base_images_pagination 2'] = {
'data': {
'availableBases': {
'edges': [
{
'cursor': 'Mg==',
'node': {
'componentId': 'ut-jupyterlab-1',
'description': 'Unit Test 1',
'developmentTools': [
'jupyterlab'
],
'dockerImageNamespace': 'gigdev',
'dockerImageRepository': 'gm-quickstart',
'dockerImageServer': 'hub.docker.com',
'dockerImageTag': '9718fedc-2018-01-16',
'icon': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=',
'id': 'QmFzZUNvbXBvbmVudDpnaWdhbnR1bV9iYXNlLWltYWdlcy10ZXN0aW5nJnV0LWp1cHl0ZXJsYWItMSYw',
'languages': [
'python3'
],
'license': 'MIT',
'name': 'Unit Test1',
'osClass': 'ubuntu',
'osRelease': '16.04',
'packageManagers': [
'apt',
'pip3'
],
'readme': 'Empty for now',
'tags': [
'ubuntu',
'python3',
'jupyterlab'
],
'url': None
}
},
{
'cursor': 'Mw==',
'node': {
'componentId': 'ut-jupyterlab-2',
'description': 'Unit Test 2',
'developmentTools': [
'jupyterlab'
],
'dockerImageNamespace': 'gigdev',
'dockerImageRepository': 'gm-quickstart',
'dockerImageServer': 'hub.docker.com',
'dockerImageTag': '9718fedc-2018-01-16',
'icon': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=',
'id': 'QmFzZUNvbXBvbmVudDpnaWdhbnR1bV9iYXNlLWltYWdlcy10ZXN0aW5nJnV0LWp1cHl0ZXJsYWItMiYw',
'languages': [
'python3'
],
'license': 'MIT',
'name': 'Unit Test 2',
'osClass': 'ubuntu',
'osRelease': '16.04',
'packageManagers': [
'apt',
'pip3'
],
'readme': 'Empty for now',
'tags': [
'ubuntu',
'python3',
'jupyterlab'
],
'url': None
}
}
],
'pageInfo': {
'hasNextPage': True
}
}
}
}
snapshots['TestEnvironmentBaseImageQueries.test_get_available_base_images_pagination 3'] = {
'data': {
'availableBases': {
'edges': [
{
'cursor': 'Mg==',
'node': {
'componentId': 'ut-jupyterlab-1',
'description': 'Unit Test 1',
'developmentTools': [
'jupyterlab'
],
'dockerImageNamespace': 'gigdev',
'dockerImageRepository': 'gm-quickstart',
'dockerImageServer': 'hub.docker.com',
'dockerImageTag': '9718fedc-2018-01-16',
'icon': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=',
'id': 'QmFzZUNvbXBvbmVudDpnaWdhbnR1bV9iYXNlLWltYWdlcy10ZXN0aW5nJnV0LWp1cHl0ZXJsYWItMSYw',
'languages': [
'python3'
],
'license': 'MIT',
'name': 'Unit Test1',
'osClass': 'ubuntu',
'osRelease': '16.04',
'packageManagers': [
'apt',
'pip3'
],
'readme': 'Empty for now',
'tags': [
'ubuntu',
'python3',
'jupyterlab'
],
'url': None
}
}
],
'pageInfo': {
'hasNextPage': True
}
}
}
}
snapshots['TestEnvironmentBaseImageQueries.test_get_available_base_images_pagination_reverse 1'] = {
'data': {
'availableBases': {
'edges': [
{
'cursor': 'NQ==',
'node': {
'componentId': 'ut-rstudio-server',
'description': 'R + tidyverse packages in RStudio® Server',
'developmentTools': [
'rstudio',
'jupyterlab',
'notebook'
],
'dockerImageNamespace': 'gigantum',
'dockerImageRepository': 'rstudio-server',
'dockerImageServer': 'hub.docker.com',
'dockerImageTag': '87c90e7834-2019-07-19',
'icon': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAACXBIWXMAAAsSAAALEgHS3X78AAAGmklEQVRoge1a729TVRh+aBYTh9Cq0ShR10ZJjH5YiX8AnQy3boMVlo2NDdphZJkG6L7IBz/Q/QXsA0YjIEWCDLZBAYO4BLld2Rwak+4P0HQOMJBoVqMmEr2vee+P9v6+t2NVsnCy2/bcX+d9znPe933OOVtFRFgJxbciUDwC8hCWFQOkphov/VrIhYkQIBBABJIOviJ9F5o2NxSWu80HilrZGzMBECJExEeYgDCI/HrD5d9ECiiUfheJKE+AIH0TCVtamxb/MyBT07NBIoqBKEFE9bK9xl43sSAb7wyM/+YIlCaidGxrS0WgPAPJTc9yrycJ1A7FEFIMsQZiZ6yhbnsNJxnQ9m1twrIAyc0wAKSIaCNcjV8SC3yHqa7prCyBEp0d7Y5+ZQskN3MzACIGcED/YrWhqrCg1DWglHsADHd1xlIVAcnNzHLU4bFa/z+xYKrLzdMc+2dPd4eJHROQ3DSDIIFA/opYgMHwsvFZBYhgwwK3x6FaiXgm48tsyW1ytIv07uzM2wKZmp7ll3GD/iWyUCRChjuC39Pa8lZF+WJs4mJAARaTIiNQV25PCwxFEEX6+rpKYEpApm58EyawAfAvgQXu9XS0uTFdieFuZXT0PNuUJKK4BUNFZnH3ru6CDkg2N5N3zgsWQwjSmE1Gmxo9hcilltOfjwXZZ0G0UfUfuXnKxuM9kRKQbG6Go9OhClkYjjZtMkWRV/ff4HOH1PvkUn6X2qvqeX1dd22eCHmAMgAyP368efHUqbMpAtupCwT9/f29aZ8wNR0QSaIPRCJI1PoBQTTUZT8QG6xAlMsDg+Bn6sDJFzhBRIXQwGRy164dKRJpWLKrbI9kh48IsZJzezlEijgPpWUBYbzHD9Dh0MBX6Xi8h8FkNbbWHTv2WcxHJMY8sCCzRTQUbW7UhT0TDM8gtM84gtA+Ew/uvcqjJ2Gwj4FIytWZDVE65luaG0ecQJhZcQJhvmZ9HsZnUnv29BVIFC9qUkHYpw4rBxbk8Qg4+EQ1QZiu+eve+ZLdQSj7iVhf4zE784enEOsRBCvbgoWhEUmcuncKSyhBq8ssgGjr5QS0pbXJY5b2xET6h482WXZMaGCSz290Yla2T9SlB5+iXbS+oPENUScPvBcvw8m2CG4g9FJJOuZ8kkDU+4LZ0VlAXboS8cqI3gh7lmyeD3rwo4IkNFXfFilfQyJlCGgvaxhbyZ2Qe8sFhgcQVqSEBiaDAIdVxN2DgeQfSY1LZFZ9cWUyoCD0WxhvmOwgtH1bm6OvrH/vuiRRvEQnr8nTACK7cLw18uGRowWS1fH8vn17g77W6OZFEsURq/wB0zkx48aIOxMPBII/k0eOHOW1A1XiyxKFP9pamzjtz+k0jKjTM2q9fmz8oqNUryKIIkD9Bzf8vMjGy74rZvfvH0hDu9KopP2iBQsahqT74mfPXRBGz54P2ECpBogsQJGDG+7wpC2juEGRRMlvoQPSvjWaJ5FiFiyUNVgZFM8LCmdGJ2LWrCwbiGEAG9gn3g/fCZBIBc7iDEIkihxIDpb81TRnn7hwmRff0qTMFNVGdKxA95uVaKqvt0uKaC8PXjPMR5bOBIDiEzV/pwdfv8uhNg51GgFEkslB+zm7WsYnLoUVCutsVjNKCgClGRvNgzBy+Pung7/dX3XABcRJQJIoASWs+y1AlNp79vH72L3+npT4QEgkhwZNCtx2XevcWCZAyrqWBQu2DAkLtcjeWu3GREPhk2aJwc4Pxnd8e6921A6E+sxrT/5xM/rCr81DQ+9aLqW6rjSeGZ3gjJ4idb6sZ0GTZ+Tv7MJqTN2udQKBnet/wUtr/iqx+t3dWly7vdYWhAKQpVT49gnrFUfX/ZGe7g6hp7uDVWmDtHypiWJW0c2rT2iPN575HWsf+8cJhCTfAdjOhzxv9PTu7BT6ersYUIhninLegS7vlP3IBQRgmv+8uW7Rw/yE2tclMpaar+KNHmUdiXtm5ET6NAu8CHhvRN0fAfxu0UkxnhNcnldKWLi+suZPAXgqA/AQtgShdg4nwKDRrmXfng4NTFqGX4NBDT8dazEJ0Bffvswr/9etJL9BOQ/fSW/TzVirtIfoliesy8LxLTwXybqA4Hry+fh5HStVAOIFhOMoSLqAgJJ3qsuI+0TKeSjf+nQrJ7uTVs8bOiX+3O7xkuNXhRFrI6yu2ZaUCwj1fCkcL7uzB/de5c3SoAuI/MLxNsfNznWJTBiggAMI9Xf+7qnOxUf/VPOwlUdAHrayMoAA+Bf/LSDw4OFwPgAAAABJRU5ErkJggg==',
'id': 'QmFzZUNvbXBvbmVudDpnaWdhbnR1bV9iYXNlLWltYWdlcy10ZXN0aW5nJnV0LXJzdHVkaW8tc2VydmVyJjE=',
'languages': [
'R',
'python3'
],
'license': 'MIT',
'name': 'R Tidyverse in RStudio® Server',
'osClass': 'ubuntu',
'osRelease': '18.04',
'packageManagers': [
'apt',
'pip',
'conda3'
],
'readme': '''A Base containing R 3.6 and Tidyverse from the official CRAN PPA configured
for use in RStudio® Server. A conda-managed JupyterLab is also installed, and
OpenBLAS is configured.
You should generally use Apt with `r-cran-<package name>` to quickly install
"package name" from CRAN. You can also use Docker snippets for approaches
like `install.packages` or `devtools::install_github`. Don't hesitate to use
the "discuss" link (in the lower-right "?" bubble in the app) if you\'d like
some guidance!
*RStudio® and the RStudio logo are registered trademarks of RStudio, Inc.*
''',
'tags': [
'ubuntu',
'rstats',
'rstudio'
],
'url': None
}
}
],
'pageInfo': {
'hasNextPage': False,
'hasPreviousPage': True
}
}
}
}
snapshots['TestEnvironmentBaseImageQueries.test_get_available_base_images_pagination_reverse 2'] = {
'data': {
'availableBases': {
'edges': [
{
'cursor': 'MA==',
'node': {
'componentId': 'quickstart-jupyterlab',
'description': 'Data Science Quickstart using Jupyterlab, numpy, and Matplotlib. A great base for any analysis.',
'developmentTools': [
'jupyterlab'
],
'dockerImageNamespace': 'gigantum',
'dockerImageRepository': 'python3-minimal',
'dockerImageServer': 'hub.docker.com',
'dockerImageTag': '1effaaea-2018-05-23',
'icon': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNk+A8AAQUBAScY42YAAAAASUVORK5CYII=',
'id': 'QmFzZUNvbXBvbmVudDpnaWdhbnR1bV9iYXNlLWltYWdlcy10ZXN0aW5nJnF1aWNrc3RhcnQtanVweXRlcmxhYiYy',
'languages': [
'python3'
],
'license': 'MIT',
'name': 'Data Science Quickstart with JupyterLab',
'osClass': 'ubuntu',
'osRelease': '18.04',
'packageManagers': [
'apt',
'pip3'
],
'readme': 'Empty for now',
'tags': [
'ubuntu',
'python3',
'jupyterlab'
],
'url': None
}
}
],
'pageInfo': {
'hasNextPage': False,
'hasPreviousPage': False
}
}
}
}
|
"""
This file contains the Reverse module for the PyADBCXY package. It includes the Reverse class,
which implements the reverse mode of automatic differentiation.
"""
import numpy as np
__all__ = ["Reverse"]
class Reverse:
"""
Reverse is the class for implementing the reverse mode auto differentiation including
instantiating the class, storing children, evaluating gradient and all required basic
operations.
>>> x = Reverse(3)
>>> y = Reverse(3)
>>> z = x + y
>>> z.val
6
>>> z.grad
1
"""
def __init__(self, val, grad=1):
"""Constructor for Reverse class
Args:
val (int or float): value of the Reverse
grad (int or float, optional): derivative of the Reverse. Defaults to 1.
"""
self._val = val
self._grad = grad
self._children = []
def __repr__(self):
return f"Reverse(val = {self.val}, grad = {self.grad})"
def __str__(self):
return f"Reverse(val = {self.val}, grad = {self.grad})"
@property
def val(self):
"""Get the value of the Reverse
Examples
--------
>>> x = Reverse(3)
>>> x.val
3
"""
return self._val
@property
def grad(self):
"""Get the gradient of the Reverse object
Examples
--------
>>> x = Reverse(3)
>>> x.grad
1
"""
        if self._grad is None:
grad = 0
for der, child in self._children:
grad += der * child.grad
self._grad = grad
return self._grad
@val.setter
def val(self, val):
"""Set the value of the Reverse object
Args:
val (int or float): new value of the Reverse object
Examples
--------
>>> x = Reverse(3)
>>> x.val
3
>>> x.val = 4
>>> x.val
4
"""
self._val = val
@grad.setter
def grad(self, grad):
"""Set the gradient of the Reverse object
Args:
grad (int, float, or array): new gradient of the Reverse object
Examples
--------
        >>> x = Reverse(3)
>>> x.grad
1
>>> x.grad = 2
>>> x.grad
2
"""
self._grad = grad
def __mul__(self, other):
"""Overload of the '*' operator (Reverse * other). Calculates the value and gradient resulting
from the multiplication of two Reverse objects or a Reverse object and other object.
Args:
other (Reverse object, int, or float): item to be added to the Reverse
Returns:
Reverse: resulting Reverse object
Examples
--------
        Note that a freshly created result node reports the default seed gradient of 1;
        the accumulated derivatives are read from the input nodes (e.g. x1.grad).
        >>> x1 = Reverse(3)
        >>> x2 = Reverse(4)
        >>> x3 = x1 * x2
        >>> print(x3)
        Reverse(val = 12, grad = 1)
        >>> x4 = x1 * 3
        >>> print(x4)
        Reverse(val = 9, grad = 1)
        >>> x5 = x2 * 2.0
        >>> print(x5)
        Reverse(val = 8.0, grad = 1)
"""
if isinstance(other, Reverse):
val_mul = self.val * other.val
new_RevMod = Reverse(val_mul)
self._children.append((other.val, new_RevMod))
self.grad = None
other._children.append((self.val, new_RevMod))
other.grad = None
return new_RevMod
elif isinstance(other, float) or isinstance(other, int):
val_mul = self.val * other
new_RevMod = Reverse(val_mul) # instantiate class
self._children.append((other, new_RevMod))
self.grad = None
return new_RevMod
else:
raise TypeError("Reverse mode calculation only accepts Reverse object, int, float types.")
def __add__(self, other):
"""Overload of the '+' operator (Reverse + other). Calculates the value and gradient resulting
from the addition of two Reverse objects or a Reverse object and other object.
Args:
other (Reverse object, int, or float): item to be added to the Reverse
Returns:
Reverse: resulting Reverse object
Examples
--------
>>> x1 = Reverse(3)
>>> x2 = Reverse(4)
>>> x3 = x1 + x2
>>> print(x3)
        Reverse(val = 7, grad = 1)
>>> x4 = x1 + 5
>>> print(x4)
Reverse(val = 8, grad = 1)
>>> x5 = x2 + 2.0
>>> print(x5)
Reverse(val = 6.0, grad = 1)
"""
if isinstance(other, Reverse):
val_add = self.val + other.val
new_RevMod = Reverse(val_add)
self._children.append((1, new_RevMod))
self.grad = None
other._children.append((1, new_RevMod))
other.grad = None
return new_RevMod
elif isinstance(other, float) or isinstance(other, int):
val_add = self.val + other
new_RevMod = Reverse(val_add)
self._children.append((1, new_RevMod))
self.grad = None
return new_RevMod
else:
raise TypeError("Reverse mode calculation only accepts Reverse object, int, float types.")
def __sub__(self, other):
"""Overload of the '-' operator (Reverse - other). Calculates the value and derivative resulting
from the addition of two Reverse objects or a Reverse object and other object.
Args:
other (Reverse object, int, or float): item to be added to the Reverse
Returns:
Reverse: resulting Reverse object
Examples
--------
>>> x1 = Reverse(3)
>>> x2 = Reverse(4)
>>> x3 = x1 - x2
>>> print(x3)
        Reverse(val = -1, grad = 1)
>>> x4 = x1 - 5
>>> print(x4)
Reverse(val = -2, grad = 1)
>>> x5 = x2 - 2.0
>>> print(x5)
Reverse(val = 2.0, grad = 1)
"""
if isinstance(other, Reverse):
val_sub = self.val - other.val
new_RevMod = Reverse(val_sub)
self._children.append((1, new_RevMod))
self.grad = None
other._children.append((-1, new_RevMod))
other.grad = None
return new_RevMod
elif isinstance(other, float) or isinstance(other, int):
val_sub = self.val - other
new_RevMod = Reverse(val_sub)
self._children.append((1, new_RevMod))
self.grad = None
return new_RevMod
else:
raise TypeError("Reverse mode calculation only accepts Reverse object, int, float types.")
def __truediv__(self, other):
"""Overload of the '/' operator (Reverse / other). Calculates the value and derivative resulting
from the division of one Reverse (or other object) from a Reverse.
Args:
other (Reverse, int, or float): item the Reverse is to be divided by
Returns:
Reverse: resulting Reverse object
Examples
--------
>>> x1 = Reverse(8)
>>> x2 = Reverse(4)
>>> x3 = x1 / x2
>>> print(x3)
Reverse(val = 2.0, grad = 1)
>>> x4 = x1 / 2
>>> print(x4)
        Reverse(val = 4.0, grad = 1)
>>> x5 = x2 / 2.0
>>> print(x5)
        Reverse(val = 2.0, grad = 1)
"""
if isinstance(other, Reverse):
if other.val == 0:
raise ZeroDivisionError("Cannot divide the variable with 0.")
val_div = self.val / other.val
new_RevMod = Reverse(val_div)
self._children.append(( 1 / other.val, new_RevMod))
self.grad = None
            other._children.append((- self.val / (other.val ** 2), new_RevMod))  # d(self/other)/d(other) = -self/other**2
other.grad = None
return new_RevMod
elif isinstance(other, float) or isinstance(other, int):
if other == 0:
raise ZeroDivisionError("Cannot divide the variable with 0.")
val_div = self.val / other
new_RevMod = Reverse(val_div)
self._children.append(( 1 / other, new_RevMod))
self.grad = None
return new_RevMod
else:
raise TypeError("Reverse mode calculation only accepts Reverse object, int, float types.")
def __radd__(self, other):
"""Overload of the '+' operator (other + Reverse). Calculates the value and derivative resulting
from the addition of two Reverse objects or a other object and Reverse object.
Args:
other (Reverse object, int, or float): item to add Reverse to
Returns:
Reverse: resulting Reverse object
Examples
--------
>>> x = Reverse(3)
>>> z = 3 + x
>>> print(z)
Reverse(val = 6, grad = 1)
"""
return self.__add__(other)
def __rmul__(self, other):
"""Overload of the '*' operator (other * Reverse). Calculates the value and derivative resulting
from the multiplication of two Reverse objects or other object and Reverse object.
Args:
other (Reverse object, int, or float): item to be multiplied with the Reverse
Returns:
Reverse: resulting Reverse object
Examples
--------
>>> x = Reverse(3)
>>> z = -5 * x
>>> print(z)
        Reverse(val = -15, grad = 1)
"""
return self.__mul__(other)
def __rsub__(self, other):
"""Overload of the '-' operator (other - Reverse). Calculates the value and derivative resulting
from the subtraction of two Reverse objects or a Reverse object from another object.
Args:
other (Reverse object, int, or float): item to subtract Reverse from
Returns:
Reverse: resulting Reverse object
Examples
--------
>>> x = Reverse(3)
>>> z = 5 - x
>>> print(z)
Reverse(val = 2, grad = 1)
"""
if isinstance(other, Reverse) or isinstance(other, int) or isinstance(other, float):
new_val = other - self.val
            new_RevMod = Reverse(new_val)
            self._children.append((-1, new_RevMod))  # d(other - self)/d(self) = -1
self.grad = None
return new_RevMod
else:
raise TypeError("Reverse mode calculation only accepts Reverse object, int, float types.")
def __rtruediv__(self, other):
"""Overload of the '/' operator (other / Reverse). Calculates the value and derivative resulting
from the division of one object by a Reverse object.
Args:
other (Reverse, int, or float): item to divide by Reverse object
Returns:
Reverse: resulting Reverse object
Examples
--------
>>> x = Reverse(3)
>>> z = 9 / x
>>> print(z)
        Reverse(val = 3.0, grad = 1)
"""
if isinstance(other, Reverse) or isinstance(other, int) or isinstance(other, float):
if self.val == 0:
raise ZeroDivisionError("Cannot divide the variable with 0.")
new_val = other / self.val
new_RevMod = Reverse(new_val)
self._children.append(( - other / self.val ** 2, new_RevMod))
self.grad = None
return new_RevMod
else:
raise TypeError("Reverse mode calculation only accepts Reverse object, int, float types.")
def cos(self):
"""Calculates trigonometric cosine of the current Reverse object.
Args:
none
Returns:
Reverse object
Examples
--------
>>> x = Reverse(np.pi, 1)
>>> z = x.cos()
>>> print(z)
        Reverse(val = -1.0, grad = 1)
"""
new_val = np.cos(self.val)
new_RevMod = Reverse(new_val)
self._children.append((-np.sin(self.val), new_RevMod)) # -sinx
self.grad = None
return new_RevMod
def tan(self):
"""Calculates trigonometric tangent of the current Reverse object.
Args:
none
Returns:
Reverse object
Examples
--------
>>> x = Reverse(np.pi / 4)
>>> z = x.tan()
>>> print(z)
        Reverse(val = 0.9999999999999999, grad = 1)
"""
new_val = np.tan(self.val)
new_RevMod = Reverse(new_val)
self._children.append(( 1/(np.cos(self.val) ** 2), new_RevMod)) # 1/ sec **2 x
self.grad = None
return new_RevMod
def sin(self):
"""Calculates trigonometric sine of Reverse and returns the result.
Args:
None
Returns:
Reverse object
Examples
--------
>>> x = Reverse(np.pi / 2)
>>> z = x.sin()
>>> print(z)
        Reverse(val = 1.0, grad = 1)
"""
new_val = np.sin(self.val)
new_RevMod = Reverse(new_val)
self._children.append((np.cos(self.val), new_RevMod)) #cosx
self.grad = None
return new_RevMod
def cosh(self):
"""Calculates hyperbolic cosine of Reverse and returns the result.
Args:
None
Returns:
Reverse object
Examples
--------
>>> x = Reverse(1)
>>> z = x.cosh()
>>> print(z)
        Reverse(val = 1.5430806348152437, grad = 1)
"""
new_val = np.cosh(self.val)
new_RevMod = Reverse(new_val)
self._children.append((np.sinh(self.val), new_RevMod))
self.grad = None
return new_RevMod
def tanh(self):
"""Calculates hyperbolic tanh of Reverse and returns the result.
Args:
None
Returns:
Reverse object
Examples
--------
>>> x = Reverse(1)
>>> z = x.tanh()
>>> print(z)
        Reverse(val = 0.7615941559557649, grad = 1)
"""
new_val = np.tanh(self.val)
new_RevMod = Reverse(new_val)
self._children.append((1 / np.cosh(self.val) ** 2, new_RevMod))
self.grad = None
return new_RevMod
def sinh(self):
"""Calculates hyperbolic sinh of Reverse and returns the result.
Args:
None
Returns:
Reverse object
Examples
--------
>>> x = Reverse(2)
>>> z = x.sinh()
>>> print(z)
        Reverse(val = 3.626860407847019, grad = 1)
"""
new_val = np.sinh(self.val)
new_RevMod = Reverse(new_val)
self._children.append((np.cosh(self.val), new_RevMod))
self.grad = None
return new_RevMod
def arccos(self):
"""Calculates arc arccos of Reverse object and returns the result.
Args:
None
Returns:
Reverse object
Examples
--------
>>> r = Reverse(0.9, 0.5)
>>> r.arccos()
        Reverse(val = 0.45102681179626236, grad = 1)
"""
# TODO: raise ValueError if self.val outside of domain (NumPy won't, it just returns nan)
new_val = np.arccos(self.val)
new_RevMod = Reverse(new_val)
self._children.append((-1 / np.sqrt( 1- self.val ** 2), new_RevMod ))
self.grad = None
return new_RevMod
def arctan(self):
"""Calculates arc tangent of Reverse object and returns the result.
Args:
None
Returns:
Reverse object
Examples
--------
>>> r = Reverse(0.9, 0.5)
>>> r.arctan()
        Reverse(val = 0.7328151017865066, grad = 1)
"""
new_val = np.arctan(self.val)
new_RevMod = Reverse(new_val)
self._children.append((1 / (1 + self.val ** 2), new_RevMod ))
self.grad = None
return new_RevMod
def arcsin(self):
"""Calculates arc sine of Reverse and returns the result.
Args:
None
Returns:
Reverse object.
Examples
--------
        >>> r = Reverse(0.9, 0.5)
        >>> r.arcsin()
        Reverse(val = 1.1197695149986342, grad = 1)
"""
# TODO: raise ValueError if self.val outside of domain (NumPy won't, it just returns nan)
new_val = np.arcsin(self.val)
new_RevMod = Reverse(new_val)
self._children.append((1 / np.sqrt(1 - self.val**2 ), new_RevMod ))
self.grad = None
return new_RevMod
def exp(self):
"""Calculates exponential (exp()) of Reverse object and returns a Reverse object back.
Args:
None
Returns:
Reverse object
Examples
--------
>>> r = Reverse(4., 5.)
>>> r.exp()
        Reverse(val = 54.598150033144236, grad = 1)
"""
new_val = np.exp(self.val)
new_RevMod = Reverse(new_val)
self._children.append((np.exp(self.val), new_RevMod))
self.grad = None
return new_RevMod
def log(self, base=np.e):
"""Calculates logarithm (log()) of Reverse, int, or float and returns the result.
Args:
base (int or float, optional): logarithm base. Defaults to np.e which uses natural logarithm.
Returns:
Reverse: resulting logarithm value
Examples
--------
>>> r = Reverse(4., 5.)
>>> r.log()
        Reverse(val = 1.3862943611198906, grad = 1)
"""
# TODO: raise ZeroDivisionError if base == 1
if self.val <= 0:
raise ValueError(f"Log cannot be negative for this implementation")
else:
new_val = np.log(self.val)/ np.log(base)
new_RevMod = Reverse(new_val)
self._children.append((1 / (self.val * np.log(base)), new_RevMod))
self.grad = None
return new_RevMod
def __pow__(self, other):
"""Overload of the '**' or 'pow()' operator (Reverse**other). Calculates the value and derivative resulting
from raising Reverse to the power of other.
Args:
other (Reverse, int, or float): item the Reverse is to be raised to
Returns:
Reverse: resulting Reverse object
Examples
--------
        >>> Reverse(3) ** Reverse(4., 5.)
        Reverse(val = 81.0, grad = 1)
"""
if self.val > 0:
if isinstance(other, int) or isinstance(other, float):
other = Reverse(other)
new_val = self.val ** other.val
# der_div = other.val * self.val**(other.val-1) * self.der + np.log(self.val) * self.val**other.val * other.der
new_RevMod = Reverse(new_val)
self._children.append((other.val * self.val ** (other.val -1 ), new_RevMod))
self.grad = None
other._children.append((np.log(self.val) * self.val ** other.val , new_RevMod))
other.grad = None
return new_RevMod
            elif isinstance(other, Reverse):
                new_val = self.val ** other.val
                new_RevMod = Reverse(new_val)
                self._children.append((other.val * self.val ** (other.val - 1), new_RevMod))
                self.grad = None
                # Track the dependence on the exponent as well: d(self**other)/d(other) = ln(self) * self**other
                other._children.append((np.log(self.val) * self.val ** other.val, new_RevMod))
                other.grad = None
                return new_RevMod
else:
raise TypeError("Reverse mode calculation only accepts Reverse object, int, float types.")
else:
raise ValueError('math domain error: the base of exponentiation cannot be non-positive')
def __rpow__(self, other):
"""Overload of the '**' or 'pow()' operator (other**Reverse). Calculates the value and derivative resulting
from raising other to the power of the Reverse.
Args:
other (Reverse, int, or float): item to raise to the power of the Reverse
Returns:
Reverse: resulting Reverse object
Examples
--------
>>> 6 ** Reverse(3)
        Reverse(val = 216, grad = 1)
"""
if isinstance(other, int) or isinstance(other, float):
if other > 0:
other = Reverse(other)
new_val = other.val ** self.val
new_RevMod = Reverse(new_val)
self._children.append((np.log(other.val) * (other.val ** self.val), new_RevMod))
self.grad = None
return new_RevMod
else:
raise ValueError('math domain error: the base of exponentiation cannot be non-positive')
else:
raise TypeError(f"unsupported operand type(s) for ** or power: '{type(other)}' and '{type(self)}'")
def __eq__(self, other):
"""Overload of the '==' operator. Determines whether Reverse is equal to
another object.
Args:
other (Reverse, int, or float): item to check equality with Reverse
        Returns:
            bool: whether the other object is a Reverse with the same value and gradient
Examples
--------
>>> Reverse(3, 4) == Reverse(3, 4)
True
>>> Reverse(3, 4) == Reverse(7, 4)
False
>>> Reverse(3, 4) == Reverse(7, 8)
False
>>> 7 == Reverse(7, 4)
False
"""
if not isinstance(other, Reverse):
return False
if self.val != other.val:
return False
if not self.grad or not other.grad:
return False
else:
return self.grad == other.grad
def __ne__(self, other):
return not self.__eq__(other)
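if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the package): build a small
    # expression and read the reverse-mode gradients back from the input nodes.
    x = Reverse(2.0)
    y = Reverse(3.0)
    z = x * y + x.sin()   # z = x*y + sin(x); z keeps the default seed gradient of 1
    print(z.val)          # ~6.9093
    print(x.grad)         # dz/dx = y + cos(x) ~ 2.5839
    print(y.grad)         # dz/dy = x = 2.0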
|
def my_print(s, *args):
    # Substitute each positional placeholder {i} with the matching argument, then print.
    for i in range(len(args)):
        s = s.replace('{' + str(i) + '}', args[i])
    print(s)
my_print('Salam!')
my_print('Salam {0}!', 'ali')
my_print('Salam {0}! {1}', 'ali', 'X')
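# For comparison, the built-in str.format performs the same positional substitution:
# print('Salam {0}! {1}'.format('ali', 'X'))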
|
#!/usr/bin/python
command = oslc("-d test.osl")
|
"""
*Sound*
The sound type.
"""
from fivear.sampling import Sampling
class Sound(
Sampled,
):
__metaclass__ = ABCMeta
# DB_PATH = Path("/home/jed/4ear/fourear/sounds/")
# def __init__(self, name: str, sampling_parameters: SamplingParameters):
# self.__name = name
# self._sampling_parameters = sampling_parameters
# @property
# def name(self) -> str:
# return self.__name
# @abstractmethod
# def synthesize(self) -> Soundwave:
# return
# @property
# @abstractmethod
# def amplitude_envelope(self) -> AmplitudeEnvelope:
# return
# @property
# def number_of_samples(self) -> int:
# return self._sampling_parameters.number_of_samples(
# self.amplitude_envelope.ending_frame
# )
# @property
# def duration(self) -> float:
# """Duration in seconds."""
# return self.number_of_samples * self._sampling_parameters.sample_duration
# def save(self, soundwave: Soundwave):
# path = self.DB_PATH.joinpath(self.name)
# soundfile.write(
# f"{path}.wav",
# soundwave.as_array(self.number_of_samples),
# samplerate=self._sampling_parameters.sampling_rate,
# )
|
from django import template
from ..forms import GroupSignupForm
from ..models import CollectionGroup, CollectionEvent
register = template.Library()
@register.simple_tag
def get_event_gantt_chart(event, event_members, user):
return list(
get_gantt_for_member(event, member, user)
for member in event_members
)
def get_gantt_for_member(event, member, user):
duration = event.end - event.start
offset = (member.start - event.start) / duration * 100
member_duration = member.end - member.start
width = member_duration / duration * 100
return {
'style': 'margin-left: {offset}%; width: {width}%'.format(
offset=offset, width=width
),
'member': member,
'can_delete': member.user_id == user.id,
}
@register.simple_tag
def get_signup_form():
return GroupSignupForm()
@register.inclusion_tag('collection/_event_schedule.html')
def show_related_collection(request, event):
calendar = event.calendar
try:
group = CollectionGroup.objects.get(calendar=calendar)
is_member = group.has_member(request.user)
except CollectionGroup.DoesNotExist:
is_member = False
group = None
events = CollectionEvent.objects.filter(
event_occurence__event=event
)
return {
'request': request,
'events': events,
'group': group,
'is_member': is_member
}
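# Illustrative template usage sketch (the {% load %} argument is an assumption; use
# whatever name this module has inside the app's templatetags package):
#
#   {% load collection_tags %}
#   {% get_event_gantt_chart event event_members request.user as gantt_rows %}
#   {% get_signup_form as signup_form %}
#   {% show_related_collection request event %}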
|
import os
import cv2
import time
import base64
import numpy as np
__all__ = ['base64_to_cv2', 'cv2_to_base64', 'Processor']
def check_dir(dir_path):
    # Directory check: make sure dir_path exists as a directory (replace a regular file of the same name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
elif os.path.isfile(dir_path):
os.remove(dir_path)
os.makedirs(dir_path)
def base64_to_cv2(b64str):
    # Decode a base64 string into a cv2 (BGR) image
data = base64.b64decode(b64str.encode('utf8'))
data = np.frombuffer(data, np.uint8)
data = cv2.imdecode(data, cv2.IMREAD_COLOR)
return data
def cv2_to_base64(image):
    # Encode a cv2 image as a base64 JPEG string
    data = cv2.imencode('.jpg', image)[1]
    return base64.b64encode(data.tobytes()).decode('utf8')
class Processor():
    # Constructor
def __init__(self, images=None, paths=None, output_dir='output', batch_size=1):
        # Store configuration
        self.images = images
        self.paths = paths
        self.output_dir = output_dir
        self.batch_size = batch_size
        # Load the raw input data
        self.datas = self.load_datas()
        # Preprocess the raw input data
        self.input_datas = self.preprocess()
    # Load the input images
    def load_datas(self):
        datas = []
        # Read images from the given file paths
        if self.paths is not None:
            for im_path in self.paths:
                assert os.path.isfile(im_path), "The {} isn't a valid file path.".format(im_path)
                im = cv2.imread(im_path)
                datas.append(im)
        if self.images is not None:
            datas = self.images
        # Return the list of images
        return datas
    # Preprocess the input data
    def preprocess(self):
        input_datas = []
        # Per-image preprocessing
        for i, img in enumerate(self.datas):
            # Resize the image
            img = cv2.resize(img, (256, 256))
            # Normalize to [-1, 1]
            img = (img.astype('float32') / 255.0 - 0.5) / 0.5
            # Transpose HWC -> CHW
            img = img.transpose((2, 0, 1))
            # Add a batch dimension
            img = np.expand_dims(img, axis=0)
            # Append to the list of inputs
            input_datas.append(img)
        # Split the data into batches of batch_size
        input_datas = np.concatenate(input_datas, 0)
        split_num = len(self.datas) // self.batch_size + 1 if len(self.datas) % self.batch_size != 0 else len(
            self.datas) // self.batch_size
        input_datas = np.array_split(input_datas, split_num)
        # Return the preprocessed batches
        return input_datas
    def postprocess(self, outputs, visualization):
        results = []
        for im_id, output in enumerate(outputs):
            # Map the output back from [-1, 1] to [0, 255]
            img = (output * 0.5 + 0.5) * 255.
            # Clip to the valid range
            img = np.clip(img, 0, 255).astype(np.uint8)
            # Transpose CHW -> HWC
            img = img.transpose((1, 2, 0))
            # Save the image to disk if visualization is requested
            if visualization:
                # Make sure the output directory exists
                check_dir(self.output_dir)
                # Write the output image
                cv2.imwrite(os.path.join(self.output_dir, '%d_%d.jpg' % (im_id, time.time())), img)
            results.append(img)
        # Return the results
        return results
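# Illustrative usage sketch (assumes a callable `model` that maps a preprocessed
# batch to output images in CHW layout; the file name is a placeholder):
#
#   processor = Processor(paths=['test.jpg'], output_dir='output', batch_size=1)
#   results = []
#   for batch in processor.input_datas:
#       outputs = model(batch)
#       results += processor.postprocess(outputs, visualization=True)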
|